python_code
stringlengths
0
1.8M
repo_name
stringclasses
7 values
file_path
stringlengths
5
99
// SPDX-License-Identifier: GPL-2.0 AND MIT /* * Copyright © 2023 Intel Corporation */ #include "ttm_kunit_helpers.h" struct ttm_device_funcs ttm_dev_funcs = { }; EXPORT_SYMBOL_GPL(ttm_dev_funcs); int ttm_device_kunit_init(struct ttm_test_devices *priv, struct ttm_device *ttm, bool use_dma_alloc, bool use_dma32) { struct drm_device *drm = priv->drm; int err; err = ttm_device_init(ttm, &ttm_dev_funcs, drm->dev, drm->anon_inode->i_mapping, drm->vma_offset_manager, use_dma_alloc, use_dma32); return err; } EXPORT_SYMBOL_GPL(ttm_device_kunit_init); struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test, struct ttm_test_devices *devs, size_t size) { struct drm_gem_object gem_obj = { .size = size }; struct ttm_buffer_object *bo; bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, bo); bo->base = gem_obj; bo->bdev = devs->ttm_dev; return bo; } EXPORT_SYMBOL_GPL(ttm_bo_kunit_init); struct ttm_test_devices *ttm_test_devices_basic(struct kunit *test) { struct ttm_test_devices *devs; devs = kunit_kzalloc(test, sizeof(*devs), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, devs); devs->dev = drm_kunit_helper_alloc_device(test); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, devs->dev); devs->drm = __drm_kunit_helper_alloc_drm_device(test, devs->dev, sizeof(*devs->drm), 0, DRIVER_GEM); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, devs->drm); return devs; } EXPORT_SYMBOL_GPL(ttm_test_devices_basic); struct ttm_test_devices *ttm_test_devices_all(struct kunit *test) { struct ttm_test_devices *devs; struct ttm_device *ttm_dev; int err; devs = ttm_test_devices_basic(test); ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, ttm_dev); err = ttm_device_kunit_init(devs, ttm_dev, false, false); KUNIT_ASSERT_EQ(test, err, 0); devs->ttm_dev = ttm_dev; return devs; } EXPORT_SYMBOL_GPL(ttm_test_devices_all); void ttm_test_devices_put(struct kunit *test, struct ttm_test_devices *devs) { if (devs->ttm_dev) ttm_device_fini(devs->ttm_dev); 
drm_kunit_helper_free_device(test, devs->dev); } EXPORT_SYMBOL_GPL(ttm_test_devices_put); int ttm_test_devices_init(struct kunit *test) { struct ttm_test_devices *priv; priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, priv); priv = ttm_test_devices_basic(test); test->priv = priv; return 0; } EXPORT_SYMBOL_GPL(ttm_test_devices_init); void ttm_test_devices_fini(struct kunit *test) { ttm_test_devices_put(test, test->priv); } EXPORT_SYMBOL_GPL(ttm_test_devices_fini); MODULE_LICENSE("GPL");
linux-master
drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
// SPDX-License-Identifier: GPL-2.0 AND MIT /* * Copyright © 2023 Intel Corporation */ #include <linux/mm.h> #include <drm/ttm/ttm_tt.h> #include <drm/ttm/ttm_pool.h> #include "ttm_kunit_helpers.h" struct ttm_pool_test_case { const char *description; unsigned int order; bool use_dma_alloc; }; struct ttm_pool_test_priv { struct ttm_test_devices *devs; /* Used to create mock ttm_tts */ struct ttm_buffer_object *mock_bo; }; static struct ttm_operation_ctx simple_ctx = { .interruptible = true, .no_wait_gpu = false, }; static int ttm_pool_test_init(struct kunit *test) { struct ttm_pool_test_priv *priv; priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, priv); priv->devs = ttm_test_devices_basic(test); test->priv = priv; return 0; } static void ttm_pool_test_fini(struct kunit *test) { struct ttm_pool_test_priv *priv = test->priv; ttm_test_devices_put(test, priv->devs); } static struct ttm_tt *ttm_tt_kunit_init(struct kunit *test, uint32_t page_flags, enum ttm_caching caching, size_t size) { struct ttm_pool_test_priv *priv = test->priv; struct ttm_buffer_object *bo; struct ttm_tt *tt; int err; bo = ttm_bo_kunit_init(test, priv->devs, size); KUNIT_ASSERT_NOT_NULL(test, bo); priv->mock_bo = bo; tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, tt); err = ttm_tt_init(tt, priv->mock_bo, page_flags, caching, 0); KUNIT_ASSERT_EQ(test, err, 0); return tt; } static struct ttm_pool *ttm_pool_pre_populated(struct kunit *test, size_t size, enum ttm_caching caching) { struct ttm_pool_test_priv *priv = test->priv; struct ttm_test_devices *devs = priv->devs; struct ttm_pool *pool; struct ttm_tt *tt; unsigned long order = __fls(size / PAGE_SIZE); int err; tt = ttm_tt_kunit_init(test, order, caching, size); KUNIT_ASSERT_NOT_NULL(test, tt); pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, pool); ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false); err = ttm_pool_alloc(pool, tt, 
&simple_ctx); KUNIT_ASSERT_EQ(test, err, 0); ttm_pool_free(pool, tt); ttm_tt_fini(tt); return pool; } static const struct ttm_pool_test_case ttm_pool_basic_cases[] = { { .description = "One page", .order = 0, }, { .description = "More than one page", .order = 2, }, { .description = "Above the allocation limit", .order = MAX_ORDER + 1, }, { .description = "One page, with coherent DMA mappings enabled", .order = 0, .use_dma_alloc = true, }, { .description = "Above the allocation limit, with coherent DMA mappings enabled", .order = MAX_ORDER + 1, .use_dma_alloc = true, }, }; static void ttm_pool_alloc_case_desc(const struct ttm_pool_test_case *t, char *desc) { strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE); } KUNIT_ARRAY_PARAM(ttm_pool_alloc_basic, ttm_pool_basic_cases, ttm_pool_alloc_case_desc); static void ttm_pool_alloc_basic(struct kunit *test) { struct ttm_pool_test_priv *priv = test->priv; struct ttm_test_devices *devs = priv->devs; const struct ttm_pool_test_case *params = test->param_value; struct ttm_tt *tt; struct ttm_pool *pool; struct page *fst_page, *last_page; enum ttm_caching caching = ttm_uncached; unsigned int expected_num_pages = 1 << params->order; size_t size = expected_num_pages * PAGE_SIZE; int err; tt = ttm_tt_kunit_init(test, 0, caching, size); KUNIT_ASSERT_NOT_NULL(test, tt); pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, pool); ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, params->use_dma_alloc, false); KUNIT_ASSERT_PTR_EQ(test, pool->dev, devs->dev); KUNIT_ASSERT_EQ(test, pool->nid, NUMA_NO_NODE); KUNIT_ASSERT_EQ(test, pool->use_dma_alloc, params->use_dma_alloc); err = ttm_pool_alloc(pool, tt, &simple_ctx); KUNIT_ASSERT_EQ(test, err, 0); KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages); fst_page = tt->pages[0]; last_page = tt->pages[tt->num_pages - 1]; if (params->order <= MAX_ORDER) { if (params->use_dma_alloc) { KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private); 
KUNIT_ASSERT_NOT_NULL(test, (void *)last_page->private); } else { KUNIT_ASSERT_EQ(test, fst_page->private, params->order); } } else { if (params->use_dma_alloc) { KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private); KUNIT_ASSERT_NULL(test, (void *)last_page->private); } else { /* * We expect to alloc one big block, followed by * order 0 blocks */ KUNIT_ASSERT_EQ(test, fst_page->private, min_t(unsigned int, MAX_ORDER, params->order)); KUNIT_ASSERT_EQ(test, last_page->private, 0); } } ttm_pool_free(pool, tt); ttm_tt_fini(tt); ttm_pool_fini(pool); } static void ttm_pool_alloc_basic_dma_addr(struct kunit *test) { struct ttm_pool_test_priv *priv = test->priv; struct ttm_test_devices *devs = priv->devs; const struct ttm_pool_test_case *params = test->param_value; struct ttm_tt *tt; struct ttm_pool *pool; struct ttm_buffer_object *bo; dma_addr_t dma1, dma2; enum ttm_caching caching = ttm_uncached; unsigned int expected_num_pages = 1 << params->order; size_t size = expected_num_pages * PAGE_SIZE; int err; tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, tt); bo = ttm_bo_kunit_init(test, devs, size); KUNIT_ASSERT_NOT_NULL(test, bo); err = ttm_sg_tt_init(tt, bo, 0, caching); KUNIT_ASSERT_EQ(test, err, 0); pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, pool); ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false); err = ttm_pool_alloc(pool, tt, &simple_ctx); KUNIT_ASSERT_EQ(test, err, 0); KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages); dma1 = tt->dma_address[0]; dma2 = tt->dma_address[tt->num_pages - 1]; KUNIT_ASSERT_NOT_NULL(test, (void *)(uintptr_t)dma1); KUNIT_ASSERT_NOT_NULL(test, (void *)(uintptr_t)dma2); ttm_pool_free(pool, tt); ttm_tt_fini(tt); ttm_pool_fini(pool); } static void ttm_pool_alloc_order_caching_match(struct kunit *test) { struct ttm_tt *tt; struct ttm_pool *pool; struct ttm_pool_type *pt; enum ttm_caching caching = ttm_uncached; unsigned int order = 0; size_t size = 
PAGE_SIZE; int err; pool = ttm_pool_pre_populated(test, size, caching); pt = &pool->caching[caching].orders[order]; KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages)); tt = ttm_tt_kunit_init(test, 0, caching, size); KUNIT_ASSERT_NOT_NULL(test, tt); err = ttm_pool_alloc(pool, tt, &simple_ctx); KUNIT_ASSERT_EQ(test, err, 0); KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages)); ttm_pool_free(pool, tt); ttm_tt_fini(tt); ttm_pool_fini(pool); } static void ttm_pool_alloc_caching_mismatch(struct kunit *test) { struct ttm_tt *tt; struct ttm_pool *pool; struct ttm_pool_type *pt_pool, *pt_tt; enum ttm_caching tt_caching = ttm_uncached; enum ttm_caching pool_caching = ttm_cached; size_t size = PAGE_SIZE; unsigned int order = 0; int err; pool = ttm_pool_pre_populated(test, size, pool_caching); pt_pool = &pool->caching[pool_caching].orders[order]; pt_tt = &pool->caching[tt_caching].orders[order]; tt = ttm_tt_kunit_init(test, 0, tt_caching, size); KUNIT_ASSERT_NOT_NULL(test, tt); KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages)); KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages)); err = ttm_pool_alloc(pool, tt, &simple_ctx); KUNIT_ASSERT_EQ(test, err, 0); ttm_pool_free(pool, tt); ttm_tt_fini(tt); KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages)); KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages)); ttm_pool_fini(pool); } static void ttm_pool_alloc_order_mismatch(struct kunit *test) { struct ttm_tt *tt; struct ttm_pool *pool; struct ttm_pool_type *pt_pool, *pt_tt; enum ttm_caching caching = ttm_uncached; unsigned int order = 2; size_t fst_size = (1 << order) * PAGE_SIZE; size_t snd_size = PAGE_SIZE; int err; pool = ttm_pool_pre_populated(test, fst_size, caching); pt_pool = &pool->caching[caching].orders[order]; pt_tt = &pool->caching[caching].orders[0]; tt = ttm_tt_kunit_init(test, 0, caching, snd_size); KUNIT_ASSERT_NOT_NULL(test, tt); KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages)); KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages)); err = ttm_pool_alloc(pool, tt, 
&simple_ctx); KUNIT_ASSERT_EQ(test, err, 0); ttm_pool_free(pool, tt); ttm_tt_fini(tt); KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages)); KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages)); ttm_pool_fini(pool); } static void ttm_pool_free_dma_alloc(struct kunit *test) { struct ttm_pool_test_priv *priv = test->priv; struct ttm_test_devices *devs = priv->devs; struct ttm_tt *tt; struct ttm_pool *pool; struct ttm_pool_type *pt; enum ttm_caching caching = ttm_uncached; unsigned int order = 2; size_t size = (1 << order) * PAGE_SIZE; tt = ttm_tt_kunit_init(test, 0, caching, size); KUNIT_ASSERT_NOT_NULL(test, tt); pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, pool); ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false); ttm_pool_alloc(pool, tt, &simple_ctx); pt = &pool->caching[caching].orders[order]; KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages)); ttm_pool_free(pool, tt); ttm_tt_fini(tt); KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages)); ttm_pool_fini(pool); } static void ttm_pool_free_no_dma_alloc(struct kunit *test) { struct ttm_pool_test_priv *priv = test->priv; struct ttm_test_devices *devs = priv->devs; struct ttm_tt *tt; struct ttm_pool *pool; struct ttm_pool_type *pt; enum ttm_caching caching = ttm_uncached; unsigned int order = 2; size_t size = (1 << order) * PAGE_SIZE; tt = ttm_tt_kunit_init(test, 0, caching, size); KUNIT_ASSERT_NOT_NULL(test, tt); pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, pool); ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, false, false); ttm_pool_alloc(pool, tt, &simple_ctx); pt = &pool->caching[caching].orders[order]; KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages)); ttm_pool_free(pool, tt); ttm_tt_fini(tt); KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages)); ttm_pool_fini(pool); } static void ttm_pool_fini_basic(struct kunit *test) { struct ttm_pool *pool; struct ttm_pool_type *pt; enum ttm_caching caching = ttm_uncached; unsigned int order = 0; 
size_t size = PAGE_SIZE; pool = ttm_pool_pre_populated(test, size, caching); pt = &pool->caching[caching].orders[order]; KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages)); ttm_pool_fini(pool); KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages)); } static struct kunit_case ttm_pool_test_cases[] = { KUNIT_CASE_PARAM(ttm_pool_alloc_basic, ttm_pool_alloc_basic_gen_params), KUNIT_CASE_PARAM(ttm_pool_alloc_basic_dma_addr, ttm_pool_alloc_basic_gen_params), KUNIT_CASE(ttm_pool_alloc_order_caching_match), KUNIT_CASE(ttm_pool_alloc_caching_mismatch), KUNIT_CASE(ttm_pool_alloc_order_mismatch), KUNIT_CASE(ttm_pool_free_dma_alloc), KUNIT_CASE(ttm_pool_free_no_dma_alloc), KUNIT_CASE(ttm_pool_fini_basic), {} }; static struct kunit_suite ttm_pool_test_suite = { .name = "ttm_pool", .init = ttm_pool_test_init, .exit = ttm_pool_test_fini, .test_cases = ttm_pool_test_cases, }; kunit_test_suites(&ttm_pool_test_suite); MODULE_LICENSE("GPL");
linux-master
drivers/gpu/drm/ttm/tests/ttm_pool_test.c
// SPDX-License-Identifier: GPL-2.0 AND MIT /* * Copyright © 2023 Intel Corporation */ #include <drm/ttm/ttm_resource.h> #include <drm/ttm/ttm_device.h> #include <drm/ttm/ttm_placement.h> #include "ttm_kunit_helpers.h" struct ttm_device_test_case { const char *description; bool use_dma_alloc; bool use_dma32; bool pools_init_expected; }; static void ttm_device_init_basic(struct kunit *test) { struct ttm_test_devices *priv = test->priv; struct ttm_device *ttm_dev; struct ttm_resource_manager *ttm_sys_man; int err; ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, ttm_dev); err = ttm_device_kunit_init(priv, ttm_dev, false, false); KUNIT_ASSERT_EQ(test, err, 0); KUNIT_EXPECT_PTR_EQ(test, ttm_dev->funcs, &ttm_dev_funcs); KUNIT_ASSERT_NOT_NULL(test, ttm_dev->wq); KUNIT_ASSERT_NOT_NULL(test, ttm_dev->man_drv[TTM_PL_SYSTEM]); ttm_sys_man = &ttm_dev->sysman; KUNIT_ASSERT_NOT_NULL(test, ttm_sys_man); KUNIT_EXPECT_TRUE(test, ttm_sys_man->use_tt); KUNIT_EXPECT_TRUE(test, ttm_sys_man->use_type); KUNIT_ASSERT_NOT_NULL(test, ttm_sys_man->func); KUNIT_EXPECT_PTR_EQ(test, ttm_dev->dev_mapping, priv->drm->anon_inode->i_mapping); ttm_device_fini(ttm_dev); } static void ttm_device_init_multiple(struct kunit *test) { struct ttm_test_devices *priv = test->priv; struct ttm_device *ttm_devs; unsigned int i, num_dev = 3; int err; ttm_devs = kunit_kcalloc(test, num_dev, sizeof(*ttm_devs), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, ttm_devs); for (i = 0; i < num_dev; i++) { err = ttm_device_kunit_init(priv, &ttm_devs[i], false, false); KUNIT_ASSERT_EQ(test, err, 0); KUNIT_EXPECT_PTR_EQ(test, ttm_devs[i].dev_mapping, priv->drm->anon_inode->i_mapping); KUNIT_ASSERT_NOT_NULL(test, ttm_devs[i].wq); KUNIT_EXPECT_PTR_EQ(test, ttm_devs[i].funcs, &ttm_dev_funcs); KUNIT_ASSERT_NOT_NULL(test, ttm_devs[i].man_drv[TTM_PL_SYSTEM]); } KUNIT_ASSERT_EQ(test, list_count_nodes(&ttm_devs[0].device_list), num_dev); for (i = 0; i < num_dev; i++) ttm_device_fini(&ttm_devs[i]); 
} static void ttm_device_fini_basic(struct kunit *test) { struct ttm_test_devices *priv = test->priv; struct ttm_device *ttm_dev; struct ttm_resource_manager *man; int err; ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, ttm_dev); err = ttm_device_kunit_init(priv, ttm_dev, false, false); KUNIT_ASSERT_EQ(test, err, 0); man = ttm_manager_type(ttm_dev, TTM_PL_SYSTEM); KUNIT_ASSERT_NOT_NULL(test, man); ttm_device_fini(ttm_dev); KUNIT_ASSERT_FALSE(test, man->use_type); KUNIT_ASSERT_TRUE(test, list_empty(&man->lru[0])); KUNIT_ASSERT_NULL(test, ttm_dev->man_drv[TTM_PL_SYSTEM]); } static void ttm_device_init_no_vma_man(struct kunit *test) { struct ttm_test_devices *priv = test->priv; struct drm_device *drm = priv->drm; struct ttm_device *ttm_dev; struct drm_vma_offset_manager *vma_man; int err; ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, ttm_dev); /* Let's pretend there's no VMA manager allocated */ vma_man = drm->vma_offset_manager; drm->vma_offset_manager = NULL; err = ttm_device_kunit_init(priv, ttm_dev, false, false); KUNIT_EXPECT_EQ(test, err, -EINVAL); /* Bring the manager back for a graceful cleanup */ drm->vma_offset_manager = vma_man; } static const struct ttm_device_test_case ttm_device_cases[] = { { .description = "No DMA allocations, no DMA32 required", .use_dma_alloc = false, .use_dma32 = false, .pools_init_expected = false, }, { .description = "DMA allocations, DMA32 required", .use_dma_alloc = true, .use_dma32 = true, .pools_init_expected = true, }, { .description = "No DMA allocations, DMA32 required", .use_dma_alloc = false, .use_dma32 = true, .pools_init_expected = false, }, { .description = "DMA allocations, no DMA32 required", .use_dma_alloc = true, .use_dma32 = false, .pools_init_expected = true, }, }; static void ttm_device_case_desc(const struct ttm_device_test_case *t, char *desc) { strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE); } 
KUNIT_ARRAY_PARAM(ttm_device, ttm_device_cases, ttm_device_case_desc); static void ttm_device_init_pools(struct kunit *test) { struct ttm_test_devices *priv = test->priv; const struct ttm_device_test_case *params = test->param_value; struct ttm_device *ttm_dev; struct ttm_pool *pool; struct ttm_pool_type pt; int err; ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, ttm_dev); err = ttm_device_kunit_init(priv, ttm_dev, params->use_dma_alloc, params->use_dma32); KUNIT_ASSERT_EQ(test, err, 0); pool = &ttm_dev->pool; KUNIT_ASSERT_NOT_NULL(test, pool); KUNIT_EXPECT_PTR_EQ(test, pool->dev, priv->dev); KUNIT_EXPECT_EQ(test, pool->use_dma_alloc, params->use_dma_alloc); KUNIT_EXPECT_EQ(test, pool->use_dma32, params->use_dma32); if (params->pools_init_expected) { for (int i = 0; i < TTM_NUM_CACHING_TYPES; ++i) { for (int j = 0; j <= MAX_ORDER; ++j) { pt = pool->caching[i].orders[j]; KUNIT_EXPECT_PTR_EQ(test, pt.pool, pool); KUNIT_EXPECT_EQ(test, pt.caching, i); KUNIT_EXPECT_EQ(test, pt.order, j); if (params->use_dma_alloc) KUNIT_ASSERT_FALSE(test, list_empty(&pt.pages)); } } } ttm_device_fini(ttm_dev); } static struct kunit_case ttm_device_test_cases[] = { KUNIT_CASE(ttm_device_init_basic), KUNIT_CASE(ttm_device_init_multiple), KUNIT_CASE(ttm_device_fini_basic), KUNIT_CASE(ttm_device_init_no_vma_man), KUNIT_CASE_PARAM(ttm_device_init_pools, ttm_device_gen_params), {} }; static struct kunit_suite ttm_device_test_suite = { .name = "ttm_device", .init = ttm_test_devices_init, .exit = ttm_test_devices_fini, .test_cases = ttm_device_test_cases, }; kunit_test_suites(&ttm_device_test_suite); MODULE_LICENSE("GPL");
linux-master
drivers/gpu/drm/ttm/tests/ttm_device_test.c
// SPDX-License-Identifier: GPL-2.0-only #include <linux/pci.h> #include <linux/vmalloc.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_probe_helper.h> #include "mgag200_drv.h" static int mgag200_g200_init_pci_options(struct pci_dev *pdev) { struct device *dev = &pdev->dev; bool has_sgram; u32 option; int err; err = pci_read_config_dword(pdev, PCI_MGA_OPTION, &option); if (err != PCIBIOS_SUCCESSFUL) { dev_err(dev, "pci_read_config_dword(PCI_MGA_OPTION) failed: %d\n", err); return pcibios_err_to_errno(err); } has_sgram = !!(option & PCI_MGA_OPTION_HARDPWMSK); if (has_sgram) option = 0x4049cd21; else option = 0x40499121; return mgag200_init_pci_options(pdev, option, 0x00008000); } static void mgag200_g200_init_registers(struct mgag200_g200_device *g200) { static const u8 dacvalue[] = { MGAG200_DAC_DEFAULT(0x00, 0xc9, 0x1f, 0x04, 0x2d, 0x19) }; struct mga_device *mdev = &g200->base; size_t i; for (i = 0; i < ARRAY_SIZE(dacvalue); ++i) { if ((i <= 0x17) || (i == 0x1b) || (i == 0x1c) || ((i >= 0x1f) && (i <= 0x29)) || ((i >= 0x30) && (i <= 0x37))) continue; WREG_DAC(i, dacvalue[i]); } mgag200_init_registers(mdev); } /* * PIXPLLC */ static int mgag200_g200_pixpllc_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *new_state) { static const int post_div_max = 7; static const int in_div_min = 1; static const int in_div_max = 6; static const int feed_div_min = 7; static const int feed_div_max = 127; struct drm_device *dev = crtc->dev; struct mgag200_g200_device *g200 = to_mgag200_g200_device(dev); struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc); struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state); long clock = new_crtc_state->mode.clock; struct mgag200_pll_values *pixpllc = &new_mgag200_crtc_state->pixpllc; u8 testp, testm, testn; u8 n = 0, m = 0, p, s; long f_vco; long computed; long 
delta, tmp_delta; long ref_clk = g200->ref_clk; long p_clk_min = g200->pclk_min; long p_clk_max = g200->pclk_max; if (clock > p_clk_max) { drm_err(dev, "Pixel Clock %ld too high\n", clock); return -EINVAL; } if (clock < p_clk_min >> 3) clock = p_clk_min >> 3; f_vco = clock; for (testp = 0; testp <= post_div_max && f_vco < p_clk_min; testp = (testp << 1) + 1, f_vco <<= 1) ; p = testp + 1; delta = clock; for (testm = in_div_min; testm <= in_div_max; testm++) { for (testn = feed_div_min; testn <= feed_div_max; testn++) { computed = ref_clk * (testn + 1) / (testm + 1); if (computed < f_vco) tmp_delta = f_vco - computed; else tmp_delta = computed - f_vco; if (tmp_delta < delta) { delta = tmp_delta; m = testm + 1; n = testn + 1; } } } f_vco = ref_clk * n / m; if (f_vco < 100000) s = 0; else if (f_vco < 140000) s = 1; else if (f_vco < 180000) s = 2; else s = 3; drm_dbg_kms(dev, "clock: %ld vco: %ld m: %d n: %d p: %d s: %d\n", clock, f_vco, m, n, p, s); pixpllc->m = m; pixpllc->n = n; pixpllc->p = p; pixpllc->s = s; return 0; } static void mgag200_g200_pixpllc_atomic_update(struct drm_crtc *crtc, struct drm_atomic_state *old_state) { struct drm_device *dev = crtc->dev; struct mga_device *mdev = to_mga_device(dev); struct drm_crtc_state *crtc_state = crtc->state; struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state); struct mgag200_pll_values *pixpllc = &mgag200_crtc_state->pixpllc; unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs; u8 xpixpllcm, xpixpllcn, xpixpllcp; pixpllcm = pixpllc->m - 1; pixpllcn = pixpllc->n - 1; pixpllcp = pixpllc->p - 1; pixpllcs = pixpllc->s; xpixpllcm = pixpllcm; xpixpllcn = pixpllcn; xpixpllcp = (pixpllcs << 3) | pixpllcp; WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK); WREG_DAC(MGA1064_PIX_PLLC_M, xpixpllcm); WREG_DAC(MGA1064_PIX_PLLC_N, xpixpllcn); WREG_DAC(MGA1064_PIX_PLLC_P, xpixpllcp); } /* * Mode-setting pipeline */ static const struct drm_plane_helper_funcs 
mgag200_g200_primary_plane_helper_funcs = { MGAG200_PRIMARY_PLANE_HELPER_FUNCS, }; static const struct drm_plane_funcs mgag200_g200_primary_plane_funcs = { MGAG200_PRIMARY_PLANE_FUNCS, }; static const struct drm_crtc_helper_funcs mgag200_g200_crtc_helper_funcs = { MGAG200_CRTC_HELPER_FUNCS, }; static const struct drm_crtc_funcs mgag200_g200_crtc_funcs = { MGAG200_CRTC_FUNCS, }; static const struct drm_encoder_funcs mgag200_g200_dac_encoder_funcs = { MGAG200_DAC_ENCODER_FUNCS, }; static const struct drm_connector_helper_funcs mgag200_g200_vga_connector_helper_funcs = { MGAG200_VGA_CONNECTOR_HELPER_FUNCS, }; static const struct drm_connector_funcs mgag200_g200_vga_connector_funcs = { MGAG200_VGA_CONNECTOR_FUNCS, }; static int mgag200_g200_pipeline_init(struct mga_device *mdev) { struct drm_device *dev = &mdev->base; struct drm_plane *primary_plane = &mdev->primary_plane; struct drm_crtc *crtc = &mdev->crtc; struct drm_encoder *encoder = &mdev->encoder; struct mga_i2c_chan *i2c = &mdev->i2c; struct drm_connector *connector = &mdev->connector; int ret; ret = drm_universal_plane_init(dev, primary_plane, 0, &mgag200_g200_primary_plane_funcs, mgag200_primary_plane_formats, mgag200_primary_plane_formats_size, mgag200_primary_plane_fmtmods, DRM_PLANE_TYPE_PRIMARY, NULL); if (ret) { drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret); return ret; } drm_plane_helper_add(primary_plane, &mgag200_g200_primary_plane_helper_funcs); drm_plane_enable_fb_damage_clips(primary_plane); ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL, &mgag200_g200_crtc_funcs, NULL); if (ret) { drm_err(dev, "drm_crtc_init_with_planes() failed: %d\n", ret); return ret; } drm_crtc_helper_add(crtc, &mgag200_g200_crtc_helper_funcs); /* FIXME: legacy gamma tables, but atomic gamma doesn't work without */ drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE); drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE); encoder->possible_crtcs = drm_crtc_mask(crtc); ret = 
drm_encoder_init(dev, encoder, &mgag200_g200_dac_encoder_funcs, DRM_MODE_ENCODER_DAC, NULL); if (ret) { drm_err(dev, "drm_encoder_init() failed: %d\n", ret); return ret; } ret = mgag200_i2c_init(mdev, i2c); if (ret) { drm_err(dev, "failed to add DDC bus: %d\n", ret); return ret; } ret = drm_connector_init_with_ddc(dev, connector, &mgag200_g200_vga_connector_funcs, DRM_MODE_CONNECTOR_VGA, &i2c->adapter); if (ret) { drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret); return ret; } drm_connector_helper_add(connector, &mgag200_g200_vga_connector_helper_funcs); ret = drm_connector_attach_encoder(connector, encoder); if (ret) { drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret); return ret; } return 0; } /* * DRM Device */ static const struct mgag200_device_info mgag200_g200_device_info = MGAG200_DEVICE_INFO_INIT(2048, 2048, 0, false, 1, 3, false); static void mgag200_g200_interpret_bios(struct mgag200_g200_device *g200, const unsigned char *bios, size_t size) { static const char matrox[] = {'M', 'A', 'T', 'R', 'O', 'X'}; static const unsigned int expected_length[6] = { 0, 64, 64, 64, 128, 128 }; struct mga_device *mdev = &g200->base; struct drm_device *dev = &mdev->base; const unsigned char *pins; unsigned int pins_len, version; int offset; int tmp; /* Test for MATROX string. */ if (size < 45 + sizeof(matrox)) return; if (memcmp(&bios[45], matrox, sizeof(matrox)) != 0) return; /* Get the PInS offset. */ if (size < MGA_BIOS_OFFSET + 2) return; offset = (bios[MGA_BIOS_OFFSET + 1] << 8) | bios[MGA_BIOS_OFFSET]; /* Get PInS data structure. 
*/ if (size < offset + 6) return; pins = bios + offset; if (pins[0] == 0x2e && pins[1] == 0x41) { version = pins[5]; pins_len = pins[2]; } else { version = 1; pins_len = pins[0] + (pins[1] << 8); } if (version < 1 || version > 5) { drm_warn(dev, "Unknown BIOS PInS version: %d\n", version); return; } if (pins_len != expected_length[version]) { drm_warn(dev, "Unexpected BIOS PInS size: %d expected: %d\n", pins_len, expected_length[version]); return; } if (size < offset + pins_len) return; drm_dbg_kms(dev, "MATROX BIOS PInS version %d size: %d found\n", version, pins_len); /* Extract the clock values */ switch (version) { case 1: tmp = pins[24] + (pins[25] << 8); if (tmp) g200->pclk_max = tmp * 10; break; case 2: if (pins[41] != 0xff) g200->pclk_max = (pins[41] + 100) * 1000; break; case 3: if (pins[36] != 0xff) g200->pclk_max = (pins[36] + 100) * 1000; if (pins[52] & 0x20) g200->ref_clk = 14318; break; case 4: if (pins[39] != 0xff) g200->pclk_max = pins[39] * 4 * 1000; if (pins[92] & 0x01) g200->ref_clk = 14318; break; case 5: tmp = pins[4] ? 
8000 : 6000; if (pins[123] != 0xff) g200->pclk_min = pins[123] * tmp; if (pins[38] != 0xff) g200->pclk_max = pins[38] * tmp; if (pins[110] & 0x01) g200->ref_clk = 14318; break; default: break; } } static void mgag200_g200_init_refclk(struct mgag200_g200_device *g200) { struct mga_device *mdev = &g200->base; struct drm_device *dev = &mdev->base; struct pci_dev *pdev = to_pci_dev(dev->dev); unsigned char __iomem *rom; unsigned char *bios; size_t size; g200->pclk_min = 50000; g200->pclk_max = 230000; g200->ref_clk = 27050; rom = pci_map_rom(pdev, &size); if (!rom) return; bios = vmalloc(size); if (!bios) goto out; memcpy_fromio(bios, rom, size); if (size != 0 && bios[0] == 0x55 && bios[1] == 0xaa) mgag200_g200_interpret_bios(g200, bios, size); drm_dbg_kms(dev, "pclk_min: %ld pclk_max: %ld ref_clk: %ld\n", g200->pclk_min, g200->pclk_max, g200->ref_clk); vfree(bios); out: pci_unmap_rom(pdev, rom); } static const struct mgag200_device_funcs mgag200_g200_device_funcs = { .pixpllc_atomic_check = mgag200_g200_pixpllc_atomic_check, .pixpllc_atomic_update = mgag200_g200_pixpllc_atomic_update, }; struct mga_device *mgag200_g200_device_create(struct pci_dev *pdev, const struct drm_driver *drv) { struct mgag200_g200_device *g200; struct mga_device *mdev; struct drm_device *dev; resource_size_t vram_available; int ret; g200 = devm_drm_dev_alloc(&pdev->dev, drv, struct mgag200_g200_device, base.base); if (IS_ERR(g200)) return ERR_CAST(g200); mdev = &g200->base; dev = &mdev->base; pci_set_drvdata(pdev, dev); ret = mgag200_g200_init_pci_options(pdev); if (ret) return ERR_PTR(ret); ret = mgag200_device_preinit(mdev); if (ret) return ERR_PTR(ret); mgag200_g200_init_refclk(g200); ret = mgag200_device_init(mdev, &mgag200_g200_device_info, &mgag200_g200_device_funcs); if (ret) return ERR_PTR(ret); mgag200_g200_init_registers(g200); vram_available = mgag200_device_probe_vram(mdev); ret = mgag200_mode_config_init(mdev, vram_available); if (ret) return ERR_PTR(ret); ret = 
mgag200_g200_pipeline_init(mdev); if (ret) return ERR_PTR(ret); drm_mode_config_reset(dev); return mdev; }
linux-master
drivers/gpu/drm/mgag200/mgag200_g200.c
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/pci.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>

#include "mgag200_drv.h"

/*
 * PIXPLLC
 */

/*
 * Exhaustively search the G200EH3 pixel-PLL divider space (M, N) for the
 * combination whose computed output frequency is closest to the requested
 * mode clock, then store the winning values (biased by +1 for the hardware
 * encoding) in the CRTC state's pixpllc. Frequencies are compared in the
 * same units as new_crtc_state->mode.clock (kHz, per DRM convention —
 * vcomax/vcomin/pllreffreq presumably share that unit; confirm against the
 * G200EH3 datasheet). Always returns 0; the nearest match is accepted even
 * when it is not exact.
 */
static int mgag200_g200eh3_pixpllc_atomic_check(struct drm_crtc *crtc,
						struct drm_atomic_state *new_state)
{
	static const unsigned int vcomax = 3000000;
	static const unsigned int vcomin = 1500000;
	static const unsigned int pllreffreq = 25000;

	struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
	struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
	long clock = new_crtc_state->mode.clock;
	struct mgag200_pll_values *pixpllc = &new_mgag200_crtc_state->pixpllc;
	unsigned int delta, tmpdelta;
	unsigned int testp, testm, testn;
	unsigned int p, m, n, s;
	unsigned int computed;

	m = n = p = s = 0;
	delta = 0xffffffff; /* "worst possible" so any candidate wins first */
	testp = 0;          /* P is not searched on this chip; encoded as p = 1 below */

	for (testm = 150; testm >= 6; testm--) {
		/* keep the VCO inside its legal operating window */
		if (clock * testm > vcomax)
			continue;
		if (clock * testm < vcomin)
			continue;
		for (testn = 120; testn >= 60; testn--) {
			computed = (pllreffreq * testn) / testm;
			if (computed > clock)
				tmpdelta = computed - clock;
			else
				tmpdelta = clock - computed;
			if (tmpdelta < delta) {
				delta = tmpdelta;
				/* +1: registers hold divider minus one */
				n = testn + 1;
				m = testm + 1;
				p = testp + 1;
			}
			if (delta == 0)
				break; /* exact match; stop searching */
		}
		if (delta == 0)
			break;
	}

	pixpllc->m = m;
	pixpllc->n = n;
	pixpllc->p = p;
	pixpllc->s = s;

	return 0;
}

/*
 * Mode-setting pipeline
 */

static const struct drm_plane_helper_funcs mgag200_g200eh3_primary_plane_helper_funcs = {
	MGAG200_PRIMARY_PLANE_HELPER_FUNCS,
};

static const struct drm_plane_funcs mgag200_g200eh3_primary_plane_funcs = {
	MGAG200_PRIMARY_PLANE_FUNCS,
};

static const struct drm_crtc_helper_funcs mgag200_g200eh3_crtc_helper_funcs = {
	MGAG200_CRTC_HELPER_FUNCS,
};

static const struct drm_crtc_funcs mgag200_g200eh3_crtc_funcs = {
	MGAG200_CRTC_FUNCS,
};

static const struct drm_encoder_funcs mgag200_g200eh3_dac_encoder_funcs = {
	MGAG200_DAC_ENCODER_FUNCS,
};

static const struct drm_connector_helper_funcs mgag200_g200eh3_vga_connector_helper_funcs = {
	MGAG200_VGA_CONNECTOR_HELPER_FUNCS,
};

static const struct drm_connector_funcs mgag200_g200eh3_vga_connector_funcs = {
	MGAG200_VGA_CONNECTOR_FUNCS,
};

/*
 * Wire up the fixed display pipeline: one primary plane -> one CRTC ->
 * one DAC encoder -> one VGA connector (with DDC from the driver's
 * bit-banged i2c bus). All objects are embedded in struct mga_device, so
 * no allocation happens here. Returns 0 or a negative errno from the
 * first failing DRM init call; partially initialized objects are cleaned
 * up by drmm/devm machinery, not here.
 */
static int mgag200_g200eh3_pipeline_init(struct mga_device *mdev)
{
	struct drm_device *dev = &mdev->base;
	struct drm_plane *primary_plane = &mdev->primary_plane;
	struct drm_crtc *crtc = &mdev->crtc;
	struct drm_encoder *encoder = &mdev->encoder;
	struct mga_i2c_chan *i2c = &mdev->i2c;
	struct drm_connector *connector = &mdev->connector;
	int ret;

	ret = drm_universal_plane_init(dev, primary_plane, 0,
				       &mgag200_g200eh3_primary_plane_funcs,
				       mgag200_primary_plane_formats,
				       mgag200_primary_plane_formats_size,
				       mgag200_primary_plane_fmtmods,
				       DRM_PLANE_TYPE_PRIMARY, NULL);
	if (ret) {
		drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret);
		return ret;
	}
	drm_plane_helper_add(primary_plane, &mgag200_g200eh3_primary_plane_helper_funcs);
	drm_plane_enable_fb_damage_clips(primary_plane);

	ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
					&mgag200_g200eh3_crtc_funcs, NULL);
	if (ret) {
		drm_err(dev, "drm_crtc_init_with_planes() failed: %d\n", ret);
		return ret;
	}
	drm_crtc_helper_add(crtc, &mgag200_g200eh3_crtc_helper_funcs);

	/* FIXME: legacy gamma tables, but atomic gamma doesn't work without */
	drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
	drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);

	encoder->possible_crtcs = drm_crtc_mask(crtc);
	ret = drm_encoder_init(dev, encoder, &mgag200_g200eh3_dac_encoder_funcs,
			       DRM_MODE_ENCODER_DAC, NULL);
	if (ret) {
		drm_err(dev, "drm_encoder_init() failed: %d\n", ret);
		return ret;
	}

	/* i2c must exist before the connector: the connector takes the DDC adapter */
	ret = mgag200_i2c_init(mdev, i2c);
	if (ret) {
		drm_err(dev, "failed to add DDC bus: %d\n", ret);
		return ret;
	}

	ret = drm_connector_init_with_ddc(dev, connector,
					  &mgag200_g200eh3_vga_connector_funcs,
					  DRM_MODE_CONNECTOR_VGA,
					  &i2c->adapter);
	if (ret) {
		drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret);
		return ret;
	}
	drm_connector_helper_add(connector, &mgag200_g200eh3_vga_connector_helper_funcs);

	ret = drm_connector_attach_encoder(connector, encoder);
	if (ret) {
		drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret);
		return ret;
	}

	return 0;
}

/*
 * DRM device
 */

static const struct mgag200_device_info mgag200_g200eh3_device_info =
	MGAG200_DEVICE_INFO_INIT(2048, 2048, 0, false, 1, 0, false);

static const struct mgag200_device_funcs mgag200_g200eh3_device_funcs = {
	.pixpllc_atomic_check = mgag200_g200eh3_pixpllc_atomic_check,
	.pixpllc_atomic_update = mgag200_g200eh_pixpllc_atomic_update, // same as G200EH
};

/*
 * Allocate and fully initialize a G200EH3 device: PCI options, device
 * pre-init, chip-specific register defaults (shared with the G200EH),
 * VRAM probing, KMS mode config, and the display pipeline. On success
 * the mode config is reset to a clean state and the mga_device is
 * returned; any failure is propagated as an ERR_PTR. All allocations are
 * device-managed (devm/drmm), so no explicit unwinding is needed here.
 */
struct mga_device *mgag200_g200eh3_device_create(struct pci_dev *pdev,
						 const struct drm_driver *drv)
{
	struct mga_device *mdev;
	struct drm_device *dev;
	resource_size_t vram_available;
	int ret;

	mdev = devm_drm_dev_alloc(&pdev->dev, drv, struct mga_device, base);
	if (IS_ERR(mdev))
		return mdev;
	dev = &mdev->base;

	pci_set_drvdata(pdev, dev);

	ret = mgag200_init_pci_options(pdev, 0x00000120, 0x0000b000);
	if (ret)
		return ERR_PTR(ret);

	ret = mgag200_device_preinit(mdev);
	if (ret)
		return ERR_PTR(ret);

	ret = mgag200_device_init(mdev, &mgag200_g200eh3_device_info,
				  &mgag200_g200eh3_device_funcs);
	if (ret)
		return ERR_PTR(ret);

	mgag200_g200eh_init_registers(mdev); // same as G200EH

	vram_available = mgag200_device_probe_vram(mdev);

	ret = mgag200_mode_config_init(mdev, vram_available);
	if (ret)
		return ERR_PTR(ret);

	ret = mgag200_g200eh3_pipeline_init(mdev);
	if (ret)
		return ERR_PTR(ret);

	drm_mode_config_reset(dev);

	return mdev;
}
linux-master
drivers/gpu/drm/mgag200/mgag200_g200eh3.c
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/delay.h>

#include "mgag200_drv.h"

/*
 * Tell the server's BMC (remote management controller) that a mode
 * change is coming, then wait (bounded, ~300ms per phase) for any active
 * remote scan-out to reach a frame boundary before the driver touches
 * the video timing. All communication happens through DAC-indexed
 * registers; the exact read-modify-write order below is the handshake
 * protocol and must not be reordered.
 */
void mgag200_bmc_disable_vidrst(struct mga_device *mdev)
{
	u8 tmp;
	int iter_max;

	/*
	 * 1 - The first step is to inform the BMC of an upcoming mode
	 * change. We are putting the misc<0> to output.
	 */

	WREG8(DAC_INDEX, MGA1064_GEN_IO_CTL);
	tmp = RREG8(DAC_DATA);
	tmp |= 0x10;
	WREG_DAC(MGA1064_GEN_IO_CTL, tmp);

	/* we are putting a 1 on the misc<0> line */
	WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
	tmp = RREG8(DAC_DATA);
	tmp |= 0x10;
	WREG_DAC(MGA1064_GEN_IO_DATA, tmp);

	/*
	 * 2- Second step to mask any further scan request. This is
	 * done by asserting the remfreqmsk bit (XSPAREREG<7>)
	 */

	WREG8(DAC_INDEX, MGA1064_SPAREREG);
	tmp = RREG8(DAC_DATA);
	tmp |= 0x80;
	WREG_DAC(MGA1064_SPAREREG, tmp);

	/*
	 * 3a- The third step is to verify if there is an active scan.
	 * We are waiting for a 0 on remhsyncsts <XSPAREREG<0>).
	 *
	 * NOTE(review): the first iteration tests the 'tmp' value left
	 * over from step 2 before re-reading XSPAREREG inside the loop;
	 * poll period is 1ms, so iter_max = 300 bounds this at ~300ms.
	 */
	iter_max = 300;
	while (!(tmp & 0x1) && iter_max) {
		WREG8(DAC_INDEX, MGA1064_SPAREREG);
		tmp = RREG8(DAC_DATA);
		udelay(1000);
		iter_max--;
	}

	/*
	 * 3b- This step occurs only if the remove is actually
	 * scanning. We are waiting for the end of the frame which is
	 * a 1 on remvsyncsts (XSPAREREG<1>)
	 */
	if (iter_max) {
		iter_max = 300;
		while ((tmp & 0x2) && iter_max) {
			WREG8(DAC_INDEX, MGA1064_SPAREREG);
			tmp = RREG8(DAC_DATA);
			udelay(1000);
			iter_max--;
		}
	}
}

/*
 * Counterpart of mgag200_bmc_disable_vidrst(): re-enable the video
 * resets, pulse rstlvl2 on the remote head, unmask scan requests, and
 * drop the misc<0> "mode change in progress" signal so the BMC may
 * resume remote scan-out.
 */
void mgag200_bmc_enable_vidrst(struct mga_device *mdev)
{
	u8 tmp;

	/* Ensure that the vrsten and hrsten are set */
	WREG8(MGAREG_CRTCEXT_INDEX, 1);
	tmp = RREG8(MGAREG_CRTCEXT_DATA);
	WREG8(MGAREG_CRTCEXT_DATA, tmp | 0x88);

	/* Assert rstlvl2 */
	WREG8(DAC_INDEX, MGA1064_REMHEADCTL2);
	tmp = RREG8(DAC_DATA);
	tmp |= 0x8;
	WREG8(DAC_DATA, tmp);

	udelay(10);

	/* Deassert rstlvl2 */
	tmp &= ~0x08;
	WREG8(DAC_INDEX, MGA1064_REMHEADCTL2);
	WREG8(DAC_DATA, tmp);

	/* Remove mask of scan request */
	WREG8(DAC_INDEX, MGA1064_SPAREREG);
	tmp = RREG8(DAC_DATA);
	tmp &= ~0x80;
	WREG8(DAC_DATA, tmp);

	/* Put back a 0 on the misc<0> line */
	WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
	tmp = RREG8(DAC_DATA);
	tmp &= ~0x10;
	WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
}
linux-master
drivers/gpu/drm/mgag200/mgag200_bmc.c
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/delay.h>
#include <linux/pci.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>

#include "mgag200_drv.h"

/*
 * Program the G200EV's DAC register defaults, skipping the index ranges
 * that must keep their reset values on this chip, then apply the common
 * MGA register setup.
 */
static void mgag200_g200ev_init_registers(struct mga_device *mdev)
{
	static const u8 dacvalue[] = {
		MGAG200_DAC_DEFAULT(0x00,
				    MGA1064_PIX_CLK_CTL_SEL_PLL,
				    MGA1064_MISC_CTL_VGA8 | MGA1064_MISC_CTL_DAC_RAM_CS,
				    0x00, 0x00, 0x00)
	};

	size_t i;

	for (i = 0; i < ARRAY_SIZE(dacvalue); i++) {
		/* these DAC indices are not written on the G200EV */
		if ((i <= 0x17) ||
		    (i == 0x1b) ||
		    (i == 0x1c) ||
		    ((i >= 0x1f) && (i <= 0x29)) ||
		    ((i >= 0x30) && (i <= 0x37)) ||
		    ((i >= 0x44) && (i <= 0x4e)))
			continue;
		WREG_DAC(i, dacvalue[i]);
	}

	mgag200_init_registers(mdev);
}

/* Set the G200EV high-priority request level (extended CRTC reg 0x06) to 0. */
static void mgag200_g200ev_set_hiprilvl(struct mga_device *mdev)
{
	WREG_ECRT(0x06, 0x00);
}

/*
 * PIXPLLC
 */

/*
 * Exhaustively search the G200EV pixel-PLL divider space (P, N, M) for
 * the combination whose output frequency (pllreffreq * N / (M * P)) is
 * closest to the requested mode clock, keeping the VCO (clock * P)
 * within [vcomin, vcomax]. Winning raw divider values are stored in the
 * CRTC state's pixpllc (the -1 hardware bias is applied later, in the
 * atomic_update hook). Units follow new_crtc_state->mode.clock (kHz per
 * DRM convention). Always returns 0.
 */
static int mgag200_g200ev_pixpllc_atomic_check(struct drm_crtc *crtc,
					       struct drm_atomic_state *new_state)
{
	static const unsigned int vcomax = 550000;
	static const unsigned int vcomin = 150000;
	static const unsigned int pllreffreq = 50000;

	struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
	struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
	long clock = new_crtc_state->mode.clock;
	struct mgag200_pll_values *pixpllc = &new_mgag200_crtc_state->pixpllc;
	unsigned int delta, tmpdelta;
	unsigned int testp, testm, testn;
	unsigned int p, m, n, s;
	unsigned int computed;

	m = n = p = s = 0;
	delta = 0xffffffff; /* "worst possible" so the first candidate always wins */

	for (testp = 16; testp > 0; testp--) {
		/* VCO runs at clock * P; keep it inside the legal window */
		if (clock * testp > vcomax)
			continue;
		if (clock * testp < vcomin)
			continue;

		for (testn = 1; testn < 257; testn++) {
			for (testm = 1; testm < 17; testm++) {
				computed = (pllreffreq * testn) / (testm * testp);
				if (computed > clock)
					tmpdelta = computed - clock;
				else
					tmpdelta = clock - computed;
				if (tmpdelta < delta) {
					delta = tmpdelta;
					n = testn;
					m = testm;
					p = testp;
				}
			}
		}
	}

	pixpllc->m = m;
	pixpllc->n = n;
	pixpllc->p = p;
	pixpllc->s = s;

	return 0;
}

/*
 * Write the PLL values chosen by atomic_check into the G200EV hardware.
 * The sequence — select MGA clock source, gate the pixel clock, disable
 * PLL status output, power the PLL down, load M/N/P, power it back up,
 * re-select the PLL and ungate the clock — and the two udelay()s follow
 * the chip's required programming order; do not reorder.
 */
static void mgag200_g200ev_pixpllc_atomic_update(struct drm_crtc *crtc,
						 struct drm_atomic_state *old_state)
{
	struct drm_device *dev = crtc->dev;
	struct mga_device *mdev = to_mga_device(dev);
	struct drm_crtc_state *crtc_state = crtc->state;
	struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
	struct mgag200_pll_values *pixpllc = &mgag200_crtc_state->pixpllc;
	unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
	u8 xpixpllcm, xpixpllcn, xpixpllcp, tmp;

	/* registers hold divider minus one; S shares the P register's high bits */
	pixpllcm = pixpllc->m - 1;
	pixpllcn = pixpllc->n - 1;
	pixpllcp = pixpllc->p - 1;
	pixpllcs = pixpllc->s;

	xpixpllcm = pixpllcm;
	xpixpllcn = pixpllcn;
	xpixpllcp = (pixpllcs << 3) | pixpllcp;

	WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);

	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
	tmp = RREG8(DAC_DATA);
	tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
	WREG8(DAC_DATA, tmp);

	tmp = RREG8(MGAREG_MEM_MISC_READ);
	tmp |= 0x3 << 2;
	WREG8(MGAREG_MEM_MISC_WRITE, tmp);

	WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
	tmp = RREG8(DAC_DATA);
	WREG8(DAC_DATA, tmp & ~0x40);

	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
	tmp = RREG8(DAC_DATA);
	tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
	WREG8(DAC_DATA, tmp);

	WREG_DAC(MGA1064_EV_PIX_PLLC_M, xpixpllcm);
	WREG_DAC(MGA1064_EV_PIX_PLLC_N, xpixpllcn);
	WREG_DAC(MGA1064_EV_PIX_PLLC_P, xpixpllcp);

	udelay(50);

	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
	tmp = RREG8(DAC_DATA);
	tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
	WREG8(DAC_DATA, tmp);

	udelay(500); /* allow the PLL to lock after power-up */

	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
	tmp = RREG8(DAC_DATA);
	tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
	tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
	WREG8(DAC_DATA, tmp);

	WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
	tmp = RREG8(DAC_DATA);
	WREG8(DAC_DATA, tmp | 0x40);

	tmp = RREG8(MGAREG_MEM_MISC_READ);
	tmp |= (0x3 << 2);
	WREG8(MGAREG_MEM_MISC_WRITE, tmp);

	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
	tmp = RREG8(DAC_DATA);
	tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
	WREG8(DAC_DATA, tmp);
}

/*
 * Mode-setting pipeline
 */
static const struct drm_plane_helper_funcs mgag200_g200ev_primary_plane_helper_funcs = {
	MGAG200_PRIMARY_PLANE_HELPER_FUNCS,
};

static const struct drm_plane_funcs mgag200_g200ev_primary_plane_funcs = {
	MGAG200_PRIMARY_PLANE_FUNCS,
};

/*
 * G200EV-specific CRTC enable: bracket the mode programming with the
 * optional BMC video-reset handshake, program format/mode registers and
 * the pixel PLL, then set the chip's high-priority request level before
 * turning scan-out back on. The hiprilvl step is why this chip cannot
 * use the generic atomic_enable helper.
 */
static void mgag200_g200ev_crtc_helper_atomic_enable(struct drm_crtc *crtc,
						     struct drm_atomic_state *old_state)
{
	struct drm_device *dev = crtc->dev;
	struct mga_device *mdev = to_mga_device(dev);
	const struct mgag200_device_funcs *funcs = mdev->funcs;
	struct drm_crtc_state *crtc_state = crtc->state;
	struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
	const struct drm_format_info *format = mgag200_crtc_state->format;

	if (funcs->disable_vidrst)
		funcs->disable_vidrst(mdev);

	mgag200_set_format_regs(mdev, format);
	mgag200_set_mode_regs(mdev, adjusted_mode);

	if (funcs->pixpllc_atomic_update)
		funcs->pixpllc_atomic_update(crtc, old_state);

	mgag200_g200ev_set_hiprilvl(mdev);

	mgag200_enable_display(mdev);

	if (funcs->enable_vidrst)
		funcs->enable_vidrst(mdev);
}

static const struct drm_crtc_helper_funcs mgag200_g200ev_crtc_helper_funcs = {
	.mode_valid = mgag200_crtc_helper_mode_valid,
	.atomic_check = mgag200_crtc_helper_atomic_check,
	.atomic_flush = mgag200_crtc_helper_atomic_flush,
	.atomic_enable = mgag200_g200ev_crtc_helper_atomic_enable,
	.atomic_disable = mgag200_crtc_helper_atomic_disable
};

static const struct drm_crtc_funcs mgag200_g200ev_crtc_funcs = {
	MGAG200_CRTC_FUNCS,
};

static const struct drm_encoder_funcs mgag200_g200ev_dac_encoder_funcs = {
	MGAG200_DAC_ENCODER_FUNCS,
};

static const struct drm_connector_helper_funcs mgag200_g200ev_vga_connector_helper_funcs = {
	MGAG200_VGA_CONNECTOR_HELPER_FUNCS,
};

static const struct drm_connector_funcs mgag200_g200ev_vga_connector_funcs = {
	MGAG200_VGA_CONNECTOR_FUNCS,
};

/*
 * Wire up the fixed display pipeline: one primary plane -> one CRTC ->
 * one DAC encoder -> one VGA connector (with DDC from the driver's i2c
 * bus). All objects live embedded in struct mga_device; returns 0 or
 * the first failing DRM init call's negative errno.
 */
static int mgag200_g200ev_pipeline_init(struct mga_device *mdev)
{
	struct drm_device *dev = &mdev->base;
	struct drm_plane *primary_plane = &mdev->primary_plane;
	struct drm_crtc *crtc = &mdev->crtc;
	struct drm_encoder *encoder = &mdev->encoder;
	struct mga_i2c_chan *i2c = &mdev->i2c;
	struct drm_connector *connector = &mdev->connector;
	int ret;

	ret = drm_universal_plane_init(dev, primary_plane, 0,
				       &mgag200_g200ev_primary_plane_funcs,
				       mgag200_primary_plane_formats,
				       mgag200_primary_plane_formats_size,
				       mgag200_primary_plane_fmtmods,
				       DRM_PLANE_TYPE_PRIMARY, NULL);
	if (ret) {
		drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret);
		return ret;
	}
	drm_plane_helper_add(primary_plane, &mgag200_g200ev_primary_plane_helper_funcs);
	drm_plane_enable_fb_damage_clips(primary_plane);

	ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
					&mgag200_g200ev_crtc_funcs, NULL);
	if (ret) {
		drm_err(dev, "drm_crtc_init_with_planes() failed: %d\n", ret);
		return ret;
	}
	drm_crtc_helper_add(crtc, &mgag200_g200ev_crtc_helper_funcs);

	/* FIXME: legacy gamma tables, but atomic gamma doesn't work without */
	drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
	drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);

	encoder->possible_crtcs = drm_crtc_mask(crtc);
	ret = drm_encoder_init(dev, encoder, &mgag200_g200ev_dac_encoder_funcs,
			       DRM_MODE_ENCODER_DAC, NULL);
	if (ret) {
		drm_err(dev, "drm_encoder_init() failed: %d\n", ret);
		return ret;
	}

	/* i2c must exist before the connector: it supplies the DDC adapter */
	ret = mgag200_i2c_init(mdev, i2c);
	if (ret) {
		drm_err(dev, "failed to add DDC bus: %d\n", ret);
		return ret;
	}

	ret = drm_connector_init_with_ddc(dev, connector,
					  &mgag200_g200ev_vga_connector_funcs,
					  DRM_MODE_CONNECTOR_VGA,
					  &i2c->adapter);
	if (ret) {
		drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret);
		return ret;
	}
	drm_connector_helper_add(connector, &mgag200_g200ev_vga_connector_helper_funcs);

	ret = drm_connector_attach_encoder(connector, encoder);
	if (ret) {
		drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret);
		return ret;
	}

	return 0;
}

/*
 * DRM device
 */

static const struct mgag200_device_info mgag200_g200ev_device_info =
	MGAG200_DEVICE_INFO_INIT(2048, 2048, 32700, false, 0, 1, false);

static const struct mgag200_device_funcs mgag200_g200ev_device_funcs = {
	.pixpllc_atomic_check = mgag200_g200ev_pixpllc_atomic_check,
	.pixpllc_atomic_update = mgag200_g200ev_pixpllc_atomic_update,
};

/*
 * Allocate and fully initialize a G200EV device: PCI options, device
 * pre-init, chip register defaults, VRAM probing, KMS mode config and
 * the display pipeline. Returns the mga_device or an ERR_PTR; all
 * allocations are device-managed, so no unwinding is done here.
 */
struct mga_device *mgag200_g200ev_device_create(struct pci_dev *pdev,
						const struct drm_driver *drv)
{
	struct mga_device *mdev;
	struct drm_device *dev;
	resource_size_t vram_available;
	int ret;

	mdev = devm_drm_dev_alloc(&pdev->dev, drv, struct mga_device, base);
	if (IS_ERR(mdev))
		return mdev;
	dev = &mdev->base;

	pci_set_drvdata(pdev, dev);

	ret = mgag200_init_pci_options(pdev, 0x00000120, 0x0000b000);
	if (ret)
		return ERR_PTR(ret);

	ret = mgag200_device_preinit(mdev);
	if (ret)
		return ERR_PTR(ret);

	ret = mgag200_device_init(mdev, &mgag200_g200ev_device_info,
				  &mgag200_g200ev_device_funcs);
	if (ret)
		return ERR_PTR(ret);

	mgag200_g200ev_init_registers(mdev);

	vram_available = mgag200_device_probe_vram(mdev);

	ret = mgag200_mode_config_init(mdev, vram_available);
	if (ret)
		return ERR_PTR(ret);

	ret = mgag200_g200ev_pipeline_init(mdev);
	if (ret)
		return ERR_PTR(ret);

	drm_mode_config_reset(dev);

	return mdev;
}
linux-master
drivers/gpu/drm/mgag200/mgag200_g200ev.c
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/delay.h>
#include <linux/pci.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>

#include "mgag200_drv.h"

/*
 * Program the G200ER's DAC register defaults (skipping index ranges
 * that must keep their reset values on this chip), plus the two
 * G200ER-only register writes, around the common MGA register setup.
 */
static void mgag200_g200er_init_registers(struct mga_device *mdev)
{
	static const u8 dacvalue[] = {
		MGAG200_DAC_DEFAULT(0x00, 0xc9, 0x1f, 0x00, 0x00, 0x00)
	};

	size_t i;

	for (i = 0; i < ARRAY_SIZE(dacvalue); i++) {
		/* these DAC indices are not written on the G200ER */
		if ((i <= 0x17) ||
		    (i == 0x1b) ||
		    (i == 0x1c) ||
		    ((i >= 0x1f) && (i <= 0x29)) ||
		    ((i >= 0x30) && (i <= 0x37)))
			continue;
		WREG_DAC(i, dacvalue[i]);
	}

	WREG_DAC(0x90, 0); /* G200ER specific */

	mgag200_init_registers(mdev);

	WREG_ECRT(0x24, 0x5); /* G200ER specific */
}

/*
 * Pulse the undocumented tag-FIFO reset bit in MGAREG_MEMCTL, holding
 * it asserted for 1ms. Called on every CRTC enable before scan-out is
 * restarted.
 */
static void mgag200_g200er_reset_tagfifo(struct mga_device *mdev)
{
	static const uint32_t RESET_FLAG = 0x00200000; /* undocumented magic value */
	u32 memctl;

	memctl = RREG32(MGAREG_MEMCTL);

	memctl |= RESET_FLAG;
	WREG32(MGAREG_MEMCTL, memctl);

	udelay(1000);

	memctl &= ~RESET_FLAG;
	WREG32(MGAREG_MEMCTL, memctl);
}

/*
 * PIXPLLC
 */

/*
 * Exhaustively search the G200ER pixel-PLL space (reference divider R,
 * feedback N, output dividers M and O) for the combination whose output
 * frequency is closest to the requested mode clock, keeping the VCO
 * (pllreffreq * (N+1) / (R+1)) within [vcomin, vcomax]. M and O are
 * packed into a single register-style field ((testm | testo << 3) + 1),
 * with R doubling as both P and S. Units follow mode.clock (kHz per DRM
 * convention). Always returns 0; the nearest match is accepted.
 */
static int mgag200_g200er_pixpllc_atomic_check(struct drm_crtc *crtc,
					       struct drm_atomic_state *new_state)
{
	static const unsigned int vcomax = 1488000;
	static const unsigned int vcomin = 1056000;
	static const unsigned int pllreffreq = 48000;
	static const unsigned int m_div_val[] = { 1, 2, 4, 8 };

	struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
	struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
	long clock = new_crtc_state->mode.clock;
	struct mgag200_pll_values *pixpllc = &new_mgag200_crtc_state->pixpllc;
	unsigned int delta, tmpdelta;
	int testr, testn, testm, testo;
	unsigned int p, m, n, s;
	unsigned int computed, vco;

	m = n = p = s = 0;
	delta = 0xffffffff; /* "worst possible" so the first candidate always wins */

	for (testr = 0; testr < 4; testr++) {
		if (delta == 0)
			break; /* exact match already found */
		for (testn = 5; testn < 129; testn++) {
			if (delta == 0)
				break;
			for (testm = 3; testm >= 0; testm--) {
				if (delta == 0)
					break;
				for (testo = 5; testo < 33; testo++) {
					vco = pllreffreq * (testn + 1) / (testr + 1);
					if (vco < vcomin)
						continue;
					if (vco > vcomax)
						continue;
					computed = vco / (m_div_val[testm] * (testo + 1));
					if (computed > clock)
						tmpdelta = computed - clock;
					else
						tmpdelta = clock - computed;
					if (tmpdelta < delta) {
						delta = tmpdelta;
						/* pack M and O; +1 offsets the -1 bias applied at update */
						m = (testm | (testo << 3)) + 1;
						n = testn + 1;
						p = testr + 1;
						s = testr;
					}
				}
			}
		}
	}

	pixpllc->m = m;
	pixpllc->n = n;
	pixpllc->p = p;
	pixpllc->s = s;

	return 0;
}

/*
 * Write the PLL values chosen by atomic_check into the G200ER hardware.
 * Sequence: select the MGA clock source, gate the pixel clock and the
 * remote-head clock, set memory misc bits, power the PLL down while
 * keeping the clock enabled, wait, load N/M/P, wait. The order and the
 * udelay()s follow the chip's required programming procedure.
 */
static void mgag200_g200er_pixpllc_atomic_update(struct drm_crtc *crtc,
						 struct drm_atomic_state *old_state)
{
	struct drm_device *dev = crtc->dev;
	struct mga_device *mdev = to_mga_device(dev);
	struct drm_crtc_state *crtc_state = crtc->state;
	struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
	struct mgag200_pll_values *pixpllc = &mgag200_crtc_state->pixpllc;
	unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
	u8 xpixpllcm, xpixpllcn, xpixpllcp, tmp;

	/* registers hold divider minus one; S shares the P register's high bits */
	pixpllcm = pixpllc->m - 1;
	pixpllcn = pixpllc->n - 1;
	pixpllcp = pixpllc->p - 1;
	pixpllcs = pixpllc->s;

	xpixpllcm = pixpllcm;
	xpixpllcn = pixpllcn;
	xpixpllcp = (pixpllcs << 3) | pixpllcp;

	WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);

	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
	tmp = RREG8(DAC_DATA);
	tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
	WREG8(DAC_DATA, tmp);

	WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
	tmp = RREG8(DAC_DATA);
	tmp |= MGA1064_REMHEADCTL_CLKDIS;
	WREG8(DAC_DATA, tmp);

	tmp = RREG8(MGAREG_MEM_MISC_READ);
	tmp |= (0x3<<2) | 0xc0;
	WREG8(MGAREG_MEM_MISC_WRITE, tmp);

	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
	tmp = RREG8(DAC_DATA);
	tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
	tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
	WREG8(DAC_DATA, tmp);

	udelay(500);

	WREG_DAC(MGA1064_ER_PIX_PLLC_N, xpixpllcn);
	WREG_DAC(MGA1064_ER_PIX_PLLC_M, xpixpllcm);
	WREG_DAC(MGA1064_ER_PIX_PLLC_P, xpixpllcp);

	udelay(50);
}

/*
 * Mode-setting pipeline
 */

static const struct drm_plane_helper_funcs mgag200_g200er_primary_plane_helper_funcs = {
	MGAG200_PRIMARY_PLANE_HELPER_FUNCS,
};

static const struct drm_plane_funcs mgag200_g200er_primary_plane_funcs = {
	MGAG200_PRIMARY_PLANE_FUNCS,
};

/*
 * G200ER-specific CRTC enable: bracket the mode programming with the
 * optional BMC video-reset handshake, program format/mode registers and
 * the pixel PLL, then reset the tag FIFO before turning scan-out back
 * on. The tag-FIFO reset is why this chip cannot use the generic
 * atomic_enable helper.
 */
static void mgag200_g200er_crtc_helper_atomic_enable(struct drm_crtc *crtc,
						     struct drm_atomic_state *old_state)
{
	struct drm_device *dev = crtc->dev;
	struct mga_device *mdev = to_mga_device(dev);
	const struct mgag200_device_funcs *funcs = mdev->funcs;
	struct drm_crtc_state *crtc_state = crtc->state;
	struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
	const struct drm_format_info *format = mgag200_crtc_state->format;

	if (funcs->disable_vidrst)
		funcs->disable_vidrst(mdev);

	mgag200_set_format_regs(mdev, format);
	mgag200_set_mode_regs(mdev, adjusted_mode);

	if (funcs->pixpllc_atomic_update)
		funcs->pixpllc_atomic_update(crtc, old_state);

	mgag200_g200er_reset_tagfifo(mdev);

	mgag200_enable_display(mdev);

	if (funcs->enable_vidrst)
		funcs->enable_vidrst(mdev);
}

static const struct drm_crtc_helper_funcs mgag200_g200er_crtc_helper_funcs = {
	.mode_valid = mgag200_crtc_helper_mode_valid,
	.atomic_check = mgag200_crtc_helper_atomic_check,
	.atomic_flush = mgag200_crtc_helper_atomic_flush,
	.atomic_enable = mgag200_g200er_crtc_helper_atomic_enable,
	.atomic_disable = mgag200_crtc_helper_atomic_disable
};

static const struct drm_crtc_funcs mgag200_g200er_crtc_funcs = {
	MGAG200_CRTC_FUNCS,
};

static const struct drm_encoder_funcs mgag200_g200er_dac_encoder_funcs = {
	MGAG200_DAC_ENCODER_FUNCS,
};

static const struct drm_connector_helper_funcs mgag200_g200er_vga_connector_helper_funcs = {
	MGAG200_VGA_CONNECTOR_HELPER_FUNCS,
};

static const struct drm_connector_funcs mgag200_g200er_vga_connector_funcs = {
	MGAG200_VGA_CONNECTOR_FUNCS,
};

/*
 * Wire up the fixed display pipeline: one primary plane -> one CRTC ->
 * one DAC encoder -> one VGA connector (with DDC from the driver's i2c
 * bus). All objects live embedded in struct mga_device; returns 0 or
 * the first failing DRM init call's negative errno.
 */
static int mgag200_g200er_pipeline_init(struct mga_device *mdev)
{
	struct drm_device *dev = &mdev->base;
	struct drm_plane *primary_plane = &mdev->primary_plane;
	struct drm_crtc *crtc = &mdev->crtc;
	struct drm_encoder *encoder = &mdev->encoder;
	struct mga_i2c_chan *i2c = &mdev->i2c;
	struct drm_connector *connector = &mdev->connector;
	int ret;

	ret = drm_universal_plane_init(dev, primary_plane, 0,
				       &mgag200_g200er_primary_plane_funcs,
				       mgag200_primary_plane_formats,
				       mgag200_primary_plane_formats_size,
				       mgag200_primary_plane_fmtmods,
				       DRM_PLANE_TYPE_PRIMARY, NULL);
	if (ret) {
		drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret);
		return ret;
	}
	drm_plane_helper_add(primary_plane, &mgag200_g200er_primary_plane_helper_funcs);
	drm_plane_enable_fb_damage_clips(primary_plane);

	ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
					&mgag200_g200er_crtc_funcs, NULL);
	if (ret) {
		drm_err(dev, "drm_crtc_init_with_planes() failed: %d\n", ret);
		return ret;
	}
	drm_crtc_helper_add(crtc, &mgag200_g200er_crtc_helper_funcs);

	/* FIXME: legacy gamma tables, but atomic gamma doesn't work without */
	drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
	drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);

	encoder->possible_crtcs = drm_crtc_mask(crtc);
	ret = drm_encoder_init(dev, encoder, &mgag200_g200er_dac_encoder_funcs,
			       DRM_MODE_ENCODER_DAC, NULL);
	if (ret) {
		drm_err(dev, "drm_encoder_init() failed: %d\n", ret);
		return ret;
	}

	/* i2c must exist before the connector: it supplies the DDC adapter */
	ret = mgag200_i2c_init(mdev, i2c);
	if (ret) {
		drm_err(dev, "failed to add DDC bus: %d\n", ret);
		return ret;
	}

	ret = drm_connector_init_with_ddc(dev, connector,
					  &mgag200_g200er_vga_connector_funcs,
					  DRM_MODE_CONNECTOR_VGA,
					  &i2c->adapter);
	if (ret) {
		drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret);
		return ret;
	}
	drm_connector_helper_add(connector, &mgag200_g200er_vga_connector_helper_funcs);

	ret = drm_connector_attach_encoder(connector, encoder);
	if (ret) {
		drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret);
		return ret;
	}

	return 0;
}

/*
 * DRM device
 */

static const struct mgag200_device_info mgag200_g200er_device_info =
	MGAG200_DEVICE_INFO_INIT(2048, 2048, 55000, false, 1, 0, false);

static const struct mgag200_device_funcs mgag200_g200er_device_funcs = {
	.pixpllc_atomic_check = mgag200_g200er_pixpllc_atomic_check,
	.pixpllc_atomic_update = mgag200_g200er_pixpllc_atomic_update,
};

/*
 * Allocate and fully initialize a G200ER device: device pre-init, chip
 * register defaults, VRAM probing, KMS mode config and the display
 * pipeline. Note that unlike the EH/EV variants no PCI option register
 * is programmed here. Returns the mga_device or an ERR_PTR; all
 * allocations are device-managed, so no unwinding is done here.
 */
struct mga_device *mgag200_g200er_device_create(struct pci_dev *pdev,
						const struct drm_driver *drv)
{
	struct mga_device *mdev;
	struct drm_device *dev;
	resource_size_t vram_available;
	int ret;

	mdev = devm_drm_dev_alloc(&pdev->dev, drv, struct mga_device, base);
	if (IS_ERR(mdev))
		return mdev;
	dev = &mdev->base;

	pci_set_drvdata(pdev, dev);

	ret = mgag200_device_preinit(mdev);
	if (ret)
		return ERR_PTR(ret);

	ret = mgag200_device_init(mdev, &mgag200_g200er_device_info,
				  &mgag200_g200er_device_funcs);
	if (ret)
		return ERR_PTR(ret);

	mgag200_g200er_init_registers(mdev);

	vram_available = mgag200_device_probe_vram(mdev);

	ret = mgag200_mode_config_init(mdev, vram_available);
	if (ret)
		return ERR_PTR(ret);

	ret = mgag200_g200er_pipeline_init(mdev);
	if (ret)
		return ERR_PTR(ret);

	drm_mode_config_reset(dev);

	return mdev;
}
linux-master
drivers/gpu/drm/mgag200/mgag200_g200er.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors: Dave Airlie <[email protected]>
 */

#include <linux/export.h>
#include <linux/i2c-algo-bit.h>
#include <linux/i2c.h>
#include <linux/pci.h>

#include "mgag200_drv.h"

/*
 * Bit-banged DDC i2c bus over the MGA's general-purpose I/O pins,
 * driven through the DAC index/data register pair. Lines are released
 * by switching the pin to input (open-drain emulation): a "1" on the
 * bus is produced by not driving the pin, a "0" by driving it low.
 */

/* Read the current level of all GPIO pins (DAC GEN_IO_DATA register). */
static int mga_i2c_read_gpio(struct mga_device *mdev)
{
	WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
	return RREG8(DAC_DATA);
}

/*
 * Update the GPIO direction register: pins kept in 'mask' retain their
 * direction, pins in 'val' are switched to output. The data register is
 * always written 0, so any output pin drives the line low.
 */
static void mga_i2c_set_gpio(struct mga_device *mdev, int mask, int val)
{
	int tmp;

	WREG8(DAC_INDEX, MGA1064_GEN_IO_CTL);
	tmp = (RREG8(DAC_DATA) & mask) | val;
	WREG_DAC(MGA1064_GEN_IO_CTL, tmp);
	WREG_DAC(MGA1064_GEN_IO_DATA, 0);
}

/*
 * Set one bus line: state != 0 releases the pin (input, pulled high by
 * the bus), state == 0 drives it low — hence the inversion below.
 */
static inline void mga_i2c_set(struct mga_device *mdev, int mask, int state)
{
	if (state)
		state = 0;
	else
		state = mask;
	mga_i2c_set_gpio(mdev, ~mask, state);
}

/* i2c-algo-bit callback: drive/release the SDA pin. */
static void mga_gpio_setsda(void *data, int state)
{
	struct mga_i2c_chan *i2c = data;
	struct mga_device *mdev = to_mga_device(i2c->dev);
	mga_i2c_set(mdev, i2c->data, state);
}

/* i2c-algo-bit callback: drive/release the SCL pin. */
static void mga_gpio_setscl(void *data, int state)
{
	struct mga_i2c_chan *i2c = data;
	struct mga_device *mdev = to_mga_device(i2c->dev);
	mga_i2c_set(mdev, i2c->clock, state);
}

/* i2c-algo-bit callback: sample the SDA pin level. */
static int mga_gpio_getsda(void *data)
{
	struct mga_i2c_chan *i2c = data;
	struct mga_device *mdev = to_mga_device(i2c->dev);
	return (mga_i2c_read_gpio(mdev) & i2c->data) ? 1 : 0;
}

/* i2c-algo-bit callback: sample the SCL pin level. */
static int mga_gpio_getscl(void *data)
{
	struct mga_i2c_chan *i2c = data;
	struct mga_device *mdev = to_mga_device(i2c->dev);
	return (mga_i2c_read_gpio(mdev) & i2c->clock) ? 1 : 0;
}

/* devm release action: unregister the i2c adapter on device teardown. */
static void mgag200_i2c_release(void *res)
{
	struct mga_i2c_chan *i2c = res;

	i2c_del_adapter(&i2c->adapter);
}

/*
 * Initialize the bit-banged DDC bus: put the GPIO block into a known
 * state, pick the per-chip SDA/SCL pin bits from mdev->info, fill in
 * the i2c-algo-bit callbacks and register the adapter. Unregistration
 * is tied to the device via devm_add_action_or_reset(), so callers need
 * no explicit cleanup. Returns 0 or a negative errno.
 */
int mgag200_i2c_init(struct mga_device *mdev, struct mga_i2c_chan *i2c)
{
	struct drm_device *dev = &mdev->base;
	const struct mgag200_device_info *info = mdev->info;
	int ret;

	WREG_DAC(MGA1064_GEN_IO_CTL2, 1);
	WREG_DAC(MGA1064_GEN_IO_DATA, 0xff);
	WREG_DAC(MGA1064_GEN_IO_CTL, 0);

	i2c->data = BIT(info->i2c.data_bit);
	i2c->clock = BIT(info->i2c.clock_bit);
	i2c->adapter.owner = THIS_MODULE;
	i2c->adapter.class = I2C_CLASS_DDC;
	i2c->adapter.dev.parent = dev->dev;
	i2c->dev = dev;
	i2c_set_adapdata(&i2c->adapter, i2c);
	snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), "mga i2c");

	i2c->adapter.algo_data = &i2c->bit;

	i2c->bit.udelay = 10;	/* half-period in us -> ~50 kHz bus */
	i2c->bit.timeout = 2;	/* jiffies to wait for clock stretching */
	i2c->bit.data = i2c;
	i2c->bit.setsda = mga_gpio_setsda;
	i2c->bit.setscl = mga_gpio_setscl;
	i2c->bit.getsda = mga_gpio_getsda;
	i2c->bit.getscl = mga_gpio_getscl;

	ret = i2c_bit_add_bus(&i2c->adapter);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev->dev, mgag200_i2c_release, i2c);
}
linux-master
drivers/gpu/drm/mgag200/mgag200_i2c.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2010 Matt Turner.
 * Copyright 2012 Red Hat
 *
 * Authors: Matthew Garrett
 *	    Matt Turner
 *	    Dave Airlie
 */

#include <linux/delay.h>
#include <linux/iosys-map.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "mgag200_drv.h"

/*
 * This file contains setup code for the CRTC.
 */

/*
 * Program an identity (linear) gamma LUT for the given scanout format.
 * Palette entries are written through the DAC auto-incrementing
 * index/data pair, three bytes (R, G, B) per entry; the index is reset
 * to 0 before the writes.
 */
static void mgag200_crtc_set_gamma_linear(struct mga_device *mdev,
					  const struct drm_format_info *format)
{
	int i;

	WREG8(DAC_INDEX + MGA1064_INDEX, 0);

	switch (format->format) {
	case DRM_FORMAT_RGB565:
		/* Use better interpolation, to take 32 values from 0 to 255 */
		for (i = 0; i < MGAG200_LUT_SIZE / 8; i++) {
			WREG8(DAC_INDEX + MGA1064_COL_PAL, i * 8 + i / 4);
			WREG8(DAC_INDEX + MGA1064_COL_PAL, i * 4 + i / 16);
			WREG8(DAC_INDEX + MGA1064_COL_PAL, i * 8 + i / 4);
		}
		/* Green has one more bit, so add padding with 0 for red and blue. */
		for (i = MGAG200_LUT_SIZE / 8; i < MGAG200_LUT_SIZE / 4; i++) {
			WREG8(DAC_INDEX + MGA1064_COL_PAL, 0);
			WREG8(DAC_INDEX + MGA1064_COL_PAL, i * 4 + i / 16);
			WREG8(DAC_INDEX + MGA1064_COL_PAL, 0);
		}
		break;
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_XRGB8888:
		/* 8 bits per channel: identity mapping over all 256 entries. */
		for (i = 0; i < MGAG200_LUT_SIZE; i++) {
			WREG8(DAC_INDEX + MGA1064_COL_PAL, i);
			WREG8(DAC_INDEX + MGA1064_COL_PAL, i);
			WREG8(DAC_INDEX + MGA1064_COL_PAL, i);
		}
		break;
	default:
		drm_warn_once(&mdev->base, "Unsupported format %p4cc for gamma correction\n",
			      &format->format);
		break;
	}
}

/*
 * Program a user-supplied gamma LUT (@lut has MGAG200_LUT_SIZE entries;
 * checked in mgag200_crtc_helper_atomic_check). The 16-bit DRM LUT
 * components are truncated to the DAC's 8-bit palette (>> 8).
 */
static void mgag200_crtc_set_gamma(struct mga_device *mdev,
				   const struct drm_format_info *format,
				   struct drm_color_lut *lut)
{
	int i;

	WREG8(DAC_INDEX + MGA1064_INDEX, 0);

	switch (format->format) {
	case DRM_FORMAT_RGB565:
		/* Use better interpolation, to take 32 values from lut[0] to lut[255] */
		for (i = 0; i < MGAG200_LUT_SIZE / 8; i++) {
			WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i * 8 + i / 4].red >> 8);
			WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i * 4 + i / 16].green >> 8);
			WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i * 8 + i / 4].blue >> 8);
		}
		/* Green has one more bit, so add padding with 0 for red and blue. */
		for (i = MGAG200_LUT_SIZE / 8; i < MGAG200_LUT_SIZE / 4; i++) {
			WREG8(DAC_INDEX + MGA1064_COL_PAL, 0);
			WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i * 4 + i / 16].green >> 8);
			WREG8(DAC_INDEX + MGA1064_COL_PAL, 0);
		}
		break;
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_XRGB8888:
		for (i = 0; i < MGAG200_LUT_SIZE; i++) {
			WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i].red >> 8);
			WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i].green >> 8);
			WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i].blue >> 8);
		}
		break;
	default:
		drm_warn_once(&mdev->base, "Unsupported format %p4cc for gamma correction\n",
			      &format->format);
		break;
	}
}

/*
 * Busy-wait for the next vertical sync: first wait (up to 100 ms) for any
 * in-progress vsync (status bit 0x08) to end, then (up to another 100 ms)
 * for the next one to begin. Timeouts make this safe if the bit never
 * toggles.
 */
static inline void mga_wait_vsync(struct mga_device *mdev)
{
	unsigned long timeout = jiffies + HZ/10;
	unsigned int status = 0;

	do {
		status = RREG32(MGAREG_Status);
	} while ((status & 0x08) && time_before(jiffies, timeout));
	timeout = jiffies + HZ/10;
	status = 0;
	do {
		status = RREG32(MGAREG_Status);
	} while (!(status & 0x08) && time_before(jiffies, timeout));
}

/* Busy-wait (up to 1 s) until the engine-busy bit in the status register clears. */
static inline void mga_wait_busy(struct mga_device *mdev)
{
	unsigned long timeout = jiffies + HZ;
	unsigned int status = 0;

	do {
		status = RREG8(MGAREG_Status + 2);
	} while ((status & 0x01) && time_before(jiffies, timeout));
}

/*
 * This is how the framebuffer base address is stored in g200 cards:
 *   * Assume @offset is the gpu_addr variable of the framebuffer object
 *   * Then addr is the number of _pixels_ (not bytes) from the start of
 *     VRAM to the first pixel we want to display. (divided by 2 for 32bit
 *     framebuffers)
 *   * addr is stored in the CRTCEXT0, CRTCC and CRTCD registers
 *      addr<20> -> CRTCEXT0<6>
 *      addr<19-16> -> CRTCEXT0<3-0>
 *      addr<15-8> -> CRTCC<7-0>
 *      addr<7-0> -> CRTCD<7-0>
 *
 *  CRTCEXT0 has to be programmed last to trigger an update and make the
 *  new addr variable take effect.
 */
static void mgag200_set_startadd(struct mga_device *mdev,
				 unsigned long offset)
{
	struct drm_device *dev = &mdev->base;
	u32 startadd;
	u8 crtcc, crtcd, crtcext0;

	startadd = offset / 8;

	/* Some chips cannot scan out from a non-zero base (bug_no_startadd). */
	if (startadd > 0)
		drm_WARN_ON_ONCE(dev, mdev->info->bug_no_startadd);

	/*
	 * Can't store addresses any higher than that, but we also
	 * don't have more than 16 MiB of memory, so it should be fine.
	 */
	drm_WARN_ON(dev, startadd > 0x1fffff);

	RREG_ECRT(0x00, crtcext0);

	crtcc = (startadd >> 8) & 0xff;
	crtcd = startadd & 0xff;
	crtcext0 &= 0xb0;
	crtcext0 |= ((startadd >> 14) & BIT(6)) |
		    ((startadd >> 16) & 0x0f);

	WREG_CRT(0x0c, crtcc);
	WREG_CRT(0x0d, crtcd);
	WREG_ECRT(0x00, crtcext0);	/* written last; latches the new address */
}

/*
 * Bring the common VGA/CRTC registers into a sane baseline state:
 * sequencer defaults, cleared cursor/offset CRT registers, CRTC write
 * protection and vertical-interrupt bits disabled, and I/O address
 * select forced on in the misc register.
 */
void mgag200_init_registers(struct mga_device *mdev)
{
	u8 crtc11, misc;

	WREG_SEQ(2, 0x0f);
	WREG_SEQ(3, 0x00);
	WREG_SEQ(4, 0x0e);

	WREG_CRT(10, 0);
	WREG_CRT(11, 0);
	WREG_CRT(12, 0);
	WREG_CRT(13, 0);
	WREG_CRT(14, 0);
	WREG_CRT(15, 0);

	RREG_CRT(0x11, crtc11);
	crtc11 &= ~(MGAREG_CRTC11_CRTCPROTECT |
		    MGAREG_CRTC11_VINTEN |
		    MGAREG_CRTC11_VINTCLR);
	WREG_CRT(0x11, crtc11);

	misc = RREG8(MGA_MISC_IN);
	misc |= MGAREG_MISC_IOADSEL;
	WREG8(MGA_MISC_OUT, misc);
}

/*
 * Program the CRTC timing registers from @mode. Horizontal values are in
 * character clocks (pixels / 8) minus one, vertical values in lines minus
 * one (vtotal minus two), per VGA convention. Sync polarity goes into the
 * misc register; overflow bits of the wide counters are spread over
 * CRTCEXT1/CRTCEXT2.
 */
void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mode *mode)
{
	const struct mgag200_device_info *info = mdev->info;
	unsigned int hdisplay, hsyncstart, hsyncend, htotal;
	unsigned int vdisplay, vsyncstart, vsyncend, vtotal;
	u8 misc, crtcext1, crtcext2, crtcext5;

	hdisplay = mode->hdisplay / 8 - 1;
	hsyncstart = mode->hsync_start / 8 - 1;
	hsyncend = mode->hsync_end / 8 - 1;
	htotal = mode->htotal / 8 - 1;

	/* Work around hardware quirk */
	if ((htotal & 0x07) == 0x06 || (htotal & 0x07) == 0x04)
		htotal++;

	vdisplay = mode->vdisplay - 1;
	vsyncstart = mode->vsync_start - 1;
	vsyncend = mode->vsync_end - 1;
	vtotal = mode->vtotal - 2;

	misc = RREG8(MGA_MISC_IN);

	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		misc |= MGAREG_MISC_HSYNCPOL;
	else
		misc &= ~MGAREG_MISC_HSYNCPOL;

	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		misc |= MGAREG_MISC_VSYNCPOL;
	else
		misc &= ~MGAREG_MISC_VSYNCPOL;

	crtcext1 = (((htotal - 4) & 0x100) >> 8) |
		   ((hdisplay & 0x100) >> 7) |
		   ((hsyncstart & 0x100) >> 6) |
		   (htotal & 0x40);
	/* Chips behind a BMC gate sync through the video-reset lines. */
	if (info->has_vidrst)
		crtcext1 |= MGAREG_CRTCEXT1_VRSTEN |
			    MGAREG_CRTCEXT1_HRSTEN;

	crtcext2 = ((vtotal & 0xc00) >> 10) |
		   ((vdisplay & 0x400) >> 8) |
		   ((vdisplay & 0xc00) >> 7) |
		   ((vsyncstart & 0xc00) >> 5) |
		   ((vdisplay & 0x400) >> 3);
	crtcext5 = 0x00;

	WREG_CRT(0, htotal - 4);
	WREG_CRT(1, hdisplay);
	WREG_CRT(2, hdisplay);
	WREG_CRT(3, (htotal & 0x1F) | 0x80);
	WREG_CRT(4, hsyncstart);
	WREG_CRT(5, ((htotal & 0x20) << 2) | (hsyncend & 0x1F));
	WREG_CRT(6, vtotal & 0xFF);
	WREG_CRT(7, ((vtotal & 0x100) >> 8) |
		 ((vdisplay & 0x100) >> 7) |
		 ((vsyncstart & 0x100) >> 6) |
		 ((vdisplay & 0x100) >> 5) |
		 ((vdisplay & 0x100) >> 4) | /* linecomp */
		 ((vtotal & 0x200) >> 4) |
		 ((vdisplay & 0x200) >> 3) |
		 ((vsyncstart & 0x200) >> 2));
	WREG_CRT(9, ((vdisplay & 0x200) >> 4) |
		 ((vdisplay & 0x200) >> 3));
	WREG_CRT(16, vsyncstart & 0xFF);
	WREG_CRT(17, (vsyncend & 0x0F) | 0x20);
	WREG_CRT(18, vdisplay & 0xFF);
	WREG_CRT(20, 0);
	WREG_CRT(21, vdisplay & 0xFF);
	WREG_CRT(22, (vtotal + 1) & 0xFF);
	WREG_CRT(23, 0xc3);
	WREG_CRT(24, vdisplay & 0xFF);

	WREG_ECRT(0x01, crtcext1);
	WREG_ECRT(0x02, crtcext2);
	WREG_ECRT(0x05, crtcext5);

	WREG8(MGA_MISC_OUT, misc);
}

/*
 * Map bytes-per-pixel (1/2/3/4) to the hardware's pixel-size shift.
 * Index is cpp - 1; 3-byte formats share shift 0 with 1-byte formats and
 * are special-cased by the callers.
 */
static u8 mgag200_get_bpp_shift(const struct drm_format_info *format)
{
	static const u8 bpp_shift[] = {0, 1, 0, 2};

	return bpp_shift[format->cpp[0] - 1];
}

/*
 * Calculates the HW offset value from the framebuffer's pitch. The
 * offset is a multiple of the pixel size and depends on the display
 * format.
 */
static u32 mgag200_calculate_offset(struct mga_device *mdev,
				    const struct drm_framebuffer *fb)
{
	u32 offset = fb->pitches[0] / fb->format->cpp[0];
	u8 bppshift = mgag200_get_bpp_shift(fb->format);

	if (fb->format->cpp[0] * 8 == 24)
		offset = (offset * 3) >> (4 - bppshift);
	else
		offset = offset >> (4 - bppshift);

	return offset;
}

/*
 * Program the scanline pitch: low 8 bits go to CRT register 0x13, the
 * overflow bits into the CRTCEXT0 offset field.
 */
static void mgag200_set_offset(struct mga_device *mdev,
			       const struct drm_framebuffer *fb)
{
	u8 crtc13, crtcext0;
	u32 offset = mgag200_calculate_offset(mdev, fb);

	RREG_ECRT(0, crtcext0);

	crtc13 = offset & 0xff;

	crtcext0 &= ~MGAREG_CRTCEXT0_OFFSET_MASK;
	crtcext0 |= (offset >> 4) & MGAREG_CRTCEXT0_OFFSET_MASK;

	WREG_CRT(0x13, crtc13);
	WREG_ECRT(0x00, crtcext0);
}

/*
 * Program the pixel-format dependent registers: the DAC pixel multiplex
 * control for the color depth, the graphics-controller defaults, and the
 * CRTCEXT3 pixel scale field.
 */
void mgag200_set_format_regs(struct mga_device *mdev, const struct drm_format_info *format)
{
	struct drm_device *dev = &mdev->base;
	unsigned int bpp, bppshift, scale;
	u8 crtcext3, xmulctrl;

	bpp = format->cpp[0] * 8;

	bppshift = mgag200_get_bpp_shift(format);
	switch (bpp) {
	case 24:
		scale = ((1 << bppshift) * 3) - 1;
		break;
	default:
		scale = (1 << bppshift) - 1;
		break;
	}

	RREG_ECRT(3, crtcext3);

	switch (bpp) {
	case 8:
		xmulctrl = MGA1064_MUL_CTL_8bits;
		break;
	case 16:
		if (format->depth == 15)
			xmulctrl = MGA1064_MUL_CTL_15bits;
		else
			xmulctrl = MGA1064_MUL_CTL_16bits;
		break;
	case 24:
		xmulctrl = MGA1064_MUL_CTL_24bits;
		break;
	case 32:
		xmulctrl = MGA1064_MUL_CTL_32_24bits;
		break;
	default:
		/* BUG: We should have caught this problem already. */
		/* NOTE(review): drm_WARN_ON() takes a condition; the string
		 * literal is always true here, so this always warns. Likely
		 * intended to be drm_WARN_ONCE() or drm_err() with a message.
		 */
		drm_WARN_ON(dev, "invalid format depth\n");
		return;
	}

	crtcext3 &= ~GENMASK(2, 0);
	crtcext3 |= scale;

	WREG_DAC(MGA1064_MUL_CTL, xmulctrl);

	WREG_GFX(0, 0x00);
	WREG_GFX(1, 0x00);
	WREG_GFX(2, 0x00);
	WREG_GFX(3, 0x00);
	WREG_GFX(4, 0x00);
	WREG_GFX(5, 0x40);
	/* GCTL6 should be 0x05, but we configure memmapsl to 0xb8000 (text mode),
	 * so that it doesn't hang when running kexec/kdump on G200_SE rev42.
	 */
	WREG_GFX(6, 0x0d);
	WREG_GFX(7, 0x0f);
	WREG_GFX(8, 0x0f);

	WREG_ECRT(3, crtcext3);
}

/*
 * Release the sync reset in the sequencer, wait for the hardware to go
 * idle, then un-blank by clearing the sync-off bits in CRTCEXT1.
 */
void mgag200_enable_display(struct mga_device *mdev)
{
	u8 seq0, crtcext1;

	RREG_SEQ(0x00, seq0);
	seq0 |= MGAREG_SEQ0_SYNCRST |
		MGAREG_SEQ0_ASYNCRST;
	WREG_SEQ(0x00, seq0);

	/*
	 * TODO: replace busy waiting with vblank IRQ; put
	 *       msleep(50) before changing SCROFF
	 */
	mga_wait_vsync(mdev);
	mga_wait_busy(mdev);

	RREG_ECRT(0x01, crtcext1);
	crtcext1 &= ~MGAREG_CRTCEXT1_VSYNCOFF;
	crtcext1 &= ~MGAREG_CRTCEXT1_HSYNCOFF;
	WREG_ECRT(0x01, crtcext1);
}

/* Inverse of mgag200_enable_display(): assert sync reset and blank the output. */
static void mgag200_disable_display(struct mga_device *mdev)
{
	u8 seq0, crtcext1;

	RREG_SEQ(0x00, seq0);
	seq0 &= ~MGAREG_SEQ0_SYNCRST;
	WREG_SEQ(0x00, seq0);

	/*
	 * TODO: replace busy waiting with vblank IRQ; put
	 *       msleep(50) before changing SCROFF
	 */
	mga_wait_vsync(mdev);
	mga_wait_busy(mdev);

	RREG_ECRT(0x01, crtcext1);
	crtcext1 |= MGAREG_CRTCEXT1_VSYNCOFF |
		    MGAREG_CRTCEXT1_HSYNCOFF;
	WREG_ECRT(0x01, crtcext1);
}

/*
 * Copy the damaged region of the shadow buffer @vmap into VRAM at the
 * matching byte offset (shadow-plane scanout).
 */
static void mgag200_handle_damage(struct mga_device *mdev, const struct iosys_map *vmap,
				  struct drm_framebuffer *fb, struct drm_rect *clip)
{
	struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(mdev->vram);

	iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, clip));
	drm_fb_memcpy(&dst, fb->pitches, vmap, fb, clip);
}

/*
 * Primary plane
 */

const uint32_t mgag200_primary_plane_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGB888,
};

const size_t mgag200_primary_plane_formats_size = ARRAY_SIZE(mgag200_primary_plane_formats);

const uint64_t mgag200_primary_plane_fmtmods[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/*
 * Atomic check for the primary plane: no scaling allowed; a format change
 * (or first modeset) marks the CRTC state mode_changed so the PLL gets
 * reprogrammed, and the new format is cached in the mgag200 CRTC state.
 */
int mgag200_primary_plane_helper_atomic_check(struct drm_plane *plane,
					      struct drm_atomic_state *new_state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(new_state, plane);
	struct drm_framebuffer *new_fb = new_plane_state->fb;
	struct drm_framebuffer *fb = NULL;
	struct drm_crtc *new_crtc = new_plane_state->crtc;
	struct drm_crtc_state *new_crtc_state = NULL;
	struct mgag200_crtc_state *new_mgag200_crtc_state;
	int ret;

	if (new_crtc)
		new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_crtc);

	ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING,
						  false, true);
	if (ret)
		return ret;
	else if (!new_plane_state->visible)
		return 0;

	if (plane->state)
		fb = plane->state->fb;

	if (!fb || (fb->format != new_fb->format))
		new_crtc_state->mode_changed = true; /* update PLL settings */

	new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
	new_mgag200_crtc_state->format = new_fb->format;

	return 0;
}

/*
 * Atomic update: copy all damaged regions from the shadow buffer into
 * VRAM, then reprogram the scanout base (always VRAM offset 0) and the
 * pitch for the new framebuffer.
 */
void mgag200_primary_plane_helper_atomic_update(struct drm_plane *plane,
						struct drm_atomic_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct mga_device *mdev = to_mga_device(dev);
	struct drm_plane_state *plane_state = plane->state;
	struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(old_state, plane);
	struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
	struct drm_framebuffer *fb = plane_state->fb;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect damage;

	drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
	drm_atomic_for_each_plane_damage(&iter, &damage) {
		mgag200_handle_damage(mdev, shadow_plane_state->data, fb, &damage);
	}

	/* Always scanout image at VRAM offset 0 */
	mgag200_set_startadd(mdev, (u32)0);
	mgag200_set_offset(mdev, fb);
}

/* Un-blank scanout by clearing the sequencer screen-off bit. */
void mgag200_primary_plane_helper_atomic_enable(struct drm_plane *plane,
						struct drm_atomic_state *state)
{
	struct drm_device *dev = plane->dev;
	struct mga_device *mdev = to_mga_device(dev);
	u8 seq1;

	RREG_SEQ(0x01, seq1);
	seq1 &= ~MGAREG_SEQ1_SCROFF;
	WREG_SEQ(0x01, seq1);
	msleep(20);	/* settle time; see TODO about vblank IRQ above */
}

/* Blank scanout by setting the sequencer screen-off bit. */
void mgag200_primary_plane_helper_atomic_disable(struct drm_plane *plane,
						 struct drm_atomic_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct mga_device *mdev = to_mga_device(dev);
	u8 seq1;

	RREG_SEQ(0x01, seq1);
	seq1 |= MGAREG_SEQ1_SCROFF;
	WREG_SEQ(0x01, seq1);
	msleep(20);	/* settle time; see TODO about vblank IRQ above */
}

/*
 * CRTC
 */

/*
 * Validate a display mode against per-chip limits (max_hdisplay /
 * max_vdisplay), the 8-pixel character-clock granularity of the
 * horizontal timings, and the width of the hardware timing counters.
 */
enum drm_mode_status mgag200_crtc_helper_mode_valid(struct drm_crtc *crtc,
						    const struct drm_display_mode *mode)
{
	struct mga_device *mdev = to_mga_device(crtc->dev);
	const struct mgag200_device_info *info = mdev->info;

	/*
	 * Some devices have additional limits on the size of the
	 * display mode.
	 */
	if (mode->hdisplay > info->max_hdisplay)
		return MODE_VIRTUAL_X;
	if (mode->vdisplay > info->max_vdisplay)
		return MODE_VIRTUAL_Y;

	if ((mode->hdisplay % 8) != 0 || (mode->hsync_start % 8) != 0 ||
	    (mode->hsync_end % 8) != 0 || (mode->htotal % 8) != 0) {
		return MODE_H_ILLEGAL;
	}

	if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
	    mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 ||
	    mode->crtc_vdisplay > 2048 || mode->crtc_vsync_start > 4096 ||
	    mode->crtc_vsync_end > 4096 || mode->crtc_vtotal > 4096) {
		return MODE_BAD;
	}

	return MODE_OK;
}

/*
 * Atomic check for the CRTC: require a primary plane when enabled, run
 * the chip-specific PLL computation on mode changes, and validate the
 * size of a user-supplied gamma LUT.
 */
int mgag200_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *new_state)
{
	struct drm_device *dev = crtc->dev;
	struct mga_device *mdev = to_mga_device(dev);
	const struct mgag200_device_funcs *funcs = mdev->funcs;
	struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
	struct drm_property_blob *new_gamma_lut = new_crtc_state->gamma_lut;
	int ret;

	if (!new_crtc_state->enable)
		return 0;

	ret = drm_atomic_helper_check_crtc_primary_plane(new_crtc_state);
	if (ret)
		return ret;

	if (new_crtc_state->mode_changed) {
		if (funcs->pixpllc_atomic_check) {
			ret = funcs->pixpllc_atomic_check(crtc, new_state);
			if (ret)
				return ret;
		}
	}

	if (new_crtc_state->color_mgmt_changed && new_gamma_lut) {
		if (new_gamma_lut->length != MGAG200_LUT_SIZE * sizeof(struct drm_color_lut)) {
			drm_dbg(dev, "Wrong size for gamma_lut %zu\n", new_gamma_lut->length);
			return -EINVAL;
		}
	}

	return 0;
}

/* Flush: reprogram the gamma LUT if color management changed this commit. */
void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *old_state)
{
	struct drm_crtc_state *crtc_state = crtc->state;
	struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
	struct drm_device *dev = crtc->dev;
	struct mga_device *mdev = to_mga_device(dev);

	if (crtc_state->enable && crtc_state->color_mgmt_changed) {
		const struct drm_format_info *format = mgag200_crtc_state->format;

		if (crtc_state->gamma_lut)
			mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
		else
			mgag200_crtc_set_gamma_linear(mdev, format);
	}
}

/*
 * Enable: program format, timing, PLL and gamma registers, then switch
 * the display on. On BMC-managed chips the video-reset hooks bracket the
 * sequence so the BMC does not see an unstable signal.
 */
void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *old_state)
{
	struct drm_device *dev = crtc->dev;
	struct mga_device *mdev = to_mga_device(dev);
	const struct mgag200_device_funcs *funcs = mdev->funcs;
	struct drm_crtc_state *crtc_state = crtc->state;
	struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
	const struct drm_format_info *format = mgag200_crtc_state->format;

	if (funcs->disable_vidrst)
		funcs->disable_vidrst(mdev);

	mgag200_set_format_regs(mdev, format);
	mgag200_set_mode_regs(mdev, adjusted_mode);

	if (funcs->pixpllc_atomic_update)
		funcs->pixpllc_atomic_update(crtc, old_state);

	if (crtc_state->gamma_lut)
		mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
	else
		mgag200_crtc_set_gamma_linear(mdev, format);

	mgag200_enable_display(mdev);

	if (funcs->enable_vidrst)
		funcs->enable_vidrst(mdev);
}

/* Disable: blank the display, bracketed by the BMC video-reset hooks. */
void mgag200_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *old_state)
{
	struct mga_device *mdev = to_mga_device(crtc->dev);
	const struct mgag200_device_funcs *funcs = mdev->funcs;

	if (funcs->disable_vidrst)
		funcs->disable_vidrst(mdev);

	mgag200_disable_display(mdev);

	if (funcs->enable_vidrst)
		funcs->enable_vidrst(mdev);
}

/*
 * Reset the CRTC to a fresh, zeroed mgag200_crtc_state (subclass of
 * drm_crtc_state); falls back to a NULL state if allocation fails.
 */
void mgag200_crtc_reset(struct drm_crtc *crtc)
{
	struct mgag200_crtc_state *mgag200_crtc_state;

	if (crtc->state)
		crtc->funcs->atomic_destroy_state(crtc, crtc->state);

	mgag200_crtc_state = kzalloc(sizeof(*mgag200_crtc_state), GFP_KERNEL);
	if (mgag200_crtc_state)
		__drm_atomic_helper_crtc_reset(crtc, &mgag200_crtc_state->base);
	else
		__drm_atomic_helper_crtc_reset(crtc, NULL);
}

/*
 * Duplicate the CRTC state, copying the driver-private members (cached
 * format pointer and PLL values) alongside the core state.
 * NOTE(review): crtc->state is dereferenced via to_mgag200_crtc_state()
 * before the NULL check on crtc_state; harmless as written because the
 * container_of-style downcast does not read memory, but fragile.
 */
struct drm_crtc_state *mgag200_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state = crtc->state;
	struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
	struct mgag200_crtc_state *new_mgag200_crtc_state;

	if (!crtc_state)
		return NULL;

	new_mgag200_crtc_state = kzalloc(sizeof(*new_mgag200_crtc_state), GFP_KERNEL);
	if (!new_mgag200_crtc_state)
		return NULL;
	__drm_atomic_helper_crtc_duplicate_state(crtc, &new_mgag200_crtc_state->base);

	new_mgag200_crtc_state->format = mgag200_crtc_state->format;
	memcpy(&new_mgag200_crtc_state->pixpllc, &mgag200_crtc_state->pixpllc,
	       sizeof(new_mgag200_crtc_state->pixpllc));

	return &new_mgag200_crtc_state->base;
}

/* Free a state previously created by reset or duplicate_state. */
void mgag200_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state)
{
	struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);

	__drm_atomic_helper_crtc_destroy_state(&mgag200_crtc_state->base);
	kfree(mgag200_crtc_state);
}

/*
 * Connector
 */

/* Read EDID modes over DDC while holding the register lock (see below). */
int mgag200_vga_connector_helper_get_modes(struct drm_connector *connector)
{
	struct mga_device *mdev = to_mga_device(connector->dev);
	int ret;

	/*
	 * Protect access to I/O registers from concurrent modesetting
	 * by acquiring the I/O-register lock.
	 */
	mutex_lock(&mdev->rmmio_lock);
	ret = drm_connector_helper_get_modes_from_ddc(connector);
	mutex_unlock(&mdev->rmmio_lock);

	return ret;
}

/*
 * Mode config
 */

/* Commit tail serialized against DDC access via the same register lock. */
static void mgag200_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct mga_device *mdev = to_mga_device(state->dev);

	/*
	 * Concurrent operations could possibly trigger a call to
	 * drm_connector_helper_funcs.get_modes by trying to read the
	 * display modes. Protect access to I/O registers by acquiring
	 * the I/O-register lock.
	 */
	mutex_lock(&mdev->rmmio_lock);
	drm_atomic_helper_commit_tail(state);
	mutex_unlock(&mdev->rmmio_lock);
}

static const struct drm_mode_config_helper_funcs mgag200_mode_config_helper_funcs = {
	.atomic_commit_tail = mgag200_mode_config_helper_atomic_commit_tail,
};

/* Calculates a mode's required memory bandwidth (in KiB/sec). */
static uint32_t mgag200_calculate_mode_bandwidth(const struct drm_display_mode *mode,
						 unsigned int bits_per_pixel)
{
	uint32_t total_area, divisor;
	uint64_t active_area, pixels_per_second, bandwidth;
	uint64_t bytes_per_pixel = (bits_per_pixel + 7) / 8;

	divisor = 1024;

	if (!mode->htotal || !mode->vtotal || !mode->clock)
		return 0;

	active_area = mode->hdisplay * mode->vdisplay;
	total_area = mode->htotal * mode->vtotal;

	/* mode->clock is in kHz; scale to Hz, then to active pixels only. */
	pixels_per_second = active_area * mode->clock * 1000;
	do_div(pixels_per_second, total_area);

	bandwidth = pixels_per_second * bytes_per_pixel * 100;
	do_div(bandwidth, divisor);

	return (uint32_t)bandwidth;
}

/*
 * Global mode validation: reject modes whose framebuffer (at the widest
 * supported format, XRGB8888) would not fit in available VRAM, and modes
 * exceeding the chip's memory-bandwidth budget where one is specified.
 */
static enum drm_mode_status mgag200_mode_config_mode_valid(struct drm_device *dev,
							   const struct drm_display_mode *mode)
{
	static const unsigned int max_bpp = 4; // DRM_FORMAT_XRGB8888
	struct mga_device *mdev = to_mga_device(dev);
	unsigned long fbsize, fbpages, max_fbpages;
	const struct mgag200_device_info *info = mdev->info;

	max_fbpages = mdev->vram_available >> PAGE_SHIFT;

	fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
	fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);

	if (fbpages > max_fbpages)
		return MODE_MEM;

	/*
	 * Test the mode's required memory bandwidth if the device
	 * specifies a maximum. Not all devices do though.
	 */
	if (info->max_mem_bandwidth) {
		uint32_t mode_bandwidth = mgag200_calculate_mode_bandwidth(mode, max_bpp * 8);

		if (mode_bandwidth > (info->max_mem_bandwidth * 1024))
			return MODE_BAD;
	}

	return MODE_OK;
}

static const struct drm_mode_config_funcs mgag200_mode_config_funcs = {
	.fb_create = drm_gem_fb_create_with_dirty,
	.mode_valid = mgag200_mode_config_mode_valid,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

/*
 * Initialize DRM mode configuration for the device: records available
 * VRAM for mode validation and installs the mgag200 mode-config func
 * tables. Returns 0 on success or a negative errno.
 */
int mgag200_mode_config_init(struct mga_device *mdev, resource_size_t vram_available)
{
	struct drm_device *dev = &mdev->base;
	int ret;

	mdev->vram_available = vram_available;

	ret = drmm_mode_config_init(dev);
	if (ret) {
		drm_err(dev, "drmm_mode_config_init() failed: %d\n", ret);
		return ret;
	}

	dev->mode_config.max_width = MGAG200_MAX_FB_WIDTH;
	dev->mode_config.max_height = MGAG200_MAX_FB_HEIGHT;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.funcs = &mgag200_mode_config_funcs;
	dev->mode_config.helper_private = &mgag200_mode_config_helper_funcs;

	return 0;
}
/* ==== end of drivers/gpu/drm/mgag200/mgag200_mode.c (linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/delay.h>
#include <linux/pci.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>

#include "mgag200_drv.h"

/*
 * Chip-specific setup for the server-class G200WB (BMC-managed). Loads
 * the default DAC register table, skipping index ranges that must keep
 * their reset/firmware values, then applies the common register init.
 */
void mgag200_g200wb_init_registers(struct mga_device *mdev)
{
	static const u8 dacvalue[] = {
		MGAG200_DAC_DEFAULT(0x07, 0xc9, 0x1f, 0x00, 0x00, 0x00)
	};

	size_t i;

	for (i = 0; i < ARRAY_SIZE(dacvalue); i++) {
		/* Skip registers not covered by the default table. */
		if ((i <= 0x17) ||
		    (i == 0x1b) ||
		    (i == 0x1c) ||
		    ((i >= 0x1f) && (i <= 0x29)) ||
		    ((i >= 0x30) && (i <= 0x37)) ||
		    ((i >= 0x44) && (i <= 0x4e)))
			continue;
		WREG_DAC(i, dacvalue[i]);
	}

	mgag200_init_registers(mdev);
}

/*
 * PIXPLLC
 */

/*
 * Exhaustive search for the PLL divider triple (n, m, p) whose output
 * frequency pllreffreq * n / (m * p) best matches the requested pixel
 * clock, constrained to the VCO range [vcomin, vcomax] (all kHz).
 * The result is stored in the mgag200 CRTC state; applied later by
 * mgag200_g200wb_pixpllc_atomic_update(). Always returns 0.
 */
static int mgag200_g200wb_pixpllc_atomic_check(struct drm_crtc *crtc,
					       struct drm_atomic_state *new_state)
{
	static const unsigned int vcomax = 550000;
	static const unsigned int vcomin = 150000;
	static const unsigned int pllreffreq = 48000;

	struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
	struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
	long clock = new_crtc_state->mode.clock;
	struct mgag200_pll_values *pixpllc = &new_mgag200_crtc_state->pixpllc;
	unsigned int delta, tmpdelta;
	unsigned int testp, testm, testn;
	unsigned int p, m, n, s;
	unsigned int computed;

	m = n = p = s = 0;
	delta = 0xffffffff;

	for (testp = 1; testp < 9; testp++) {
		/* VCO frequency is clock * p; keep it within range. */
		if (clock * testp > vcomax)
			continue;
		if (clock * testp < vcomin)
			continue;

		for (testm = 1; testm < 17; testm++) {
			for (testn = 1; testn < 151; testn++) {
				computed = (pllreffreq * testn) / (testm * testp);
				if (computed > clock)
					tmpdelta = computed - clock;
				else
					tmpdelta = clock - computed;
				if (tmpdelta < delta) {
					delta = tmpdelta;
					n = testn;
					m = testm;
					p = testp;
					s = 0;
				}
			}
		}
	}

	pixpllc->m = m;
	pixpllc->n = n;
	pixpllc->p = p;
	pixpllc->s = s;

	return 0;
}

/*
 * Program the pixel PLL computed by the atomic check and wait for lock.
 * The outer loop retries the full power-down/program/power-up sequence
 * (up to 33 attempts, nudging a tuning register at CRTC index 0x1e on
 * each retry); lock is detected by watching VCOUNT advance by more than
 * two scanlines within the inner polling loop. The exact order of
 * register writes and udelay()s follows the hardware programming
 * sequence and must not be rearranged.
 */
void mgag200_g200wb_pixpllc_atomic_update(struct drm_crtc *crtc, struct drm_atomic_state *old_state)
{
	struct drm_device *dev = crtc->dev;
	struct mga_device *mdev = to_mga_device(dev);
	struct drm_crtc_state *crtc_state = crtc->state;
	struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
	struct mgag200_pll_values *pixpllc = &mgag200_crtc_state->pixpllc;
	bool pll_locked = false;
	unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
	u8 xpixpllcm, xpixpllcn, xpixpllcp, tmp;
	int i, j, tmpcount, vcount;

	pixpllcm = pixpllc->m - 1;
	pixpllcn = pixpllc->n - 1;
	pixpllcp = pixpllc->p - 1;
	pixpllcs = pixpllc->s;

	/* Pack the divider values into the chip's register layout. */
	xpixpllcm = ((pixpllcn & BIT(8)) >> 1) | pixpllcm;
	xpixpllcn = pixpllcn;
	xpixpllcp = ((pixpllcn & GENMASK(10, 9)) >> 3) | (pixpllcs << 3) | pixpllcp;

	WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);

	for (i = 0; i <= 32 && pll_locked == false; i++) {
		if (i > 0) {
			/* Retry: bump the tuning value at CRTC index 0x1e. */
			WREG8(MGAREG_CRTC_INDEX, 0x1e);
			tmp = RREG8(MGAREG_CRTC_DATA);
			if (tmp < 0xff)
				WREG8(MGAREG_CRTC_DATA, tmp+1);
		}

		/* set pixclkdis to 1 */
		WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
		tmp = RREG8(DAC_DATA);
		tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
		WREG8(DAC_DATA, tmp);

		WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
		tmp = RREG8(DAC_DATA);
		tmp |= MGA1064_REMHEADCTL_CLKDIS;
		WREG8(DAC_DATA, tmp);

		/* select PLL Set C */
		tmp = RREG8(MGAREG_MEM_MISC_READ);
		tmp |= 0x3 << 2;
		WREG8(MGAREG_MEM_MISC_WRITE, tmp);

		WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
		tmp = RREG8(DAC_DATA);
		tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80;
		WREG8(DAC_DATA, tmp);

		udelay(500);

		/* reset the PLL */
		WREG8(DAC_INDEX, MGA1064_VREF_CTL);
		tmp = RREG8(DAC_DATA);
		tmp &= ~0x04;
		WREG8(DAC_DATA, tmp);

		udelay(50);

		/* program pixel pll register */
		WREG_DAC(MGA1064_WB_PIX_PLLC_N, xpixpllcn);
		WREG_DAC(MGA1064_WB_PIX_PLLC_M, xpixpllcm);
		WREG_DAC(MGA1064_WB_PIX_PLLC_P, xpixpllcp);

		udelay(50);

		/* turn pll on */
		WREG8(DAC_INDEX, MGA1064_VREF_CTL);
		tmp = RREG8(DAC_DATA);
		tmp |= 0x04;
		WREG_DAC(MGA1064_VREF_CTL, tmp);

		udelay(500);

		/* select the pixel pll */
		WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
		tmp = RREG8(DAC_DATA);
		tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
		tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
		WREG8(DAC_DATA, tmp);

		WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
		tmp = RREG8(DAC_DATA);
		tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK;
		tmp |= MGA1064_REMHEADCTL_CLKSL_PLL;
		WREG8(DAC_DATA, tmp);

		/* reset dotclock rate bit */
		WREG8(MGAREG_SEQ_INDEX, 1);
		tmp = RREG8(MGAREG_SEQ_DATA);
		tmp &= ~0x8;
		WREG8(MGAREG_SEQ_DATA, tmp);

		WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
		tmp = RREG8(DAC_DATA);
		tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
		WREG8(DAC_DATA, tmp);

		vcount = RREG8(MGAREG_VCOUNT);

		/* Lock check: VCOUNT advancing means the pixel clock runs. */
		for (j = 0; j < 30 && pll_locked == false; j++) {
			tmpcount = RREG8(MGAREG_VCOUNT);
			if (tmpcount < vcount)
				vcount = 0;
			if ((tmpcount - vcount) > 2)
				pll_locked = true;
			else
				udelay(5);
		}
	}

	WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
	tmp = RREG8(DAC_DATA);
	tmp &= ~MGA1064_REMHEADCTL_CLKDIS;
	WREG_DAC(MGA1064_REMHEADCTL, tmp);
}

/*
 * Mode-setting pipeline
 */

static const struct drm_plane_helper_funcs mgag200_g200wb_primary_plane_helper_funcs = {
	MGAG200_PRIMARY_PLANE_HELPER_FUNCS,
};

static const struct drm_plane_funcs mgag200_g200wb_primary_plane_funcs = {
	MGAG200_PRIMARY_PLANE_FUNCS,
};

static const struct drm_crtc_helper_funcs mgag200_g200wb_crtc_helper_funcs = {
	MGAG200_CRTC_HELPER_FUNCS,
};

static const struct drm_crtc_funcs mgag200_g200wb_crtc_funcs = {
	MGAG200_CRTC_FUNCS,
};

static const struct drm_encoder_funcs mgag200_g200wb_dac_encoder_funcs = {
	MGAG200_DAC_ENCODER_FUNCS,
};

static const struct drm_connector_helper_funcs mgag200_g200wb_vga_connector_helper_funcs = {
	MGAG200_VGA_CONNECTOR_HELPER_FUNCS,
};

static const struct drm_connector_funcs mgag200_g200wb_vga_connector_funcs = {
	MGAG200_VGA_CONNECTOR_FUNCS,
};

/*
 * Wire up the fixed display pipeline for this chip:
 * primary plane -> CRTC -> DAC encoder -> VGA connector (with DDC bus).
 * All objects are embedded in struct mga_device. Returns 0 or a
 * negative errno; no manual teardown is needed on failure (DRM-managed).
 */
static int mgag200_g200wb_pipeline_init(struct mga_device *mdev)
{
	struct drm_device *dev = &mdev->base;
	struct drm_plane *primary_plane = &mdev->primary_plane;
	struct drm_crtc *crtc = &mdev->crtc;
	struct drm_encoder *encoder = &mdev->encoder;
	struct mga_i2c_chan *i2c = &mdev->i2c;
	struct drm_connector *connector = &mdev->connector;
	int ret;

	ret = drm_universal_plane_init(dev, primary_plane, 0,
				       &mgag200_g200wb_primary_plane_funcs,
				       mgag200_primary_plane_formats,
				       mgag200_primary_plane_formats_size,
				       mgag200_primary_plane_fmtmods,
				       DRM_PLANE_TYPE_PRIMARY, NULL);
	if (ret) {
		drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret);
		return ret;
	}
	drm_plane_helper_add(primary_plane, &mgag200_g200wb_primary_plane_helper_funcs);
	drm_plane_enable_fb_damage_clips(primary_plane);

	ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
					&mgag200_g200wb_crtc_funcs, NULL);
	if (ret) {
		drm_err(dev, "drm_crtc_init_with_planes() failed: %d\n", ret);
		return ret;
	}
	drm_crtc_helper_add(crtc, &mgag200_g200wb_crtc_helper_funcs);

	/* FIXME: legacy gamma tables, but atomic gamma doesn't work without */
	drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
	drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);

	encoder->possible_crtcs = drm_crtc_mask(crtc);
	ret = drm_encoder_init(dev, encoder, &mgag200_g200wb_dac_encoder_funcs,
			       DRM_MODE_ENCODER_DAC, NULL);
	if (ret) {
		drm_err(dev, "drm_encoder_init() failed: %d\n", ret);
		return ret;
	}

	ret = mgag200_i2c_init(mdev, i2c);
	if (ret) {
		drm_err(dev, "failed to add DDC bus: %d\n", ret);
		return ret;
	}

	ret = drm_connector_init_with_ddc(dev, connector,
					  &mgag200_g200wb_vga_connector_funcs,
					  DRM_MODE_CONNECTOR_VGA,
					  &i2c->adapter);
	if (ret) {
		drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret);
		return ret;
	}
	drm_connector_helper_add(connector, &mgag200_g200wb_vga_connector_helper_funcs);

	ret = drm_connector_attach_encoder(connector, encoder);
	if (ret) {
		drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret);
		return ret;
	}

	return 0;
}

/*
 * DRM device
 */

static const struct mgag200_device_info mgag200_g200wb_device_info =
	MGAG200_DEVICE_INFO_INIT(1280, 1024, 31877, true, 0, 1, false);

static const struct mgag200_device_funcs mgag200_g200wb_device_funcs = {
	.disable_vidrst = mgag200_bmc_disable_vidrst,
	.enable_vidrst = mgag200_bmc_enable_vidrst,
	.pixpllc_atomic_check = mgag200_g200wb_pixpllc_atomic_check,
	.pixpllc_atomic_update = mgag200_g200wb_pixpllc_atomic_update,
};

/*
 * Probe-time constructor for a G200WB device: allocates the DRM device,
 * programs the PCI option registers with chip-specific values, maps
 * registers and VRAM, initializes hardware registers, sizes VRAM, and
 * builds the mode-setting pipeline. Returns the device or an ERR_PTR;
 * all allocations are device-managed, so errors need no manual cleanup.
 */
struct mga_device *mgag200_g200wb_device_create(struct pci_dev *pdev, const struct drm_driver *drv)
{
	struct mga_device *mdev;
	struct drm_device *dev;
	resource_size_t vram_available;
	int ret;

	mdev = devm_drm_dev_alloc(&pdev->dev, drv, struct mga_device, base);
	if (IS_ERR(mdev))
		return mdev;
	dev = &mdev->base;

	pci_set_drvdata(pdev, dev);

	ret = mgag200_init_pci_options(pdev, 0x41049120, 0x0000b000);
	if (ret)
		return ERR_PTR(ret);

	ret = mgag200_device_preinit(mdev);
	if (ret)
		return ERR_PTR(ret);

	ret = mgag200_device_init(mdev, &mgag200_g200wb_device_info,
				  &mgag200_g200wb_device_funcs);
	if (ret)
		return ERR_PTR(ret);

	mgag200_g200wb_init_registers(mdev);

	vram_available = mgag200_device_probe_vram(mdev);

	ret = mgag200_mode_config_init(mdev, vram_available);
	if (ret)
		return ERR_PTR(ret);

	ret = mgag200_g200wb_pipeline_init(mdev);
	if (ret)
		return ERR_PTR(ret);

	drm_mode_config_reset(dev);

	return mdev;
}
/* ==== end of drivers/gpu/drm/mgag200/mgag200_g200wb.c (linux-master) ==== */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2012 Red Hat
 *
 * Authors: Matthew Garrett
 *          Dave Airlie
 */

#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_module.h>
#include <drm/drm_pciids.h>

#include "mgag200_drv.h"

/* -1 leaves the decision to drm_module_pci_driver_if_modeset() below */
static int mgag200_modeset = -1;
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, mgag200_modeset, int, 0400);

/*
 * Write the two MGA OPTION dwords into PCI config space. Returns 0 on
 * success or a negative errno converted from the PCIBIOS error code.
 */
int mgag200_init_pci_options(struct pci_dev *pdev, u32 option, u32 option2)
{
	struct device *dev = &pdev->dev;
	int err;

	err = pci_write_config_dword(pdev, PCI_MGA_OPTION, option);
	if (err != PCIBIOS_SUCCESSFUL) {
		dev_err(dev, "pci_write_config_dword(PCI_MGA_OPTION) failed: %d\n", err);
		return pcibios_err_to_errno(err);
	}

	err = pci_write_config_dword(pdev, PCI_MGA_OPTION2, option2);
	if (err != PCIBIOS_SUCCESSFUL) {
		dev_err(dev, "pci_write_config_dword(PCI_MGA_OPTION2) failed: %d\n", err);
		return pcibios_err_to_errno(err);
	}

	return 0;
}

/*
 * Detect usable VRAM behind @mem (at most @size bytes) by writing a
 * 0xaa55 test pattern at 16 KiB (0x4000) steps starting at 1 MiB and
 * reading it back. The scan stops when the pattern does not stick
 * (no memory at that offset) or when offset 0 changes (the write
 * wrapped around the aperture).
 */
resource_size_t mgag200_probe_vram(void __iomem *mem, resource_size_t size)
{
	int offset;
	int orig;
	int test1, test2;
	int orig1, orig2;
	size_t vram_size;

	/* Probe */
	orig = ioread16(mem);
	iowrite16(0, mem);

	vram_size = size;

	for (offset = 0x100000; offset < vram_size; offset += 0x4000) {
		/* NOTE(review): saved as 8-bit reads but restored with
		 * 16-bit writes below — presumably intentional; confirm. */
		orig1 = ioread8(mem + offset);
		orig2 = ioread8(mem + offset + 0x100);

		iowrite16(0xaa55, mem + offset);
		iowrite16(0xaa55, mem + offset + 0x100);

		test1 = ioread16(mem + offset);
		test2 = ioread16(mem);

		iowrite16(orig1, mem + offset);
		iowrite16(orig2, mem + offset + 0x100);

		/* pattern did not read back: no memory at this offset */
		if (test1 != 0xaa55)
			break;
		/* offset 0 was clobbered: the write aliased/wrapped */
		if (test2)
			break;
	}

	iowrite16(orig, mem);

	/* NOTE(review): reports 64 KiB less than the detected end —
	 * presumably reserved space; confirm against hardware docs. */
	return offset - 65536;
}

/*
 * DRM driver
 */

DEFINE_DRM_GEM_FOPS(mgag200_driver_fops);

static const struct drm_driver mgag200_driver = {
	.driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
	.fops = &mgag200_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
	DRM_GEM_SHMEM_DRIVER_OPS,
};

/*
 * DRM device
 */

/* Probe VRAM over the device's full VRAM aperture (BAR 0). */
resource_size_t mgag200_device_probe_vram(struct mga_device *mdev)
{
	return mgag200_probe_vram(mdev->vram, resource_size(mdev->vram_res));
}

/*
 * Reserve and map the PCI BARs: BAR 1 (registers, ioremapped) and
 * BAR 0 (VRAM, mapped write-combined when possible). All resources
 * are device-managed; nothing to undo on failure.
 */
int mgag200_device_preinit(struct mga_device *mdev)
{
	struct drm_device *dev = &mdev->base;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	resource_size_t start, len;
	struct resource *res;

	/* BAR 1 contains registers */
	start = pci_resource_start(pdev, 1);
	len = pci_resource_len(pdev, 1);

	res = devm_request_mem_region(dev->dev, start, len, "mgadrmfb_mmio");
	if (!res) {
		drm_err(dev, "devm_request_mem_region(MMIO) failed\n");
		return -ENXIO;
	}
	mdev->rmmio_res = res;

	mdev->rmmio = pcim_iomap(pdev, 1, 0);
	if (!mdev->rmmio)
		return -ENOMEM;

	/* BAR 0 is VRAM */
	start = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);

	res = devm_request_mem_region(dev->dev, start, len, "mgadrmfb_vram");
	if (!res) {
		drm_err(dev, "devm_request_mem_region(VRAM) failed\n");
		return -ENXIO;
	}
	mdev->vram_res = res;

	/* Don't fail on errors, but performance might be reduced. */
	devm_arch_io_reserve_memtype_wc(dev->dev, res->start, resource_size(res));
	devm_arch_phys_wc_add(dev->dev, res->start, resource_size(res));

	mdev->vram = devm_ioremap(dev->dev, res->start, resource_size(res));
	if (!mdev->vram)
		return -ENOMEM;

	return 0;
}

/*
 * Common device setup: store the model's info/funcs tables, then
 * enable MGA mode (CRTCEXT3) and VRAM mapping (MISC) on the chip.
 * Register access is serialized by mdev->rmmio_lock.
 */
int mgag200_device_init(struct mga_device *mdev,
			const struct mgag200_device_info *info,
			const struct mgag200_device_funcs *funcs)
{
	struct drm_device *dev = &mdev->base;
	u8 crtcext3, misc;
	int ret;

	mdev->info = info;
	mdev->funcs = funcs;

	ret = drmm_mutex_init(dev, &mdev->rmmio_lock);
	if (ret)
		return ret;

	mutex_lock(&mdev->rmmio_lock);

	RREG_ECRT(0x03, crtcext3);
	crtcext3 |= MGAREG_CRTCEXT3_MGAMODE;
	WREG_ECRT(0x03, crtcext3);
	WREG_ECRT(0x04, 0x00);

	misc = RREG8(MGA_MISC_IN);
	misc |= MGAREG_MISC_RAMMAPEN |
		MGAREG_MISC_HIGH_PG_SEL;
	WREG8(MGA_MISC_OUT, misc);

	mutex_unlock(&mdev->rmmio_lock);

	return 0;
}

/*
 * PCI driver
 */

static const struct pci_device_id mgag200_pciidlist[] = {
	{ PCI_VENDOR_ID_MATROX, 0x520, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_PCI },
	{ PCI_VENDOR_ID_MATROX, 0x521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_AGP },
	{ PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_A },
	{ PCI_VENDOR_ID_MATROX, 0x524, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_B },
	{ PCI_VENDOR_ID_MATROX, 0x530, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EV },
	{ PCI_VENDOR_ID_MATROX, 0x532, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_WB },
	{ PCI_VENDOR_ID_MATROX, 0x533, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EH },
	{ PCI_VENDOR_ID_MATROX, 0x534, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_ER },
	{ PCI_VENDOR_ID_MATROX, 0x536, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EW3 },
	{ PCI_VENDOR_ID_MATROX, 0x538, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EH3 },
	{0,}
};

MODULE_DEVICE_TABLE(pci, mgag200_pciidlist);

/*
 * Bind to a Matrox G200 variant: evict firmware framebuffers, enable
 * the PCI device, dispatch to the model-specific constructor by PCI-id
 * driver_data, then register the DRM device and fbdev emulation.
 */
static int mgag200_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	enum mga_type type = (enum mga_type)ent->driver_data;
	struct mga_device *mdev;
	struct drm_device *dev;
	int ret;

	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &mgag200_driver);
	if (ret)
		return ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	switch (type) {
	case G200_PCI:
	case G200_AGP:
		mdev = mgag200_g200_device_create(pdev, &mgag200_driver);
		break;
	case G200_SE_A:
	case G200_SE_B:
		mdev = mgag200_g200se_device_create(pdev, &mgag200_driver, type);
		break;
	case G200_WB:
		mdev = mgag200_g200wb_device_create(pdev, &mgag200_driver);
		break;
	case G200_EV:
		mdev = mgag200_g200ev_device_create(pdev, &mgag200_driver);
		break;
	case G200_EH:
		mdev = mgag200_g200eh_device_create(pdev, &mgag200_driver);
		break;
	case G200_EH3:
		mdev = mgag200_g200eh3_device_create(pdev, &mgag200_driver);
		break;
	case G200_ER:
		mdev = mgag200_g200er_device_create(pdev, &mgag200_driver);
		break;
	case G200_EW3:
		mdev = mgag200_g200ew3_device_create(pdev, &mgag200_driver);
		break;
	default:
		dev_err(&pdev->dev, "Device type %d is unsupported\n", type);
		return -ENODEV;
	}
	if (IS_ERR(mdev))
		return PTR_ERR(mdev);
	dev = &mdev->base;

	ret = drm_dev_register(dev, 0);
	if (ret)
		return ret;

	/*
	 * FIXME: A 24-bit color depth does not work with 24 bpp on
	 * G200ER. Force 32 bpp.
	 */
	drm_fbdev_generic_setup(dev, 32);

	return 0;
}

static void mgag200_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_dev_unregister(dev);
}

static struct pci_driver mgag200_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = mgag200_pciidlist,
	.probe = mgag200_pci_probe,
	.remove = mgag200_pci_remove,
};

drm_module_pci_driver_if_modeset(mgag200_pci_driver, mgag200_modeset);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
linux-master
drivers/gpu/drm/mgag200/mgag200_drv.c
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/delay.h>
#include <linux/pci.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>

#include "mgag200_drv.h"

/*
 * Program the G200EH DAC default table, then the common MGA registers.
 */
void mgag200_g200eh_init_registers(struct mga_device *mdev)
{
	static const u8 dacvalue[] = {
		MGAG200_DAC_DEFAULT(0x00, 0xc9,
				    MGA1064_MISC_CTL_VGA8 | MGA1064_MISC_CTL_DAC_RAM_CS,
				    0x00, 0x00, 0x00)
	};

	size_t i;

	for (i = 0; i < ARRAY_SIZE(dacvalue); i++) {
		/* NOTE(review): these index ranges are deliberately not
		 * written on G200EH — presumably reserved or set up
		 * elsewhere; confirm against hardware docs. */
		if ((i <= 0x17) ||
		    (i == 0x1b) ||
		    (i == 0x1c) ||
		    ((i >= 0x1f) && (i <= 0x29)) ||
		    ((i >= 0x30) && (i <= 0x37)) ||
		    ((i >= 0x44) && (i <= 0x4e)))
			continue;
		WREG_DAC(i, dacvalue[i]);
	}

	mgag200_init_registers(mdev);
}

/*
 * PIXPLLC
 */

/*
 * Brute-force search for PLL dividers (p, m, n) that best approximate
 * the requested pixel clock from a 33.333 MHz reference, keeping the
 * VCO within [vcomin, vcomax] kHz. The best match is stored in the
 * CRTC state for the atomic-update hook to program.
 */
static int mgag200_g200eh_pixpllc_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *new_state)
{
	static const unsigned int vcomax = 800000;
	static const unsigned int vcomin = 400000;
	static const unsigned int pllreffreq = 33333;

	struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
	struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
	long clock = new_crtc_state->mode.clock;
	struct mgag200_pll_values *pixpllc = &new_mgag200_crtc_state->pixpllc;
	unsigned int delta, tmpdelta;
	unsigned int testp, testm, testn;
	unsigned int p, m, n, s;
	unsigned int computed;

	m = n = p = s = 0;
	delta = 0xffffffff;

	for (testp = 16; testp > 0; testp >>= 1) {
		/* VCO = clock * p must stay within the legal band */
		if (clock * testp > vcomax)
			continue;
		if (clock * testp < vcomin)
			continue;

		for (testm = 1; testm < 33; testm++) {
			for (testn = 17; testn < 257; testn++) {
				computed = (pllreffreq * testn) / (testm * testp);
				if (computed > clock)
					tmpdelta = computed - clock;
				else
					tmpdelta = clock - computed;
				if (tmpdelta < delta) {
					delta = tmpdelta;
					n = testn;
					m = testm;
					p = testp;
				}
			}
		}
	}

	pixpllc->m = m;
	pixpllc->n = n;
	pixpllc->p = p;
	pixpllc->s = s;

	return 0;
}

/*
 * Program the pixel PLL with the values computed by atomic_check and
 * wait for lock. Lock is inferred by watching MGAREG_VCOUNT advance
 * by more than 2; the whole program-and-wait sequence is retried up
 * to 33 times.
 */
void mgag200_g200eh_pixpllc_atomic_update(struct drm_crtc *crtc, struct drm_atomic_state *old_state)
{
	struct drm_device *dev = crtc->dev;
	struct mga_device *mdev = to_mga_device(dev);
	struct drm_crtc_state *crtc_state = crtc->state;
	struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
	struct mgag200_pll_values *pixpllc = &mgag200_crtc_state->pixpllc;
	unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
	u8 xpixpllcm, xpixpllcn, xpixpllcp, tmp;
	int i, j, tmpcount, vcount;
	bool pll_locked = false;

	pixpllcm = pixpllc->m - 1;
	pixpllcn = pixpllc->n - 1;
	pixpllcp = pixpllc->p - 1;
	pixpllcs = pixpllc->s;

	/* bit 8 of N is folded into bit 7 of the M register value */
	xpixpllcm = ((pixpllcn & BIT(8)) >> 1) | pixpllcm;
	xpixpllcn = pixpllcn;
	xpixpllcp = (pixpllcs << 3) | pixpllcp;

	WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);

	for (i = 0; i <= 32 && pll_locked == false; i++) {
		/* disable the pixel clock while reprogramming */
		WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
		tmp = RREG8(DAC_DATA);
		tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
		WREG8(DAC_DATA, tmp);

		tmp = RREG8(MGAREG_MEM_MISC_READ);
		tmp |= 0x3 << 2;
		WREG8(MGAREG_MEM_MISC_WRITE, tmp);

		/* power down the PLL before loading new dividers */
		WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
		tmp = RREG8(DAC_DATA);
		tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
		WREG8(DAC_DATA, tmp);

		udelay(500);

		WREG_DAC(MGA1064_EH_PIX_PLLC_M, xpixpllcm);
		WREG_DAC(MGA1064_EH_PIX_PLLC_N, xpixpllcn);
		WREG_DAC(MGA1064_EH_PIX_PLLC_P, xpixpllcp);

		udelay(500);

		/* select the PLL as pixel-clock source */
		WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
		tmp = RREG8(DAC_DATA);
		tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
		tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
		WREG8(DAC_DATA, tmp);

		/* re-enable and power the clock back up */
		WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
		tmp = RREG8(DAC_DATA);
		tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
		tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
		WREG8(DAC_DATA, tmp);

		vcount = RREG8(MGAREG_VCOUNT);

		for (j = 0; j < 30 && pll_locked == false; j++) {
			tmpcount = RREG8(MGAREG_VCOUNT);
			/* counter wrapped: restart the comparison base */
			if (tmpcount < vcount)
				vcount = 0;
			if ((tmpcount - vcount) > 2)
				pll_locked = true;
			else
				udelay(5);
		}
	}
}

/*
 * Mode-setting pipeline
 */

static const struct drm_plane_helper_funcs mgag200_g200eh_primary_plane_helper_funcs = {
	MGAG200_PRIMARY_PLANE_HELPER_FUNCS,
};

static const struct drm_plane_funcs mgag200_g200eh_primary_plane_funcs = {
	MGAG200_PRIMARY_PLANE_FUNCS,
};

static const struct drm_crtc_helper_funcs mgag200_g200eh_crtc_helper_funcs = {
	MGAG200_CRTC_HELPER_FUNCS,
};

static const struct drm_crtc_funcs mgag200_g200eh_crtc_funcs = {
	MGAG200_CRTC_FUNCS,
};

static const struct drm_encoder_funcs mgag200_g200eh_dac_encoder_funcs = {
	MGAG200_DAC_ENCODER_FUNCS,
};

static const struct drm_connector_helper_funcs mgag200_g200eh_vga_connector_helper_funcs = {
	MGAG200_VGA_CONNECTOR_HELPER_FUNCS,
};

static const struct drm_connector_funcs mgag200_g200eh_vga_connector_funcs = {
	MGAG200_VGA_CONNECTOR_FUNCS,
};

/*
 * Build the fixed display pipeline: primary plane -> CRTC -> DAC
 * encoder -> VGA connector (with DDC over i2c).
 */
static int mgag200_g200eh_pipeline_init(struct mga_device *mdev)
{
	struct drm_device *dev = &mdev->base;
	struct drm_plane *primary_plane = &mdev->primary_plane;
	struct drm_crtc *crtc = &mdev->crtc;
	struct drm_encoder *encoder = &mdev->encoder;
	struct mga_i2c_chan *i2c = &mdev->i2c;
	struct drm_connector *connector = &mdev->connector;
	int ret;

	ret = drm_universal_plane_init(dev, primary_plane, 0,
				       &mgag200_g200eh_primary_plane_funcs,
				       mgag200_primary_plane_formats,
				       mgag200_primary_plane_formats_size,
				       mgag200_primary_plane_fmtmods,
				       DRM_PLANE_TYPE_PRIMARY, NULL);
	if (ret) {
		drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret);
		return ret;
	}
	drm_plane_helper_add(primary_plane, &mgag200_g200eh_primary_plane_helper_funcs);
	drm_plane_enable_fb_damage_clips(primary_plane);

	ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
					&mgag200_g200eh_crtc_funcs, NULL);
	if (ret) {
		drm_err(dev, "drm_crtc_init_with_planes() failed: %d\n", ret);
		return ret;
	}
	drm_crtc_helper_add(crtc, &mgag200_g200eh_crtc_helper_funcs);

	/* FIXME: legacy gamma tables, but atomic gamma doesn't work without */
	drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
	drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);

	encoder->possible_crtcs = drm_crtc_mask(crtc);
	ret = drm_encoder_init(dev, encoder, &mgag200_g200eh_dac_encoder_funcs,
			       DRM_MODE_ENCODER_DAC, NULL);
	if (ret) {
		drm_err(dev, "drm_encoder_init() failed: %d\n", ret);
		return ret;
	}

	ret = mgag200_i2c_init(mdev, i2c);
	if (ret) {
		drm_err(dev, "failed to add DDC bus: %d\n", ret);
		return ret;
	}

	ret = drm_connector_init_with_ddc(dev, connector,
					  &mgag200_g200eh_vga_connector_funcs,
					  DRM_MODE_CONNECTOR_VGA,
					  &i2c->adapter);
	if (ret) {
		drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret);
		return ret;
	}
	drm_connector_helper_add(connector, &mgag200_g200eh_vga_connector_helper_funcs);

	ret = drm_connector_attach_encoder(connector, encoder);
	if (ret) {
		drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret);
		return ret;
	}

	return 0;
}

/*
 * DRM device
 */

static const struct mgag200_device_info mgag200_g200eh_device_info =
	MGAG200_DEVICE_INFO_INIT(2048, 2048, 37500, false, 1, 0, false);

static const struct mgag200_device_funcs mgag200_g200eh_device_funcs = {
	.pixpllc_atomic_check = mgag200_g200eh_pixpllc_atomic_check,
	.pixpllc_atomic_update = mgag200_g200eh_pixpllc_atomic_update,
};

/*
 * Allocate and fully initialize a G200EH device: PCI options,
 * BAR setup, chip registers, VRAM probe, mode config and pipeline.
 * Returns the device or an ERR_PTR; all allocations are devres-managed.
 */
struct mga_device *mgag200_g200eh_device_create(struct pci_dev *pdev, const struct drm_driver *drv)
{
	struct mga_device *mdev;
	struct drm_device *dev;
	resource_size_t vram_available;
	int ret;

	mdev = devm_drm_dev_alloc(&pdev->dev, drv, struct mga_device, base);
	if (IS_ERR(mdev))
		return mdev;
	dev = &mdev->base;

	pci_set_drvdata(pdev, dev);

	ret = mgag200_init_pci_options(pdev, 0x00000120, 0x0000b000);
	if (ret)
		return ERR_PTR(ret);

	ret = mgag200_device_preinit(mdev);
	if (ret)
		return ERR_PTR(ret);

	ret = mgag200_device_init(mdev, &mgag200_g200eh_device_info,
				  &mgag200_g200eh_device_funcs);
	if (ret)
		return ERR_PTR(ret);

	mgag200_g200eh_init_registers(mdev);

	vram_available = mgag200_device_probe_vram(mdev);

	ret = mgag200_mode_config_init(mdev, vram_available);
	if (ret)
		return ERR_PTR(ret);

	ret = mgag200_g200eh_pipeline_init(mdev);
	if (ret)
		return ERR_PTR(ret);

	drm_mode_config_reset(dev);

	return mdev;
}
linux-master
drivers/gpu/drm/mgag200/mgag200_g200eh.c
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/pci.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>

#include "mgag200_drv.h"

/* G200EW3 register setup is the G200WB sequence plus one extra CRTCEXT. */
static void mgag200_g200ew3_init_registers(struct mga_device *mdev)
{
	mgag200_g200wb_init_registers(mdev); // same as G200WB

	WREG_ECRT(0x34, 0x5); // G200EW3 specific
}

/*
 * PIXPLLC
 */

/*
 * Brute-force PLL divider search with a two-stage post divider
 * (testp * testp2). Note the stored values are off-by-one-encoded
 * relative to the loop variables (m = testm + 1, etc.); the update
 * hook (shared with G200WB) subtracts 1 again before programming.
 */
static int mgag200_g200ew3_pixpllc_atomic_check(struct drm_crtc *crtc,
						struct drm_atomic_state *new_state)
{
	static const unsigned int vcomax = 800000;
	static const unsigned int vcomin = 400000;
	static const unsigned int pllreffreq = 25000;

	struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
	struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
	long clock = new_crtc_state->mode.clock;
	struct mgag200_pll_values *pixpllc = &new_mgag200_crtc_state->pixpllc;
	unsigned int delta, tmpdelta;
	unsigned int testp, testm, testn, testp2;
	unsigned int p, m, n, s;
	unsigned int computed;

	m = n = p = s = 0;
	delta = 0xffffffff;

	for (testp = 1; testp < 8; testp++) {
		for (testp2 = 1; testp2 < 8; testp2++) {
			if (testp < testp2)
				continue;
			if ((clock * testp * testp2) > vcomax)
				continue;
			if ((clock * testp * testp2) < vcomin)
				continue;

			for (testm = 1; testm < 26; testm++) {
				for (testn = 32; testn < 2048 ; testn++) {
					computed = (pllreffreq * testn) / (testm * testp * testp2);
					if (computed > clock)
						tmpdelta = computed - clock;
					else
						tmpdelta = clock - computed;
					if (tmpdelta < delta) {
						delta = tmpdelta;
						m = testm + 1;
						n = testn + 1;
						p = testp + 1;
						s = testp2;
					}
				}
			}
		}
	}

	pixpllc->m = m;
	pixpllc->n = n;
	pixpllc->p = p;
	pixpllc->s = s;

	return 0;
}

/*
 * Mode-setting pipeline
 */

static const struct drm_plane_helper_funcs mgag200_g200ew3_primary_plane_helper_funcs = {
	MGAG200_PRIMARY_PLANE_HELPER_FUNCS,
};

static const struct drm_plane_funcs mgag200_g200ew3_primary_plane_funcs = {
	MGAG200_PRIMARY_PLANE_FUNCS,
};

static const struct drm_crtc_helper_funcs mgag200_g200ew3_crtc_helper_funcs = {
	MGAG200_CRTC_HELPER_FUNCS,
};

static const struct drm_crtc_funcs mgag200_g200ew3_crtc_funcs = {
	MGAG200_CRTC_FUNCS,
};

static const struct drm_encoder_funcs mgag200_g200ew3_dac_encoder_funcs = {
	MGAG200_DAC_ENCODER_FUNCS,
};

static const struct drm_connector_helper_funcs mgag200_g200ew3_vga_connector_helper_funcs = {
	MGAG200_VGA_CONNECTOR_HELPER_FUNCS,
};

static const struct drm_connector_funcs mgag200_g200ew3_vga_connector_funcs = {
	MGAG200_VGA_CONNECTOR_FUNCS,
};

/*
 * Build the fixed display pipeline: primary plane -> CRTC -> DAC
 * encoder -> VGA connector (with DDC over i2c).
 */
static int mgag200_g200ew3_pipeline_init(struct mga_device *mdev)
{
	struct drm_device *dev = &mdev->base;
	struct drm_plane *primary_plane = &mdev->primary_plane;
	struct drm_crtc *crtc = &mdev->crtc;
	struct drm_encoder *encoder = &mdev->encoder;
	struct mga_i2c_chan *i2c = &mdev->i2c;
	struct drm_connector *connector = &mdev->connector;
	int ret;

	ret = drm_universal_plane_init(dev, primary_plane, 0,
				       &mgag200_g200ew3_primary_plane_funcs,
				       mgag200_primary_plane_formats,
				       mgag200_primary_plane_formats_size,
				       mgag200_primary_plane_fmtmods,
				       DRM_PLANE_TYPE_PRIMARY, NULL);
	if (ret) {
		drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret);
		return ret;
	}
	drm_plane_helper_add(primary_plane, &mgag200_g200ew3_primary_plane_helper_funcs);
	drm_plane_enable_fb_damage_clips(primary_plane);

	ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
					&mgag200_g200ew3_crtc_funcs, NULL);
	if (ret) {
		drm_err(dev, "drm_crtc_init_with_planes() failed: %d\n", ret);
		return ret;
	}
	drm_crtc_helper_add(crtc, &mgag200_g200ew3_crtc_helper_funcs);

	/* FIXME: legacy gamma tables, but atomic gamma doesn't work without */
	drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
	drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);

	encoder->possible_crtcs = drm_crtc_mask(crtc);
	ret = drm_encoder_init(dev, encoder, &mgag200_g200ew3_dac_encoder_funcs,
			       DRM_MODE_ENCODER_DAC, NULL);
	if (ret) {
		drm_err(dev, "drm_encoder_init() failed: %d\n", ret);
		return ret;
	}

	ret = mgag200_i2c_init(mdev, i2c);
	if (ret) {
		drm_err(dev, "failed to add DDC bus: %d\n", ret);
		return ret;
	}

	ret = drm_connector_init_with_ddc(dev, connector,
					  &mgag200_g200ew3_vga_connector_funcs,
					  DRM_MODE_CONNECTOR_VGA,
					  &i2c->adapter);
	if (ret) {
		drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret);
		return ret;
	}
	drm_connector_helper_add(connector, &mgag200_g200ew3_vga_connector_helper_funcs);

	ret = drm_connector_attach_encoder(connector, encoder);
	if (ret) {
		drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret);
		return ret;
	}

	return 0;
}

/*
 * DRM device
 */

static const struct mgag200_device_info mgag200_g200ew3_device_info =
	MGAG200_DEVICE_INFO_INIT(2048, 2048, 0, true, 0, 1, false);

static const struct mgag200_device_funcs mgag200_g200ew3_device_funcs = {
	.disable_vidrst = mgag200_bmc_disable_vidrst,
	.enable_vidrst = mgag200_bmc_enable_vidrst,
	.pixpllc_atomic_check = mgag200_g200ew3_pixpllc_atomic_check,
	.pixpllc_atomic_update = mgag200_g200wb_pixpllc_atomic_update, // same as G200WB
};

/*
 * Like mgag200_device_probe_vram(), but on devices with at least
 * 16 MiB of VRAM the top 4 MiB are kept out of the probed range.
 */
static resource_size_t mgag200_g200ew3_device_probe_vram(struct mga_device *mdev)
{
	resource_size_t vram_size = resource_size(mdev->vram_res);

	if (vram_size >= 0x1000000)
		vram_size = vram_size - 0x400000;

	return mgag200_probe_vram(mdev->vram, vram_size);
}

/*
 * Allocate and fully initialize a G200EW3 device: PCI options,
 * BAR setup, chip registers, VRAM probe, mode config and pipeline.
 * Returns the device or an ERR_PTR; all allocations are devres-managed.
 */
struct mga_device *mgag200_g200ew3_device_create(struct pci_dev *pdev,
						 const struct drm_driver *drv)
{
	struct mga_device *mdev;
	struct drm_device *dev;
	resource_size_t vram_available;
	int ret;

	mdev = devm_drm_dev_alloc(&pdev->dev, drv, struct mga_device, base);
	if (IS_ERR(mdev))
		return mdev;
	dev = &mdev->base;

	pci_set_drvdata(pdev, dev);

	ret = mgag200_init_pci_options(pdev, 0x41049120, 0x0000b000);
	if (ret)
		return ERR_PTR(ret);

	ret = mgag200_device_preinit(mdev);
	if (ret)
		return ERR_PTR(ret);

	ret = mgag200_device_init(mdev, &mgag200_g200ew3_device_info,
				  &mgag200_g200ew3_device_funcs);
	if (ret)
		return ERR_PTR(ret);

	mgag200_g200ew3_init_registers(mdev);

	vram_available = mgag200_g200ew3_device_probe_vram(mdev);

	ret = mgag200_mode_config_init(mdev, vram_available);
	if (ret)
		return ERR_PTR(ret);

	ret = mgag200_g200ew3_pipeline_init(mdev);
	if (ret)
		return ERR_PTR(ret);

	drm_mode_config_reset(dev);

	return mdev;
}
linux-master
drivers/gpu/drm/mgag200/mgag200_g200ew3.c
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/delay.h>
#include <linux/pci.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_probe_helper.h>

#include "mgag200_drv.h"

/*
 * Build the PCI OPTION word for G200SE, preserving the SGRAM bit the
 * firmware left behind, and write it together with OPTION2.
 */
static int mgag200_g200se_init_pci_options(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	bool has_sgram;
	u32 option;
	int err;

	err = pci_read_config_dword(pdev, PCI_MGA_OPTION, &option);
	if (err != PCIBIOS_SUCCESSFUL) {
		dev_err(dev, "pci_read_config_dword(PCI_MGA_OPTION) failed: %d\n", err);
		return pcibios_err_to_errno(err);
	}

	has_sgram = !!(option & PCI_MGA_OPTION_HARDPWMSK);

	option = 0x40049120;
	if (has_sgram)
		option |= PCI_MGA_OPTION_HARDPWMSK;

	return mgag200_init_pci_options(pdev, option, 0x00008000);
}

/*
 * Program the G200SE DAC default table, then the common MGA registers.
 */
static void mgag200_g200se_init_registers(struct mgag200_g200se_device *g200se)
{
	static const u8 dacvalue[] = {
		MGAG200_DAC_DEFAULT(0x03,
				    MGA1064_PIX_CLK_CTL_SEL_PLL,
				    MGA1064_MISC_CTL_DAC_EN |
				    MGA1064_MISC_CTL_VGA8 |
				    MGA1064_MISC_CTL_DAC_RAM_CS,
				    0x00, 0x00, 0x00)
	};

	struct mga_device *mdev = &g200se->base;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(dacvalue); i++) {
		/* NOTE(review): these index ranges are deliberately not
		 * written on G200SE — presumably reserved or set up
		 * elsewhere; confirm against hardware docs. */
		if ((i <= 0x17) ||
		    (i == 0x1b) ||
		    (i == 0x1c) ||
		    ((i >= 0x1f) && (i <= 0x29)) ||
		    ((i == 0x2c) || (i == 0x2d) || (i == 0x2e)) ||
		    ((i >= 0x30) && (i <= 0x37)))
			continue;
		WREG_DAC(i, dacvalue[i]);
	}

	mgag200_init_registers(mdev);
}

/*
 * Set the memory-access priority level (CRTCEXT6) by chip revision;
 * for revisions 0x02..0x03 it is derived from the mode's bandwidth
 * demand (pixel clock * bpp).
 */
static void mgag200_g200se_set_hiprilvl(struct mga_device *mdev,
					const struct drm_display_mode *mode,
					const struct drm_format_info *format)
{
	struct mgag200_g200se_device *g200se = to_mgag200_g200se_device(&mdev->base);
	unsigned int hiprilvl;
	u8 crtcext6;

	if (g200se->unique_rev_id >= 0x04) {
		hiprilvl = 0;
	} else if (g200se->unique_rev_id >= 0x02) {
		unsigned int bpp;
		unsigned long mb;

		/* round the framebuffer format up to 8/16/32 bpp */
		if (format->cpp[0] * 8 > 16)
			bpp = 32;
		else if (format->cpp[0] * 8 > 8)
			bpp = 16;
		else
			bpp = 8;

		mb = (mode->clock * bpp) / 1000;
		if (mb > 3100)
			hiprilvl = 0;
		else if (mb > 2600)
			hiprilvl = 1;
		else if (mb > 1900)
			hiprilvl = 2;
		else if (mb > 1160)
			hiprilvl = 3;
		else if (mb > 440)
			hiprilvl = 4;
		else
			hiprilvl = 5;
	} else if (g200se->unique_rev_id >= 0x01) {
		hiprilvl = 3;
	} else {
		hiprilvl = 4;
	}

	crtcext6 = hiprilvl; /* implicitly sets maxhipri to 0 */

	WREG_ECRT(0x06, crtcext6);
}

/*
 * PIXPLLC
 */

/*
 * PLL divider search for G200SE revisions < 0x04. Fails with -EINVAL
 * if no combination comes within 0.5% of the requested pixel clock.
 */
static int mgag200_g200se_00_pixpllc_atomic_check(struct drm_crtc *crtc,
						  struct drm_atomic_state *new_state)
{
	static const unsigned int vcomax = 320000;
	static const unsigned int vcomin = 160000;
	static const unsigned int pllreffreq = 25000;

	struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
	struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
	long clock = new_crtc_state->mode.clock;
	struct mgag200_pll_values *pixpllc = &new_mgag200_crtc_state->pixpllc;
	unsigned int delta, tmpdelta, permitteddelta;
	unsigned int testp, testm, testn;
	unsigned int p, m, n, s;
	unsigned int computed;

	m = n = p = s = 0;
	delta = 0xffffffff;
	permitteddelta = clock * 5 / 1000;

	for (testp = 8; testp > 0; testp /= 2) {
		if (clock * testp > vcomax)
			continue;
		if (clock * testp < vcomin)
			continue;

		for (testn = 17; testn < 256; testn++) {
			for (testm = 1; testm < 32; testm++) {
				computed = (pllreffreq * testn) / (testm * testp);
				if (computed > clock)
					tmpdelta = computed - clock;
				else
					tmpdelta = clock - computed;
				if (tmpdelta < delta) {
					delta = tmpdelta;
					m = testm;
					n = testn;
					p = testp;
				}
			}
		}
	}

	if (delta > permitteddelta) {
		pr_warn("PLL delta too large\n");
		return -EINVAL;
	}

	pixpllc->m = m;
	pixpllc->n = n;
	pixpllc->p = p;
	pixpllc->s = s;

	return 0;
}

/* Program the pixel PLL dividers computed by the 00-revision check. */
static void mgag200_g200se_00_pixpllc_atomic_update(struct drm_crtc *crtc,
						    struct drm_atomic_state *old_state)
{
	struct drm_device *dev = crtc->dev;
	struct mga_device *mdev = to_mga_device(dev);
	struct drm_crtc_state *crtc_state = crtc->state;
	struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
	struct mgag200_pll_values *pixpllc = &mgag200_crtc_state->pixpllc;
	unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
	u8 xpixpllcm, xpixpllcn, xpixpllcp;

	pixpllcm = pixpllc->m - 1;
	pixpllcn = pixpllc->n - 1;
	pixpllcp = pixpllc->p - 1;
	pixpllcs = pixpllc->s;

	/* bit 8 of N is folded into bit 7 of the M register value */
	xpixpllcm = pixpllcm | ((pixpllcn & BIT(8)) >> 1);
	xpixpllcn = pixpllcn;
	xpixpllcp = (pixpllcs << 3) | pixpllcp;

	WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);

	WREG_DAC(MGA1064_PIX_PLLC_M, xpixpllcm);
	WREG_DAC(MGA1064_PIX_PLLC_N, xpixpllcn);
	WREG_DAC(MGA1064_PIX_PLLC_P, xpixpllcp);
}

/*
 * PLL divider search for G200SE revisions >= 0x04. Works on twice the
 * requested clock (min 25 MHz), restricts p to a fixed table, and
 * encodes a VCO-frequency-derived filter value into s.
 */
static int mgag200_g200se_04_pixpllc_atomic_check(struct drm_crtc *crtc,
						  struct drm_atomic_state *new_state)
{
	static const unsigned int vcomax = 1600000;
	static const unsigned int vcomin = 800000;
	static const unsigned int pllreffreq = 25000;
	static const unsigned int pvalues_e4[] = {16, 14, 12, 10, 8, 6, 4, 2, 1};

	struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
	struct mgag200_crtc_state *new_mgag200_crtc_state = to_mgag200_crtc_state(new_crtc_state);
	long clock = new_crtc_state->mode.clock;
	struct mgag200_pll_values *pixpllc = &new_mgag200_crtc_state->pixpllc;
	unsigned int delta, tmpdelta, permitteddelta;
	unsigned int testp, testm, testn;
	unsigned int p, m, n, s;
	unsigned int computed;
	unsigned int fvv;
	unsigned int i;

	m = n = p = s = 0;
	delta = 0xffffffff;

	if (clock < 25000)
		clock = 25000;
	clock = clock * 2;

	/* Permitted delta is 0.5% as per the VESA specification */
	permitteddelta = clock * 5 / 1000;

	for (i = 0 ; i < ARRAY_SIZE(pvalues_e4); i++) {
		testp = pvalues_e4[i];

		if ((clock * testp) > vcomax)
			continue;
		if ((clock * testp) < vcomin)
			continue;

		for (testn = 50; testn <= 256; testn++) {
			for (testm = 1; testm <= 32; testm++) {
				computed = (pllreffreq * testn) / (testm * testp);
				if (computed > clock)
					tmpdelta = computed - clock;
				else
					tmpdelta = clock - computed;
				if (tmpdelta < delta) {
					delta = tmpdelta;
					m = testm;
					n = testn;
					p = testp;
				}
			}
		}
	}

	/* derive the s field from the resulting VCO frequency */
	fvv = pllreffreq * n / m;
	fvv = (fvv - 800000) / 50000;
	if (fvv > 15)
		fvv = 15;
	s = fvv << 1;

	if (delta > permitteddelta) {
		pr_warn("PLL delta too large\n");
		return -EINVAL;
	}

	pixpllc->m = m;
	pixpllc->n = n;
	pixpllc->p = p;
	pixpllc->s = s;

	return 0;
}

/* Program the pixel PLL dividers computed by the 04-revision check. */
static void mgag200_g200se_04_pixpllc_atomic_update(struct drm_crtc *crtc,
						    struct drm_atomic_state *old_state)
{
	struct drm_device *dev = crtc->dev;
	struct mga_device *mdev = to_mga_device(dev);
	struct drm_crtc_state *crtc_state = crtc->state;
	struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
	struct mgag200_pll_values *pixpllc = &mgag200_crtc_state->pixpllc;
	unsigned int pixpllcm, pixpllcn, pixpllcp, pixpllcs;
	u8 xpixpllcm, xpixpllcn, xpixpllcp;

	pixpllcm = pixpllc->m - 1;
	pixpllcn = pixpllc->n - 1;
	pixpllcp = pixpllc->p - 1;
	pixpllcs = pixpllc->s;

	// For G200SE A, BIT(7) should be set unconditionally.
	xpixpllcm = BIT(7) | pixpllcm;
	xpixpllcn = pixpllcn;
	xpixpllcp = (pixpllcs << 3) | pixpllcp;

	WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);

	WREG_DAC(MGA1064_PIX_PLLC_M, xpixpllcm);
	WREG_DAC(MGA1064_PIX_PLLC_N, xpixpllcn);
	WREG_DAC(MGA1064_PIX_PLLC_P, xpixpllcp);

	/* NOTE(review): DAC register 0x1a pulse with 20 ms delay —
	 * presumably latches the new PLL values; confirm. */
	WREG_DAC(0x1a, 0x09);
	msleep(20);
	WREG_DAC(0x1a, 0x01);
}

/*
 * Mode-setting pipeline
 */

static const struct drm_plane_helper_funcs mgag200_g200se_primary_plane_helper_funcs = {
	MGAG200_PRIMARY_PLANE_HELPER_FUNCS,
};

static const struct drm_plane_funcs mgag200_g200se_primary_plane_funcs = {
	MGAG200_PRIMARY_PLANE_FUNCS,
};

/*
 * Like the common CRTC enable path, but additionally programs the
 * bandwidth-dependent priority level before enabling the display.
 */
static void mgag200_g200se_crtc_helper_atomic_enable(struct drm_crtc *crtc,
						     struct drm_atomic_state *old_state)
{
	struct drm_device *dev = crtc->dev;
	struct mga_device *mdev = to_mga_device(dev);
	const struct mgag200_device_funcs *funcs = mdev->funcs;
	struct drm_crtc_state *crtc_state = crtc->state;
	struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
	const struct drm_format_info *format = mgag200_crtc_state->format;

	if (funcs->disable_vidrst)
		funcs->disable_vidrst(mdev);

	mgag200_set_format_regs(mdev, format);
	mgag200_set_mode_regs(mdev, adjusted_mode);

	if (funcs->pixpllc_atomic_update)
		funcs->pixpllc_atomic_update(crtc, old_state);

	mgag200_g200se_set_hiprilvl(mdev, adjusted_mode, format);

	mgag200_enable_display(mdev);

	if (funcs->enable_vidrst)
		funcs->enable_vidrst(mdev);
}

static const struct drm_crtc_helper_funcs mgag200_g200se_crtc_helper_funcs = {
	.mode_valid = mgag200_crtc_helper_mode_valid,
	.atomic_check = mgag200_crtc_helper_atomic_check,
	.atomic_flush = mgag200_crtc_helper_atomic_flush,
	.atomic_enable = mgag200_g200se_crtc_helper_atomic_enable,
	.atomic_disable = mgag200_crtc_helper_atomic_disable
};

static const struct drm_crtc_funcs mgag200_g200se_crtc_funcs = {
	MGAG200_CRTC_FUNCS,
};

static const struct drm_encoder_funcs mgag200_g200se_dac_encoder_funcs = {
	MGAG200_DAC_ENCODER_FUNCS,
};

static const struct drm_connector_helper_funcs mgag200_g200se_vga_connector_helper_funcs = {
	MGAG200_VGA_CONNECTOR_HELPER_FUNCS,
};

static const struct drm_connector_funcs mgag200_g200se_vga_connector_funcs = {
	MGAG200_VGA_CONNECTOR_FUNCS,
};

/*
 * Build the fixed display pipeline: primary plane -> CRTC -> DAC
 * encoder -> VGA connector (with DDC over i2c).
 */
static int mgag200_g200se_pipeline_init(struct mga_device *mdev)
{
	struct drm_device *dev = &mdev->base;
	struct drm_plane *primary_plane = &mdev->primary_plane;
	struct drm_crtc *crtc = &mdev->crtc;
	struct drm_encoder *encoder = &mdev->encoder;
	struct mga_i2c_chan *i2c = &mdev->i2c;
	struct drm_connector *connector = &mdev->connector;
	int ret;

	ret = drm_universal_plane_init(dev, primary_plane, 0,
				       &mgag200_g200se_primary_plane_funcs,
				       mgag200_primary_plane_formats,
				       mgag200_primary_plane_formats_size,
				       mgag200_primary_plane_fmtmods,
				       DRM_PLANE_TYPE_PRIMARY, NULL);
	if (ret) {
		drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret);
		return ret;
	}
	drm_plane_helper_add(primary_plane, &mgag200_g200se_primary_plane_helper_funcs);
	drm_plane_enable_fb_damage_clips(primary_plane);

	ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
					&mgag200_g200se_crtc_funcs, NULL);
	if (ret) {
		drm_err(dev, "drm_crtc_init_with_planes() failed: %d\n", ret);
		return ret;
	}
	drm_crtc_helper_add(crtc, &mgag200_g200se_crtc_helper_funcs);

	/* FIXME: legacy gamma tables, but atomic gamma doesn't work without */
	drm_mode_crtc_set_gamma_size(crtc, MGAG200_LUT_SIZE);
	drm_crtc_enable_color_mgmt(crtc, 0, false, MGAG200_LUT_SIZE);

	encoder->possible_crtcs = drm_crtc_mask(crtc);
	ret = drm_encoder_init(dev, encoder, &mgag200_g200se_dac_encoder_funcs,
			       DRM_MODE_ENCODER_DAC, NULL);
	if (ret) {
		drm_err(dev, "drm_encoder_init() failed: %d\n", ret);
		return ret;
	}

	ret = mgag200_i2c_init(mdev, i2c);
	if (ret) {
		drm_err(dev, "failed to add DDC bus: %d\n", ret);
		return ret;
	}

	ret = drm_connector_init_with_ddc(dev, connector,
					  &mgag200_g200se_vga_connector_funcs,
					  DRM_MODE_CONNECTOR_VGA,
					  &i2c->adapter);
	if (ret) {
		drm_err(dev, "drm_connector_init_with_ddc() failed: %d\n", ret);
		return ret;
	}
	drm_connector_helper_add(connector, &mgag200_g200se_vga_connector_helper_funcs);

	ret = drm_connector_attach_encoder(connector, encoder);
	if (ret) {
		drm_err(dev, "drm_connector_attach_encoder() failed: %d\n", ret);
		return ret;
	}

	return 0;
}

/*
 * DRM device
 */

static const struct mgag200_device_info mgag200_g200se_a_01_device_info =
	MGAG200_DEVICE_INFO_INIT(1600, 1200, 24400, false, 0, 1, true);

static const struct mgag200_device_info mgag200_g200se_a_02_device_info =
	MGAG200_DEVICE_INFO_INIT(1920, 1200, 30100, false, 0, 1, true);

static const struct mgag200_device_info mgag200_g200se_a_03_device_info =
	MGAG200_DEVICE_INFO_INIT(2048, 2048, 55000, false, 0, 1, false);

static const struct mgag200_device_info mgag200_g200se_b_01_device_info =
	MGAG200_DEVICE_INFO_INIT(1600, 1200, 24400, false, 0, 1, false);

static const struct mgag200_device_info mgag200_g200se_b_02_device_info =
	MGAG200_DEVICE_INFO_INIT(1920, 1200, 30100, false, 0, 1, false);

static const struct mgag200_device_info mgag200_g200se_b_03_device_info =
	MGAG200_DEVICE_INFO_INIT(2048, 2048, 55000, false, 0, 1, false);

/*
 * Read the model/revision id from register 0x1e24; a value of 0 is
 * treated as "no device".
 */
static int mgag200_g200se_init_unique_rev_id(struct mgag200_g200se_device *g200se)
{
	struct mga_device *mdev = &g200se->base;
	struct drm_device *dev = &mdev->base;

	/* stash G200 SE model number for later use */
	g200se->unique_rev_id = RREG32(0x1e24);
	if (!g200se->unique_rev_id)
		return -ENODEV;

	drm_dbg(dev, "G200 SE unique revision id is 0x%x\n", g200se->unique_rev_id);

	return 0;
}

static const struct mgag200_device_funcs mgag200_g200se_00_device_funcs = {
	.pixpllc_atomic_check = mgag200_g200se_00_pixpllc_atomic_check,
	.pixpllc_atomic_update = mgag200_g200se_00_pixpllc_atomic_update,
};

static const struct mgag200_device_funcs mgag200_g200se_04_device_funcs = {
	.pixpllc_atomic_check = mgag200_g200se_04_pixpllc_atomic_check,
	.pixpllc_atomic_update = mgag200_g200se_04_pixpllc_atomic_update,
};

/*
 * Allocate and fully initialize a G200SE device. The info and funcs
 * tables are selected by PCI type (SE-A/SE-B) and the unique revision
 * id read from the chip. Returns the device or an ERR_PTR.
 */
struct mga_device *mgag200_g200se_device_create(struct pci_dev *pdev, const struct drm_driver *drv,
						enum mga_type type)
{
	struct mgag200_g200se_device *g200se;
	const struct mgag200_device_info *info;
	const struct mgag200_device_funcs *funcs;
	struct mga_device *mdev;
	struct drm_device *dev;
	resource_size_t vram_available;
	int ret;

	g200se = devm_drm_dev_alloc(&pdev->dev, drv, struct mgag200_g200se_device, base.base);
	if (IS_ERR(g200se))
		return ERR_CAST(g200se);
	mdev = &g200se->base;
	dev = &mdev->base;

	pci_set_drvdata(pdev, dev);

	ret = mgag200_g200se_init_pci_options(pdev);
	if (ret)
		return ERR_PTR(ret);

	ret = mgag200_device_preinit(mdev);
	if (ret)
		return ERR_PTR(ret);

	ret = mgag200_g200se_init_unique_rev_id(g200se);
	if (ret)
		return ERR_PTR(ret);

	switch (type) {
	case G200_SE_A:
		if (g200se->unique_rev_id >= 0x03)
			info = &mgag200_g200se_a_03_device_info;
		else if (g200se->unique_rev_id >= 0x02)
			info = &mgag200_g200se_a_02_device_info;
		else
			info = &mgag200_g200se_a_01_device_info;
		break;
	case G200_SE_B:
		if (g200se->unique_rev_id >= 0x03)
			info = &mgag200_g200se_b_03_device_info;
		else if (g200se->unique_rev_id >= 0x02)
			info = &mgag200_g200se_b_02_device_info;
		else
			info = &mgag200_g200se_b_01_device_info;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	if (g200se->unique_rev_id >= 0x04)
		funcs = &mgag200_g200se_04_device_funcs;
	else
		funcs = &mgag200_g200se_00_device_funcs;

	ret = mgag200_device_init(mdev, info, funcs);
	if (ret)
		return ERR_PTR(ret);

	mgag200_g200se_init_registers(g200se);

	vram_available = mgag200_device_probe_vram(mdev);

	ret = mgag200_mode_config_init(mdev, vram_available);
	if (ret)
		return ERR_PTR(ret);

	ret = mgag200_g200se_pipeline_init(mdev);
	if (ret)
		return ERR_PTR(ret);

	drm_mode_config_reset(dev);

	return mdev;
}
linux-master
drivers/gpu/drm/mgag200/mgag200_g200se.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2015 Freescale Semiconductor, Inc. * * Freescale DCU drm device driver */ #include <linux/regmap.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_fb_dma_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_plane_helper.h> #include <drm/drm_probe_helper.h> #include "fsl_dcu_drm_drv.h" #include "fsl_dcu_drm_plane.h" static int fsl_dcu_drm_plane_index(struct drm_plane *plane) { struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private; unsigned int total_layer = fsl_dev->soc->total_layer; unsigned int index; index = drm_plane_index(plane); if (index < total_layer) return total_layer - index - 1; dev_err(fsl_dev->dev, "No more layer left\n"); return -EINVAL; } static int fsl_dcu_drm_plane_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state) { struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); struct drm_framebuffer *fb = new_plane_state->fb; if (!new_plane_state->fb || !new_plane_state->crtc) return 0; switch (fb->format->format) { case DRM_FORMAT_RGB565: case DRM_FORMAT_RGB888: case DRM_FORMAT_XRGB8888: case DRM_FORMAT_ARGB8888: case DRM_FORMAT_XRGB4444: case DRM_FORMAT_ARGB4444: case DRM_FORMAT_XRGB1555: case DRM_FORMAT_ARGB1555: case DRM_FORMAT_YUV422: return 0; default: return -EINVAL; } } static void fsl_dcu_drm_plane_atomic_disable(struct drm_plane *plane, struct drm_atomic_state *state) { struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private; unsigned int value; int index; index = fsl_dcu_drm_plane_index(plane); if (index < 0) return; regmap_read(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), &value); value &= ~DCU_LAYER_EN; regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), value); } static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane, struct drm_atomic_state *state) { struct 
fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private; struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); struct drm_framebuffer *fb = plane->state->fb; struct drm_gem_dma_object *gem; unsigned int alpha = DCU_LAYER_AB_NONE, bpp; int index; if (!fb) return; index = fsl_dcu_drm_plane_index(plane); if (index < 0) return; gem = drm_fb_dma_get_gem_obj(fb, 0); switch (fb->format->format) { case DRM_FORMAT_RGB565: bpp = FSL_DCU_RGB565; break; case DRM_FORMAT_RGB888: bpp = FSL_DCU_RGB888; break; case DRM_FORMAT_ARGB8888: alpha = DCU_LAYER_AB_WHOLE_FRAME; fallthrough; case DRM_FORMAT_XRGB8888: bpp = FSL_DCU_ARGB8888; break; case DRM_FORMAT_ARGB4444: alpha = DCU_LAYER_AB_WHOLE_FRAME; fallthrough; case DRM_FORMAT_XRGB4444: bpp = FSL_DCU_ARGB4444; break; case DRM_FORMAT_ARGB1555: alpha = DCU_LAYER_AB_WHOLE_FRAME; fallthrough; case DRM_FORMAT_XRGB1555: bpp = FSL_DCU_ARGB1555; break; case DRM_FORMAT_YUV422: bpp = FSL_DCU_YUV422; break; default: return; } regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 1), DCU_LAYER_HEIGHT(new_state->crtc_h) | DCU_LAYER_WIDTH(new_state->crtc_w)); regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 2), DCU_LAYER_POSY(new_state->crtc_y) | DCU_LAYER_POSX(new_state->crtc_x)); regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 3), gem->dma_addr); regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4), DCU_LAYER_EN | DCU_LAYER_TRANS(0xff) | DCU_LAYER_BPP(bpp) | alpha); regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 5), DCU_LAYER_CKMAX_R(0xFF) | DCU_LAYER_CKMAX_G(0xFF) | DCU_LAYER_CKMAX_B(0xFF)); regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 6), DCU_LAYER_CKMIN_R(0) | DCU_LAYER_CKMIN_G(0) | DCU_LAYER_CKMIN_B(0)); regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 7), 0); regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 8), DCU_LAYER_FG_FCOLOR(0)); regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 9), DCU_LAYER_BG_BCOLOR(0)); if (!strcmp(fsl_dev->soc->name, "ls1021a")) { regmap_write(fsl_dev->regmap, 
DCU_CTRLDESCLN(index, 10), DCU_LAYER_POST_SKIP(0) | DCU_LAYER_PRE_SKIP(0)); } return; } static const struct drm_plane_helper_funcs fsl_dcu_drm_plane_helper_funcs = { .atomic_check = fsl_dcu_drm_plane_atomic_check, .atomic_disable = fsl_dcu_drm_plane_atomic_disable, .atomic_update = fsl_dcu_drm_plane_atomic_update, }; static const struct drm_plane_funcs fsl_dcu_drm_plane_funcs = { .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, .destroy = drm_plane_helper_destroy, .disable_plane = drm_atomic_helper_disable_plane, .reset = drm_atomic_helper_plane_reset, .update_plane = drm_atomic_helper_update_plane, }; static const u32 fsl_dcu_drm_plane_formats[] = { DRM_FORMAT_RGB565, DRM_FORMAT_RGB888, DRM_FORMAT_XRGB8888, DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB4444, DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB1555, DRM_FORMAT_ARGB1555, DRM_FORMAT_YUV422, }; void fsl_dcu_drm_init_planes(struct drm_device *dev) { struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; int i, j; for (i = 0; i < fsl_dev->soc->total_layer; i++) { for (j = 1; j <= fsl_dev->soc->layer_regs; j++) regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0); } } struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev) { struct drm_plane *primary; int ret; primary = kzalloc(sizeof(*primary), GFP_KERNEL); if (!primary) { DRM_DEBUG_KMS("Failed to allocate primary plane\n"); return NULL; } /* possible_crtc's will be filled in later by crtc_init */ ret = drm_universal_plane_init(dev, primary, 0, &fsl_dcu_drm_plane_funcs, fsl_dcu_drm_plane_formats, ARRAY_SIZE(fsl_dcu_drm_plane_formats), NULL, DRM_PLANE_TYPE_PRIMARY, NULL); if (ret) { kfree(primary); primary = NULL; } drm_plane_helper_add(primary, &fsl_dcu_drm_plane_helper_funcs); return primary; }
linux-master
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2015 Toradex AG * * Stefan Agner <[email protected]> * * Freescale TCON device driver */ #include <linux/clk.h> #include <linux/io.h> #include <linux/mm.h> #include <linux/of_address.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include "fsl_tcon.h" void fsl_tcon_bypass_disable(struct fsl_tcon *tcon) { regmap_update_bits(tcon->regs, FSL_TCON_CTRL1, FSL_TCON_CTRL1_TCON_BYPASS, 0); } void fsl_tcon_bypass_enable(struct fsl_tcon *tcon) { regmap_update_bits(tcon->regs, FSL_TCON_CTRL1, FSL_TCON_CTRL1_TCON_BYPASS, FSL_TCON_CTRL1_TCON_BYPASS); } static struct regmap_config fsl_tcon_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, .name = "tcon", }; static int fsl_tcon_init_regmap(struct device *dev, struct fsl_tcon *tcon, struct device_node *np) { struct resource res; void __iomem *regs; if (of_address_to_resource(np, 0, &res)) return -EINVAL; regs = devm_ioremap_resource(dev, &res); if (IS_ERR(regs)) return PTR_ERR(regs); tcon->regs = devm_regmap_init_mmio(dev, regs, &fsl_tcon_regmap_config); return PTR_ERR_OR_ZERO(tcon->regs); } struct fsl_tcon *fsl_tcon_init(struct device *dev) { struct fsl_tcon *tcon; struct device_node *np; int ret; /* TCON node is not mandatory, some devices do not provide TCON */ np = of_parse_phandle(dev->of_node, "fsl,tcon", 0); if (!np) return NULL; tcon = devm_kzalloc(dev, sizeof(*tcon), GFP_KERNEL); if (!tcon) goto err_node_put; ret = fsl_tcon_init_regmap(dev, tcon, np); if (ret) { dev_err(dev, "Couldn't create the TCON regmap\n"); goto err_node_put; } tcon->ipg_clk = of_clk_get_by_name(np, "ipg"); if (IS_ERR(tcon->ipg_clk)) { dev_err(dev, "Couldn't get the TCON bus clock\n"); goto err_node_put; } ret = clk_prepare_enable(tcon->ipg_clk); if (ret) { dev_err(dev, "Couldn't enable the TCON clock\n"); goto err_node_put; } of_node_put(np); dev_info(dev, "Using TCON in bypass mode\n"); return tcon; err_node_put: of_node_put(np); return NULL; } void 
fsl_tcon_free(struct fsl_tcon *tcon) { clk_disable_unprepare(tcon->ipg_clk); clk_put(tcon->ipg_clk); }
linux-master
drivers/gpu/drm/fsl-dcu/fsl_tcon.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2015 Freescale Semiconductor, Inc. * * Freescale DCU drm device driver */ #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/console.h> #include <linux/io.h> #include <linux/mfd/syscon.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_modeset_helper.h> #include <drm/drm_module.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> #include "fsl_dcu_drm_crtc.h" #include "fsl_dcu_drm_drv.h" #include "fsl_tcon.h" static int legacyfb_depth = 24; module_param(legacyfb_depth, int, 0444); static bool fsl_dcu_drm_is_volatile_reg(struct device *dev, unsigned int reg) { if (reg == DCU_INT_STATUS || reg == DCU_UPDATE_MODE) return true; return false; } static const struct regmap_config fsl_dcu_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, .volatile_reg = fsl_dcu_drm_is_volatile_reg, }; static void fsl_dcu_irq_reset(struct drm_device *dev) { struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; regmap_write(fsl_dev->regmap, DCU_INT_STATUS, ~0); regmap_write(fsl_dev->regmap, DCU_INT_MASK, ~0); } static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg) { struct drm_device *dev = arg; struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; unsigned int int_status; int ret; ret = regmap_read(fsl_dev->regmap, DCU_INT_STATUS, &int_status); if (ret) { dev_err(dev->dev, "read DCU_INT_STATUS failed\n"); return IRQ_NONE; } if (int_status & DCU_INT_STATUS_VBLANK) drm_handle_vblank(dev, 0); regmap_write(fsl_dev->regmap, DCU_INT_STATUS, int_status); return IRQ_HANDLED; } static int fsl_dcu_irq_install(struct drm_device *dev, unsigned int irq) { if (irq == IRQ_NOTCONNECTED) return -ENOTCONN; 
fsl_dcu_irq_reset(dev); return request_irq(irq, fsl_dcu_drm_irq, 0, dev->driver->name, dev); } static void fsl_dcu_irq_uninstall(struct drm_device *dev) { struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; fsl_dcu_irq_reset(dev); free_irq(fsl_dev->irq, dev); } static int fsl_dcu_load(struct drm_device *dev, unsigned long flags) { struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; int ret; ret = fsl_dcu_drm_modeset_init(fsl_dev); if (ret < 0) { dev_err(dev->dev, "failed to initialize mode setting\n"); return ret; } ret = drm_vblank_init(dev, dev->mode_config.num_crtc); if (ret < 0) { dev_err(dev->dev, "failed to initialize vblank\n"); goto done_vblank; } ret = fsl_dcu_irq_install(dev, fsl_dev->irq); if (ret < 0) { dev_err(dev->dev, "failed to install IRQ handler\n"); goto done_irq; } if (legacyfb_depth != 16 && legacyfb_depth != 24 && legacyfb_depth != 32) { dev_warn(dev->dev, "Invalid legacyfb_depth. Defaulting to 24bpp\n"); legacyfb_depth = 24; } return 0; done_irq: drm_kms_helper_poll_fini(dev); drm_mode_config_cleanup(dev); done_vblank: dev->dev_private = NULL; return ret; } static void fsl_dcu_unload(struct drm_device *dev) { drm_atomic_helper_shutdown(dev); drm_kms_helper_poll_fini(dev); drm_mode_config_cleanup(dev); fsl_dcu_irq_uninstall(dev); dev->dev_private = NULL; } DEFINE_DRM_GEM_DMA_FOPS(fsl_dcu_drm_fops); static const struct drm_driver fsl_dcu_drm_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .load = fsl_dcu_load, .unload = fsl_dcu_unload, DRM_GEM_DMA_DRIVER_OPS, .fops = &fsl_dcu_drm_fops, .name = "fsl-dcu-drm", .desc = "Freescale DCU DRM", .date = "20160425", .major = 1, .minor = 1, }; #ifdef CONFIG_PM_SLEEP static int fsl_dcu_drm_pm_suspend(struct device *dev) { struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev); int ret; if (!fsl_dev) return 0; disable_irq(fsl_dev->irq); ret = drm_mode_config_helper_suspend(fsl_dev->drm); if (ret) { enable_irq(fsl_dev->irq); return ret; } 
clk_disable_unprepare(fsl_dev->clk); return 0; } static int fsl_dcu_drm_pm_resume(struct device *dev) { struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev); int ret; if (!fsl_dev) return 0; ret = clk_prepare_enable(fsl_dev->clk); if (ret < 0) { dev_err(dev, "failed to enable dcu clk\n"); return ret; } if (fsl_dev->tcon) fsl_tcon_bypass_enable(fsl_dev->tcon); fsl_dcu_drm_init_planes(fsl_dev->drm); enable_irq(fsl_dev->irq); drm_mode_config_helper_resume(fsl_dev->drm); return 0; } #endif static const struct dev_pm_ops fsl_dcu_drm_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(fsl_dcu_drm_pm_suspend, fsl_dcu_drm_pm_resume) }; static const struct fsl_dcu_soc_data fsl_dcu_ls1021a_data = { .name = "ls1021a", .total_layer = 16, .max_layer = 4, .layer_regs = LS1021A_LAYER_REG_NUM, }; static const struct fsl_dcu_soc_data fsl_dcu_vf610_data = { .name = "vf610", .total_layer = 64, .max_layer = 6, .layer_regs = VF610_LAYER_REG_NUM, }; static const struct of_device_id fsl_dcu_of_match[] = { { .compatible = "fsl,ls1021a-dcu", .data = &fsl_dcu_ls1021a_data, }, { .compatible = "fsl,vf610-dcu", .data = &fsl_dcu_vf610_data, }, { }, }; MODULE_DEVICE_TABLE(of, fsl_dcu_of_match); static int fsl_dcu_drm_probe(struct platform_device *pdev) { struct fsl_dcu_drm_device *fsl_dev; struct drm_device *drm; struct device *dev = &pdev->dev; struct resource *res; void __iomem *base; struct clk *pix_clk_in; char pix_clk_name[32]; const char *pix_clk_in_name; const struct of_device_id *id; int ret; u8 div_ratio_shift = 0; fsl_dev = devm_kzalloc(dev, sizeof(*fsl_dev), GFP_KERNEL); if (!fsl_dev) return -ENOMEM; id = of_match_node(fsl_dcu_of_match, pdev->dev.of_node); if (!id) return -ENODEV; fsl_dev->soc = id->data; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); base = devm_ioremap_resource(dev, res); if (IS_ERR(base)) { ret = PTR_ERR(base); return ret; } fsl_dev->irq = platform_get_irq(pdev, 0); if (fsl_dev->irq < 0) { dev_err(dev, "failed to get irq\n"); return fsl_dev->irq; } fsl_dev->regmap = 
devm_regmap_init_mmio(dev, base, &fsl_dcu_regmap_config); if (IS_ERR(fsl_dev->regmap)) { dev_err(dev, "regmap init failed\n"); return PTR_ERR(fsl_dev->regmap); } fsl_dev->clk = devm_clk_get(dev, "dcu"); if (IS_ERR(fsl_dev->clk)) { dev_err(dev, "failed to get dcu clock\n"); return PTR_ERR(fsl_dev->clk); } ret = clk_prepare_enable(fsl_dev->clk); if (ret < 0) { dev_err(dev, "failed to enable dcu clk\n"); return ret; } pix_clk_in = devm_clk_get(dev, "pix"); if (IS_ERR(pix_clk_in)) { /* legancy binding, use dcu clock as pixel clock input */ pix_clk_in = fsl_dev->clk; } if (of_property_read_bool(dev->of_node, "big-endian")) div_ratio_shift = 24; pix_clk_in_name = __clk_get_name(pix_clk_in); snprintf(pix_clk_name, sizeof(pix_clk_name), "%s_pix", pix_clk_in_name); fsl_dev->pix_clk = clk_register_divider(dev, pix_clk_name, pix_clk_in_name, 0, base + DCU_DIV_RATIO, div_ratio_shift, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL); if (IS_ERR(fsl_dev->pix_clk)) { dev_err(dev, "failed to register pix clk\n"); ret = PTR_ERR(fsl_dev->pix_clk); goto disable_clk; } fsl_dev->tcon = fsl_tcon_init(dev); drm = drm_dev_alloc(&fsl_dcu_drm_driver, dev); if (IS_ERR(drm)) { ret = PTR_ERR(drm); goto unregister_pix_clk; } fsl_dev->dev = dev; fsl_dev->drm = drm; fsl_dev->np = dev->of_node; drm->dev_private = fsl_dev; dev_set_drvdata(dev, fsl_dev); ret = drm_dev_register(drm, 0); if (ret < 0) goto put; drm_fbdev_dma_setup(drm, legacyfb_depth); return 0; put: drm_dev_put(drm); unregister_pix_clk: clk_unregister(fsl_dev->pix_clk); disable_clk: clk_disable_unprepare(fsl_dev->clk); return ret; } static void fsl_dcu_drm_remove(struct platform_device *pdev) { struct fsl_dcu_drm_device *fsl_dev = platform_get_drvdata(pdev); drm_dev_unregister(fsl_dev->drm); drm_dev_put(fsl_dev->drm); clk_disable_unprepare(fsl_dev->clk); clk_unregister(fsl_dev->pix_clk); } static struct platform_driver fsl_dcu_drm_platform_driver = { .probe = fsl_dcu_drm_probe, .remove_new = fsl_dcu_drm_remove, .driver = { .name = "fsl-dcu", .pm = 
&fsl_dcu_drm_pm_ops, .of_match_table = fsl_dcu_of_match, }, }; drm_module_platform_driver(fsl_dcu_drm_platform_driver); MODULE_DESCRIPTION("Freescale DCU DRM Driver"); MODULE_LICENSE("GPL");
linux-master
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2015 Freescale Semiconductor, Inc.
 *
 * Freescale DCU drm device driver
 */

#include <linux/backlight.h>
#include <linux/of_graph.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>

#include "fsl_dcu_drm_drv.h"
#include "fsl_tcon.h"

/*
 * Create the single LVDS encoder for the DCU and, when a TCON block is
 * present, put it into bypass mode so the parallel RGB/LVDS signal is
 * passed through untouched.
 */
int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
			       struct drm_crtc *crtc)
{
	struct drm_encoder *encoder = &fsl_dev->encoder;
	int ret;

	/* Only one CRTC exists on this hardware. */
	encoder->possible_crtcs = 1;

	/* Use bypass mode for parallel RGB/LVDS encoder */
	if (fsl_dev->tcon)
		fsl_tcon_bypass_enable(fsl_dev->tcon);

	ret = drm_simple_encoder_init(fsl_dev->drm, encoder,
				      DRM_MODE_ENCODER_LVDS);
	if (ret < 0)
		return ret;

	return 0;
}

static void fsl_dcu_drm_connector_destroy(struct drm_connector *connector)
{
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
}

static const struct drm_connector_funcs fsl_dcu_drm_connector_funcs = {
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.destroy = fsl_dcu_drm_connector_destroy,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.reset = drm_atomic_helper_connector_reset,
};

/* Forward the mode query to the attached panel. */
static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector)
{
	struct fsl_dcu_drm_connector *fsl_connector;

	fsl_connector = to_fsl_dcu_connector(connector);
	return drm_panel_get_modes(fsl_connector->panel, connector);
}

/*
 * Reject modes whose horizontal resolution is not a multiple of 16
 * (hardware constraint — TODO confirm against the DCU reference manual).
 */
static enum drm_mode_status
fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector,
				 struct drm_display_mode *mode)
{
	if (mode->hdisplay & 0xf)
		return MODE_ERROR;

	return MODE_OK;
}

static const struct drm_connector_helper_funcs connector_helper_funcs = {
	.get_modes = fsl_dcu_drm_connector_get_modes,
	.mode_valid = fsl_dcu_drm_connector_mode_valid,
};

/*
 * Create an LVDS connector for @panel, register it and attach it to the
 * device's encoder.  On failure the connector is torn back down in
 * reverse order via the goto-cleanup labels.
 */
static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev,
				struct drm_panel *panel)
{
	struct drm_encoder *encoder = &fsl_dev->encoder;
	struct drm_connector *connector = &fsl_dev->connector.base;
	int ret;

	fsl_dev->connector.encoder = encoder;

	ret = drm_connector_init(fsl_dev->drm, connector,
				 &fsl_dcu_drm_connector_funcs,
				 DRM_MODE_CONNECTOR_LVDS);
	if (ret < 0)
		return ret;

	drm_connector_helper_add(connector, &connector_helper_funcs);
	ret = drm_connector_register(connector);
	if (ret < 0)
		goto err_cleanup;

	ret = drm_connector_attach_encoder(connector, encoder);
	if (ret < 0)
		goto err_sysfs;

	return 0;

err_sysfs:
	drm_connector_unregister(connector);
err_cleanup:
	drm_connector_cleanup(connector);
	return ret;
}

/*
 * Wire up the display output: either a panel referenced by the legacy
 * "fsl,panel" phandle, or a panel/bridge found through the OF graph.
 */
int fsl_dcu_create_outputs(struct fsl_dcu_drm_device *fsl_dev)
{
	struct device_node *panel_node;
	struct drm_panel *panel;
	struct drm_bridge *bridge;
	int ret;

	/* This is for backward compatibility */
	panel_node = of_parse_phandle(fsl_dev->np, "fsl,panel", 0);
	if (panel_node) {
		fsl_dev->connector.panel = of_drm_find_panel(panel_node);
		of_node_put(panel_node);
		if (IS_ERR(fsl_dev->connector.panel))
			return PTR_ERR(fsl_dev->connector.panel);
		return fsl_dcu_attach_panel(fsl_dev, fsl_dev->connector.panel);
	}

	ret = drm_of_find_panel_or_bridge(fsl_dev->np, 0, 0, &panel, &bridge);
	if (ret)
		return ret;

	if (panel) {
		fsl_dev->connector.panel = panel;
		return fsl_dcu_attach_panel(fsl_dev, panel);
	}

	return drm_bridge_attach(&fsl_dev->encoder, bridge, NULL, 0);
}
linux-master
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2015 Freescale Semiconductor, Inc. * * Freescale DCU drm device driver */ #include <drm/drm_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_probe_helper.h> #include "fsl_dcu_drm_crtc.h" #include "fsl_dcu_drm_drv.h" static const struct drm_mode_config_funcs fsl_dcu_drm_mode_config_funcs = { .atomic_check = drm_atomic_helper_check, .atomic_commit = drm_atomic_helper_commit, .fb_create = drm_gem_fb_create, }; int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev) { int ret; drm_mode_config_init(fsl_dev->drm); fsl_dev->drm->mode_config.min_width = 0; fsl_dev->drm->mode_config.min_height = 0; fsl_dev->drm->mode_config.max_width = 2031; fsl_dev->drm->mode_config.max_height = 2047; fsl_dev->drm->mode_config.funcs = &fsl_dcu_drm_mode_config_funcs; ret = fsl_dcu_drm_crtc_create(fsl_dev); if (ret) goto err; ret = fsl_dcu_drm_encoder_create(fsl_dev, &fsl_dev->crtc); if (ret) goto err; ret = fsl_dcu_create_outputs(fsl_dev); if (ret) goto err; drm_mode_config_reset(fsl_dev->drm); drm_kms_helper_poll_init(fsl_dev->drm); return 0; err: drm_mode_config_cleanup(fsl_dev->drm); return ret; }
linux-master
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2015 Freescale Semiconductor, Inc.
 *
 * Freescale DCU drm device driver
 */

#include <linux/clk.h>
#include <linux/regmap.h>

#include <video/videomode.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "fsl_dcu_drm_crtc.h"
#include "fsl_dcu_drm_drv.h"
#include "fsl_dcu_drm_plane.h"

/*
 * Commit the shadowed register writes (DCU_UPDATE_MODE_READREG makes the
 * DCU latch them at the next frame) and deliver any pending vblank event.
 */
static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
					  struct drm_atomic_state *state)
{
	struct drm_device *dev = crtc->dev;
	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
	struct drm_pending_vblank_event *event = crtc->state->event;

	regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
		     DCU_UPDATE_MODE_READREG);

	if (event) {
		crtc->state->event = NULL;

		spin_lock_irq(&crtc->dev->event_lock);
		/* Arm on next vblank if possible, otherwise send now. */
		if (drm_crtc_vblank_get(crtc) == 0)
			drm_crtc_arm_vblank_event(crtc, event);
		else
			drm_crtc_send_vblank_event(crtc, event);
		spin_unlock_irq(&crtc->dev->event_lock);
	}
}

/* Disable planes and vblank, put the DCU in OFF mode, gate the pixel clock. */
static void fsl_dcu_drm_crtc_atomic_disable(struct drm_crtc *crtc,
					    struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
									      crtc);
	struct drm_device *dev = crtc->dev;
	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;

	/* always disable planes on the CRTC */
	drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true);

	drm_crtc_vblank_off(crtc);

	regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
			   DCU_MODE_DCU_MODE_MASK,
			   DCU_MODE_DCU_MODE(DCU_MODE_OFF));
	regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
		     DCU_UPDATE_MODE_READREG);
	clk_disable_unprepare(fsl_dev->pix_clk);
}

/* Ungate the pixel clock, switch the DCU to NORMAL mode, enable vblank. */
static void fsl_dcu_drm_crtc_atomic_enable(struct drm_crtc *crtc,
					   struct drm_atomic_state *state)
{
	struct drm_device *dev = crtc->dev;
	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;

	clk_prepare_enable(fsl_dev->pix_clk);
	regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
			   DCU_MODE_DCU_MODE_MASK,
			   DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
	regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
		     DCU_UPDATE_MODE_READREG);

	drm_crtc_vblank_on(crtc);
}

/*
 * Program display timings: pixel clock rate, sync/porch lengths, active
 * area, signal polarities, background color, blend mode and FIFO
 * thresholds, all derived from the CRTC's current mode.
 */
static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
	struct drm_connector *con = &fsl_dev->connector.base;
	struct drm_display_mode *mode = &crtc->state->mode;
	unsigned int pol = 0;
	struct videomode vm;

	clk_set_rate(fsl_dev->pix_clk, mode->clock * 1000);

	drm_display_mode_to_videomode(mode, &vm);

	/* INV_PXCK as default (most display sample data on rising edge) */
	if (!(con->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE))
		pol |= DCU_SYN_POL_INV_PXCK;

	if (vm.flags & DISPLAY_FLAGS_HSYNC_LOW)
		pol |= DCU_SYN_POL_INV_HS_LOW;

	if (vm.flags & DISPLAY_FLAGS_VSYNC_LOW)
		pol |= DCU_SYN_POL_INV_VS_LOW;

	regmap_write(fsl_dev->regmap, DCU_HSYN_PARA,
		     DCU_HSYN_PARA_BP(vm.hback_porch) |
		     DCU_HSYN_PARA_PW(vm.hsync_len) |
		     DCU_HSYN_PARA_FP(vm.hfront_porch));
	regmap_write(fsl_dev->regmap, DCU_VSYN_PARA,
		     DCU_VSYN_PARA_BP(vm.vback_porch) |
		     DCU_VSYN_PARA_PW(vm.vsync_len) |
		     DCU_VSYN_PARA_FP(vm.vfront_porch));
	regmap_write(fsl_dev->regmap, DCU_DISP_SIZE,
		     DCU_DISP_SIZE_DELTA_Y(vm.vactive) |
		     DCU_DISP_SIZE_DELTA_X(vm.hactive));
	regmap_write(fsl_dev->regmap, DCU_SYN_POL, pol);
	regmap_write(fsl_dev->regmap, DCU_BGND,
		     DCU_BGND_R(0) | DCU_BGND_G(0) | DCU_BGND_B(0));
	regmap_write(fsl_dev->regmap, DCU_DCU_MODE,
		     DCU_MODE_BLEND_ITER(1) | DCU_MODE_RASTER_EN);
	regmap_write(fsl_dev->regmap, DCU_THRESHOLD,
		     DCU_THRESHOLD_LS_BF_VS(BF_VS_VAL) |
		     DCU_THRESHOLD_OUT_BUF_HIGH(BUF_MAX_VAL) |
		     DCU_THRESHOLD_OUT_BUF_LOW(BUF_MIN_VAL));
	return;
}

static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = {
	.atomic_disable = fsl_dcu_drm_crtc_atomic_disable,
	.atomic_flush = fsl_dcu_drm_crtc_atomic_flush,
	.atomic_enable = fsl_dcu_drm_crtc_atomic_enable,
	.mode_set_nofb = fsl_dcu_drm_crtc_mode_set_nofb,
};

/* Unmask the vblank interrupt source in DCU_INT_MASK. */
static int fsl_dcu_drm_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
	unsigned int value;

	regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value);
	value &= ~DCU_INT_MASK_VBLANK;
	regmap_write(fsl_dev->regmap, DCU_INT_MASK, value);

	return 0;
}

/* Mask the vblank interrupt source again. */
static void fsl_dcu_drm_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
	unsigned int value;

	regmap_read(fsl_dev->regmap, DCU_INT_MASK, &value);
	value |= DCU_INT_MASK_VBLANK;
	regmap_write(fsl_dev->regmap, DCU_INT_MASK, value);
}

static const struct drm_crtc_funcs fsl_dcu_drm_crtc_funcs = {
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	.destroy = drm_crtc_cleanup,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = drm_atomic_helper_crtc_reset,
	.set_config = drm_atomic_helper_set_config,
	.enable_vblank = fsl_dcu_drm_crtc_enable_vblank,
	.disable_vblank = fsl_dcu_drm_crtc_disable_vblank,
};

/*
 * Reset all DCU layers, create the primary plane and bind it to the
 * single CRTC.  Returns 0 on success or a negative errno.
 */
int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev)
{
	struct drm_plane *primary;
	struct drm_crtc *crtc = &fsl_dev->crtc;
	int ret;

	fsl_dcu_drm_init_planes(fsl_dev->drm);

	primary = fsl_dcu_drm_primary_create_plane(fsl_dev->drm);
	if (!primary)
		return -ENOMEM;

	ret = drm_crtc_init_with_planes(fsl_dev->drm, crtc, primary, NULL,
					&fsl_dcu_drm_crtc_funcs, NULL);
	if (ret) {
		primary->funcs->destroy(primary);
		return ret;
	}

	drm_crtc_helper_add(crtc, &fsl_dcu_drm_crtc_helper_funcs);

	return 0;
}
linux-master
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2020 Amarula Solutions(India) * Author: Jagan Teki <[email protected]> */ #include <drm/drm_atomic_helper.h> #include <drm/drm_of.h> #include <drm/drm_print.h> #include <drm/drm_mipi_dsi.h> #include <linux/bitfield.h> #include <linux/bits.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/media-bus-format.h> #include <linux/module.h> #include <linux/of.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #define VENDOR_ID 0x00 #define DEVICE_ID_H 0x01 #define DEVICE_ID_L 0x02 #define VERSION_ID 0x03 #define FIRMWARE_VERSION 0x08 #define CONFIG_FINISH 0x09 #define PD_CTRL(n) (0x0a + ((n) & 0x3)) /* 0..3 */ #define RST_CTRL(n) (0x0e + ((n) & 0x1)) /* 0..1 */ #define SYS_CTRL(n) (0x10 + ((n) & 0x7)) /* 0..4 */ #define SYS_CTRL_1_CLK_PHASE_MSK GENMASK(5, 4) #define CLK_PHASE_0 0 #define CLK_PHASE_1_4 1 #define CLK_PHASE_1_2 2 #define CLK_PHASE_3_4 3 #define RGB_DRV(n) (0x18 + ((n) & 0x3)) /* 0..3 */ #define RGB_DLY(n) (0x1c + ((n) & 0x1)) /* 0..1 */ #define RGB_TEST_CTRL 0x1e #define ATE_PLL_EN 0x1f #define HACTIVE_LI 0x20 #define VACTIVE_LI 0x21 #define VACTIVE_HACTIVE_HI 0x22 #define HFP_LI 0x23 #define HSYNC_LI 0x24 #define HBP_LI 0x25 #define HFP_HSW_HBP_HI 0x26 #define HFP_HSW_HBP_HI_HFP(n) (((n) & 0x300) >> 4) #define HFP_HSW_HBP_HI_HS(n) (((n) & 0x300) >> 6) #define HFP_HSW_HBP_HI_HBP(n) (((n) & 0x300) >> 8) #define VFP 0x27 #define VSYNC 0x28 #define VBP 0x29 #define BIST_POL 0x2a #define BIST_POL_BIST_MODE(n) (((n) & 0xf) << 4) #define BIST_POL_BIST_GEN BIT(3) #define BIST_POL_HSYNC_POL BIT(2) #define BIST_POL_VSYNC_POL BIT(1) #define BIST_POL_DE_POL BIT(0) #define BIST_RED 0x2b #define BIST_GREEN 0x2c #define BIST_BLUE 0x2d #define BIST_CHESS_X 0x2e #define BIST_CHESS_Y 0x2f #define BIST_CHESS_XY_H 0x30 #define BIST_FRAME_TIME_L 0x31 #define BIST_FRAME_TIME_H 0x32 #define FIFO_MAX_ADDR_LOW 0x33 #define SYNC_EVENT_DLY 
0x34 #define HSW_MIN 0x35 #define HFP_MIN 0x36 #define LOGIC_RST_NUM 0x37 #define OSC_CTRL(n) (0x48 + ((n) & 0x7)) /* 0..5 */ #define BG_CTRL 0x4e #define LDO_PLL 0x4f #define PLL_CTRL(n) (0x50 + ((n) & 0xf)) /* 0..15 */ #define PLL_CTRL_6_EXTERNAL 0x90 #define PLL_CTRL_6_MIPI_CLK 0x92 #define PLL_CTRL_6_INTERNAL 0x93 #define PLL_REM(n) (0x60 + ((n) & 0x3)) /* 0..2 */ #define PLL_DIV(n) (0x63 + ((n) & 0x3)) /* 0..2 */ #define PLL_FRAC(n) (0x66 + ((n) & 0x3)) /* 0..2 */ #define PLL_INT(n) (0x69 + ((n) & 0x1)) /* 0..1 */ #define PLL_REF_DIV 0x6b #define PLL_REF_DIV_P(n) ((n) & 0xf) #define PLL_REF_DIV_Pe BIT(4) #define PLL_REF_DIV_S(n) (((n) & 0x7) << 5) #define PLL_SSC_P(n) (0x6c + ((n) & 0x3)) /* 0..2 */ #define PLL_SSC_STEP(n) (0x6f + ((n) & 0x3)) /* 0..2 */ #define PLL_SSC_OFFSET(n) (0x72 + ((n) & 0x3)) /* 0..3 */ #define GPIO_OEN 0x79 #define MIPI_CFG_PW 0x7a #define MIPI_CFG_PW_CONFIG_DSI 0xc1 #define MIPI_CFG_PW_CONFIG_I2C 0x3e #define GPIO_SEL(n) (0x7b + ((n) & 0x1)) /* 0..1 */ #define IRQ_SEL 0x7d #define DBG_SEL 0x7e #define DBG_SIGNAL 0x7f #define MIPI_ERR_VECTOR_L 0x80 #define MIPI_ERR_VECTOR_H 0x81 #define MIPI_ERR_VECTOR_EN_L 0x82 #define MIPI_ERR_VECTOR_EN_H 0x83 #define MIPI_MAX_SIZE_L 0x84 #define MIPI_MAX_SIZE_H 0x85 #define DSI_CTRL 0x86 #define DSI_CTRL_UNKNOWN 0x28 #define DSI_CTRL_DSI_LANES(n) ((n) & 0x3) #define MIPI_PN_SWAP 0x87 #define MIPI_PN_SWAP_CLK BIT(4) #define MIPI_PN_SWAP_D(n) BIT((n) & 0x3) #define MIPI_SOT_SYNC_BIT(n) (0x88 + ((n) & 0x1)) /* 0..1 */ #define MIPI_ULPS_CTRL 0x8a #define MIPI_CLK_CHK_VAR 0x8e #define MIPI_CLK_CHK_INI 0x8f #define MIPI_T_TERM_EN 0x90 #define MIPI_T_HS_SETTLE 0x91 #define MIPI_T_TA_SURE_PRE 0x92 #define MIPI_T_LPX_SET 0x94 #define MIPI_T_CLK_MISS 0x95 #define MIPI_INIT_TIME_L 0x96 #define MIPI_INIT_TIME_H 0x97 #define MIPI_T_CLK_TERM_EN 0x99 #define MIPI_T_CLK_SETTLE 0x9a #define MIPI_TO_HS_RX_L 0x9e #define MIPI_TO_HS_RX_H 0x9f #define MIPI_PHY(n) (0xa0 + ((n) & 0x7)) /* 0..5 */ #define MIPI_PD_RX 0xb0 
#define MIPI_PD_TERM 0xb1 #define MIPI_PD_HSRX 0xb2 #define MIPI_PD_LPTX 0xb3 #define MIPI_PD_LPRX 0xb4 #define MIPI_PD_CK_LANE 0xb5 #define MIPI_FORCE_0 0xb6 #define MIPI_RST_CTRL 0xb7 #define MIPI_RST_NUM 0xb8 #define MIPI_DBG_SET(n) (0xc0 + ((n) & 0xf)) /* 0..9 */ #define MIPI_DBG_SEL 0xe0 #define MIPI_DBG_DATA 0xe1 #define MIPI_ATE_TEST_SEL 0xe2 #define MIPI_ATE_STATUS(n) (0xe3 + ((n) & 0x1)) /* 0..1 */ struct chipone { struct device *dev; struct regmap *regmap; struct i2c_client *client; struct drm_bridge bridge; struct drm_display_mode mode; struct drm_bridge *panel_bridge; struct mipi_dsi_device *dsi; struct gpio_desc *enable_gpio; struct regulator *vdd1; struct regulator *vdd2; struct regulator *vdd3; struct clk *refclk; unsigned long refclk_rate; bool interface_i2c; }; static const struct regmap_range chipone_dsi_readable_ranges[] = { regmap_reg_range(VENDOR_ID, VERSION_ID), regmap_reg_range(FIRMWARE_VERSION, PLL_SSC_OFFSET(3)), regmap_reg_range(GPIO_OEN, MIPI_ULPS_CTRL), regmap_reg_range(MIPI_CLK_CHK_VAR, MIPI_T_TA_SURE_PRE), regmap_reg_range(MIPI_T_LPX_SET, MIPI_INIT_TIME_H), regmap_reg_range(MIPI_T_CLK_TERM_EN, MIPI_T_CLK_SETTLE), regmap_reg_range(MIPI_TO_HS_RX_L, MIPI_PHY(5)), regmap_reg_range(MIPI_PD_RX, MIPI_RST_NUM), regmap_reg_range(MIPI_DBG_SET(0), MIPI_DBG_SET(9)), regmap_reg_range(MIPI_DBG_SEL, MIPI_ATE_STATUS(1)), }; static const struct regmap_access_table chipone_dsi_readable_table = { .yes_ranges = chipone_dsi_readable_ranges, .n_yes_ranges = ARRAY_SIZE(chipone_dsi_readable_ranges), }; static const struct regmap_range chipone_dsi_writeable_ranges[] = { regmap_reg_range(CONFIG_FINISH, PLL_SSC_OFFSET(3)), regmap_reg_range(GPIO_OEN, MIPI_ULPS_CTRL), regmap_reg_range(MIPI_CLK_CHK_VAR, MIPI_T_TA_SURE_PRE), regmap_reg_range(MIPI_T_LPX_SET, MIPI_INIT_TIME_H), regmap_reg_range(MIPI_T_CLK_TERM_EN, MIPI_T_CLK_SETTLE), regmap_reg_range(MIPI_TO_HS_RX_L, MIPI_PHY(5)), regmap_reg_range(MIPI_PD_RX, MIPI_RST_NUM), regmap_reg_range(MIPI_DBG_SET(0), 
/*
 * regmap_bus .read callback used when the chip is configured over DSI.
 * The read request payload is two bytes: the low byte is the register
 * address, the high byte is the number of bytes to read.
 *
 * NOTE(review): passing &reg16 as the 2-byte TX buffer relies on the
 * host being little-endian so that the register byte goes out first —
 * confirm if this driver is ever built for a big-endian platform.
 *
 * Returns 0 on success, -EINVAL if fewer than val_size bytes came back.
 */
static int chipone_dsi_read(void *context,
			    const void *reg, size_t reg_size,
			    void *val, size_t val_size)
{
	struct mipi_dsi_device *dsi = context;
	/* high byte = read length, low byte = register address */
	const u16 reg16 = (val_size << 8) | *(u8 *)reg;
	int ret;

	ret = mipi_dsi_generic_read(dsi, &reg16, 2, val, val_size);

	return ret == val_size ? 0 : -EINVAL;
}
/*
 * Program the internal fractional PLL so that its output matches the DPI
 * pixel clock of @mode as closely as possible.
 *
 * Exhaustively searches the (P, M, S) divider space, keeping the
 * combination with the smallest absolute error versus the requested
 * pixel clock, then writes the winning configuration to the PLL
 * registers and selects the PLL input clock source.
 */
static void chipone_configure_pll(struct chipone *icn,
				  const struct drm_display_mode *mode)
{
	unsigned int best_p = 0, best_m = 0, best_s = 0;
	unsigned int mode_clock = mode->clock * 1000;	/* kHz -> Hz */
	unsigned int delta, min_delta = 0xffffffff;
	unsigned int freq_p, freq_s, freq_out;
	unsigned int p_min, p_max;
	unsigned int p, m, s;
	unsigned int fin;
	bool best_p_pot;
	u8 ref_div;

	/*
	 * DSI byte clock frequency (input into PLL) is calculated as:
	 *  DSI_CLK = HS clock / 4
	 *
	 * DPI pixel clock frequency (output from PLL) is mode clock.
	 *
	 * The chip contains fractional PLL which works as follows:
	 *  DPI_CLK = ((DSI_CLK / P) * M) / S
	 * P is pre-divider, register PLL_REF_DIV[3:0] is 1:n divider
	 *                   register PLL_REF_DIV[4] is extra 1:2 divider
	 * M is integer multiplier, register PLL_INT(0) is multiplier
	 * S is post-divider, register PLL_REF_DIV[7:5] is 2^(n+1) divider
	 *
	 * It seems the PLL input clock after applying P pre-divider have
	 * to be lower than 20 MHz.
	 */
	if (icn->refclk)
		fin = icn->refclk_rate;
	else
		fin = icn->dsi->hs_rate / 4; /* in Hz */

	/* Minimum value of P predivider for PLL input in 5..20 MHz */
	p_min = clamp(DIV_ROUND_UP(fin, 20000000), 1U, 31U);
	p_max = clamp(fin / 5000000, 1U, 31U);

	for (p = p_min; p < p_max; p++) {	/* PLL_REF_DIV[4,3:0] */
		if (p > 16 && p & 1)		/* P > 16 uses extra /2 */
			continue;
		freq_p = fin / p;
		if (freq_p == 0)		/* Divider too high */
			break;

		for (s = 0; s < 0x7; s++) {	/* PLL_REF_DIV[7:5] */
			freq_s = freq_p / BIT(s + 1);
			if (freq_s == 0)	/* Divider too high */
				break;

			/* Integer multiplier that lands closest below target */
			m = mode_clock / freq_s;

			/* Multiplier is 8 bit */
			if (m > 0xff)
				continue;

			/* Limit PLL VCO frequency to 1 GHz */
			freq_out = (fin * m) / p;
			if (freq_out > 1000000000)
				continue;

			/* Apply post-divider */
			freq_out /= BIT(s + 1);

			/* Track the candidate with the smallest error */
			delta = abs(mode_clock - freq_out);
			if (delta < min_delta) {
				best_p = p;
				best_m = m;
				best_s = s;
				min_delta = delta;
			}
		}
	}

	/* Even P can be expressed as (P/2) with the extra /2 bit set */
	best_p_pot = !(best_p & 1);

	dev_dbg(icn->dev,
		"PLL: P[3:0]=%d P[4]=2*%d M=%d S[7:5]=2^%d delta=%d => DSI f_in(%s)=%d Hz ; DPI f_out=%d Hz\n",
		best_p >> best_p_pot, best_p_pot, best_m, best_s + 1,
		min_delta, icn->refclk ? "EXT" : "DSI", fin,
		(fin * best_m) / (best_p << (best_s + 1)));

	ref_div = PLL_REF_DIV_P(best_p >> best_p_pot) | PLL_REF_DIV_S(best_s);
	if (best_p_pot)			/* Prefer /2 pre-divider */
		ref_div |= PLL_REF_DIV_Pe;

	/* Clock source selection either external clock or MIPI DSI clock lane */
	chipone_writeb(icn, PLL_CTRL(6),
		       icn->refclk ? PLL_CTRL_6_EXTERNAL : PLL_CTRL_6_MIPI_CLK);
	chipone_writeb(icn, PLL_REF_DIV, ref_div);
	chipone_writeb(icn, PLL_INT(0), best_m);
}
chipone_writeb(icn, VSYNC, mode->vsync_end - mode->vsync_start); chipone_writeb(icn, VBP, mode->vtotal - mode->vsync_end); /* dsi specific sequence */ chipone_writeb(icn, SYNC_EVENT_DLY, 0x80); chipone_writeb(icn, HFP_MIN, hfp & 0xff); /* DSI data lane count */ chipone_writeb(icn, DSI_CTRL, DSI_CTRL_UNKNOWN | DSI_CTRL_DSI_LANES(icn->dsi->lanes - 1)); chipone_writeb(icn, MIPI_PD_CK_LANE, 0xa0); chipone_writeb(icn, PLL_CTRL(12), 0xff); chipone_writeb(icn, MIPI_PN_SWAP, 0x00); /* DPI HS/VS/DE polarity */ pol = ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? BIST_POL_HSYNC_POL : 0) | ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? BIST_POL_VSYNC_POL : 0) | ((bus_flags & DRM_BUS_FLAG_DE_HIGH) ? BIST_POL_DE_POL : 0); chipone_writeb(icn, BIST_POL, pol); /* Configure PLL settings */ chipone_configure_pll(icn, mode); chipone_writeb(icn, SYS_CTRL(0), 0x40); sys_ctrl_1 = 0x88; if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE) sys_ctrl_1 |= FIELD_PREP(SYS_CTRL_1_CLK_PHASE_MSK, CLK_PHASE_0); else sys_ctrl_1 |= FIELD_PREP(SYS_CTRL_1_CLK_PHASE_MSK, CLK_PHASE_1_2); chipone_writeb(icn, SYS_CTRL(1), sys_ctrl_1); /* icn6211 specific sequence */ chipone_writeb(icn, MIPI_FORCE_0, 0x20); chipone_writeb(icn, PLL_CTRL(1), 0x20); chipone_writeb(icn, CONFIG_FINISH, 0x10); usleep_range(10000, 11000); } static void chipone_atomic_pre_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct chipone *icn = bridge_to_chipone(bridge); int ret; if (icn->vdd1) { ret = regulator_enable(icn->vdd1); if (ret) DRM_DEV_ERROR(icn->dev, "failed to enable VDD1 regulator: %d\n", ret); } if (icn->vdd2) { ret = regulator_enable(icn->vdd2); if (ret) DRM_DEV_ERROR(icn->dev, "failed to enable VDD2 regulator: %d\n", ret); } if (icn->vdd3) { ret = regulator_enable(icn->vdd3); if (ret) DRM_DEV_ERROR(icn->dev, "failed to enable VDD3 regulator: %d\n", ret); } ret = clk_prepare_enable(icn->refclk); if (ret) DRM_DEV_ERROR(icn->dev, "failed to enable RECLK clock: %d\n", ret); 
gpiod_set_value(icn->enable_gpio, 1); usleep_range(10000, 11000); } static void chipone_atomic_post_disable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct chipone *icn = bridge_to_chipone(bridge); clk_disable_unprepare(icn->refclk); if (icn->vdd1) regulator_disable(icn->vdd1); if (icn->vdd2) regulator_disable(icn->vdd2); if (icn->vdd3) regulator_disable(icn->vdd3); gpiod_set_value(icn->enable_gpio, 0); } static void chipone_mode_set(struct drm_bridge *bridge, const struct drm_display_mode *mode, const struct drm_display_mode *adjusted_mode) { struct chipone *icn = bridge_to_chipone(bridge); drm_mode_copy(&icn->mode, adjusted_mode); }; static int chipone_dsi_attach(struct chipone *icn) { struct mipi_dsi_device *dsi = icn->dsi; struct device *dev = icn->dev; int dsi_lanes, ret; dsi_lanes = drm_of_get_data_lanes_count_ep(dev->of_node, 0, 0, 1, 4); /* * If the 'data-lanes' property does not exist in DT or is invalid, * default to previously hard-coded behavior, which was 4 data lanes. 
*/ if (dsi_lanes < 0) icn->dsi->lanes = 4; else icn->dsi->lanes = dsi_lanes; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET; dsi->hs_rate = 500000000; dsi->lp_rate = 16000000; ret = mipi_dsi_attach(dsi); if (ret < 0) dev_err(icn->dev, "failed to attach dsi\n"); return ret; } static int chipone_dsi_host_attach(struct chipone *icn) { struct device *dev = icn->dev; struct device_node *host_node; struct device_node *endpoint; struct mipi_dsi_device *dsi; struct mipi_dsi_host *host; int ret = 0; const struct mipi_dsi_device_info info = { .type = "chipone", .channel = 0, .node = NULL, }; endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0); host_node = of_graph_get_remote_port_parent(endpoint); of_node_put(endpoint); if (!host_node) return -EINVAL; host = of_find_mipi_dsi_host_by_node(host_node); of_node_put(host_node); if (!host) { dev_err(dev, "failed to find dsi host\n"); return -EPROBE_DEFER; } dsi = mipi_dsi_device_register_full(host, &info); if (IS_ERR(dsi)) { return dev_err_probe(dev, PTR_ERR(dsi), "failed to create dsi device\n"); } icn->dsi = dsi; ret = chipone_dsi_attach(icn); if (ret < 0) mipi_dsi_device_unregister(dsi); return ret; } static int chipone_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct chipone *icn = bridge_to_chipone(bridge); return drm_bridge_attach(bridge->encoder, icn->panel_bridge, bridge, flags); } #define MAX_INPUT_SEL_FORMATS 1 static u32 * chipone_atomic_get_input_bus_fmts(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state, u32 output_fmt, unsigned int *num_input_fmts) { u32 *input_fmts; *num_input_fmts = 0; input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts), GFP_KERNEL); if (!input_fmts) return NULL; /* This is the DSI-end bus format */ input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24; *num_input_fmts 
/*
 * Parse all device-tree resources the bridge needs:
 * - optional "refclk" clock (rate must be within 10..154 MHz when present),
 * - optional VDD1/VDD2/VDD3 supplies (absence is only a debug message,
 *   but probe deferral is still propagated),
 * - mandatory "enable" GPIO (driven low initially),
 * - the downstream panel/bridge on OF graph port 1.
 *
 * Returns 0 on success or a negative errno.
 */
static int chipone_parse_dt(struct chipone *icn)
{
	struct device *dev = icn->dev;
	int ret;

	icn->refclk = devm_clk_get_optional(dev, "refclk");
	if (IS_ERR(icn->refclk)) {
		ret = PTR_ERR(icn->refclk);
		DRM_DEV_ERROR(dev, "failed to get REFCLK clock: %d\n", ret);
		return ret;
	} else if (icn->refclk) {
		/* Cache the rate; chipone_configure_pll() uses it as f_in */
		icn->refclk_rate = clk_get_rate(icn->refclk);
		if (icn->refclk_rate < 10000000 || icn->refclk_rate > 154000000) {
			/* NOTE(review): refclk_rate is unsigned long; %lu would
			 * be the matching specifier, though the checked range
			 * makes %ld harmless here.
			 */
			DRM_DEV_ERROR(dev, "REFCLK out of range: %ld Hz\n",
				      icn->refclk_rate);
			return -EINVAL;
		}
	}

	/* Supplies are optional: clear the pointer on absence so the
	 * enable/disable paths can use a simple NULL check.
	 */
	icn->vdd1 = devm_regulator_get_optional(dev, "vdd1");
	if (IS_ERR(icn->vdd1)) {
		ret = PTR_ERR(icn->vdd1);
		if (ret == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		icn->vdd1 = NULL;
		DRM_DEV_DEBUG(dev, "failed to get VDD1 regulator: %d\n", ret);
	}

	icn->vdd2 = devm_regulator_get_optional(dev, "vdd2");
	if (IS_ERR(icn->vdd2)) {
		ret = PTR_ERR(icn->vdd2);
		if (ret == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		icn->vdd2 = NULL;
		DRM_DEV_DEBUG(dev, "failed to get VDD2 regulator: %d\n", ret);
	}

	icn->vdd3 = devm_regulator_get_optional(dev, "vdd3");
	if (IS_ERR(icn->vdd3)) {
		ret = PTR_ERR(icn->vdd3);
		if (ret == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		icn->vdd3 = NULL;
		DRM_DEV_DEBUG(dev, "failed to get VDD3 regulator: %d\n", ret);
	}

	icn->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(icn->enable_gpio)) {
		DRM_DEV_ERROR(dev, "failed to get enable GPIO\n");
		return PTR_ERR(icn->enable_gpio);
	}

	/* Output side: the panel (or next bridge) sits on port 1 */
	icn->panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
	if (IS_ERR(icn->panel_bridge))
		return PTR_ERR(icn->panel_bridge);

	return 0;
}
/*
 * I2C device ID table for non-OF matching.
 * Made const: struct i2c_driver::id_table is a pointer-to-const, the
 * I2C core never writes to the table, and const lets it live in
 * read-only data.
 */
static const struct i2c_device_id chipone_i2c_id[] = {
	{ "chipone,icn6211" },
	{},
};
MODULE_DEVICE_TABLE(i2c, chipone_i2c_id);
linux-master
drivers/gpu/drm/bridge/chipone-icn6211.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2016 Laurent Pinchart <[email protected]> * Copyright (C) 2017 Broadcom */ #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_connector.h> #include <drm/drm_encoder.h> #include <drm/drm_managed.h> #include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> struct panel_bridge { struct drm_bridge bridge; struct drm_connector connector; struct drm_panel *panel; u32 connector_type; }; static inline struct panel_bridge * drm_bridge_to_panel_bridge(struct drm_bridge *bridge) { return container_of(bridge, struct panel_bridge, bridge); } static inline struct panel_bridge * drm_connector_to_panel_bridge(struct drm_connector *connector) { return container_of(connector, struct panel_bridge, connector); } static int panel_bridge_connector_get_modes(struct drm_connector *connector) { struct panel_bridge *panel_bridge = drm_connector_to_panel_bridge(connector); return drm_panel_get_modes(panel_bridge->panel, connector); } static const struct drm_connector_helper_funcs panel_bridge_connector_helper_funcs = { .get_modes = panel_bridge_connector_get_modes, }; static const struct drm_connector_funcs panel_bridge_connector_funcs = { .reset = drm_atomic_helper_connector_reset, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = drm_connector_cleanup, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; static int panel_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge); struct drm_connector *connector = &panel_bridge->connector; int ret; if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) return 0; if (!bridge->encoder) { DRM_ERROR("Missing encoder\n"); return -ENODEV; } drm_connector_helper_add(connector, 
&panel_bridge_connector_helper_funcs); ret = drm_connector_init(bridge->dev, connector, &panel_bridge_connector_funcs, panel_bridge->connector_type); if (ret) { DRM_ERROR("Failed to initialize connector\n"); return ret; } drm_panel_bridge_set_orientation(connector, bridge); drm_connector_attach_encoder(&panel_bridge->connector, bridge->encoder); if (bridge->dev->registered) { if (connector->funcs->reset) connector->funcs->reset(connector); drm_connector_register(connector); } return 0; } static void panel_bridge_detach(struct drm_bridge *bridge) { struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge); struct drm_connector *connector = &panel_bridge->connector; /* * Cleanup the connector if we know it was initialized. * * FIXME: This wouldn't be needed if the panel_bridge structure was * allocated with drmm_kzalloc(). This might be tricky since the * drm_device pointer can only be retrieved when the bridge is attached. */ if (connector->dev) drm_connector_cleanup(connector); } static void panel_bridge_atomic_pre_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge); struct drm_atomic_state *atomic_state = old_bridge_state->base.state; struct drm_encoder *encoder = bridge->encoder; struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state; crtc = drm_atomic_get_new_crtc_for_encoder(atomic_state, encoder); if (!crtc) return; old_crtc_state = drm_atomic_get_old_crtc_state(atomic_state, crtc); if (old_crtc_state && old_crtc_state->self_refresh_active) return; drm_panel_prepare(panel_bridge->panel); } static void panel_bridge_atomic_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge); struct drm_atomic_state *atomic_state = old_bridge_state->base.state; struct drm_encoder *encoder = bridge->encoder; struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state; 
/*
 * Bridge .atomic_disable: turn the panel off, unless the CRTC is only
 * transitioning into self-refresh, in which case the panel must stay lit.
 */
static void panel_bridge_atomic_disable(struct drm_bridge *bridge,
					struct drm_bridge_state *old_bridge_state)
{
	struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
	struct drm_atomic_state *atomic_state = old_bridge_state->base.state;
	struct drm_encoder *encoder = bridge->encoder;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;

	/* The encoder is being disabled, so look up the CRTC it was on */
	crtc = drm_atomic_get_old_crtc_for_encoder(atomic_state, encoder);
	if (!crtc)
		return;

	/* Entering self-refresh: keep the panel enabled */
	new_crtc_state = drm_atomic_get_new_crtc_state(atomic_state, crtc);
	if (new_crtc_state && new_crtc_state->self_refresh_active)
		return;

	drm_panel_disable(panel_bridge->panel);
}
if (panel->funcs->debugfs_init) panel->funcs->debugfs_init(panel, root); } static const struct drm_bridge_funcs panel_bridge_bridge_funcs = { .attach = panel_bridge_attach, .detach = panel_bridge_detach, .atomic_pre_enable = panel_bridge_atomic_pre_enable, .atomic_enable = panel_bridge_atomic_enable, .atomic_disable = panel_bridge_atomic_disable, .atomic_post_disable = panel_bridge_atomic_post_disable, .get_modes = panel_bridge_get_modes, .atomic_reset = drm_atomic_helper_bridge_reset, .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt, .debugfs_init = panel_bridge_debugfs_init, }; /** * drm_bridge_is_panel - Checks if a drm_bridge is a panel_bridge. * * @bridge: The drm_bridge to be checked. * * Returns true if the bridge is a panel bridge, or false otherwise. */ bool drm_bridge_is_panel(const struct drm_bridge *bridge) { return bridge->funcs == &panel_bridge_bridge_funcs; } EXPORT_SYMBOL(drm_bridge_is_panel); /** * drm_panel_bridge_add - Creates a &drm_bridge and &drm_connector that * just calls the appropriate functions from &drm_panel. * * @panel: The drm_panel being wrapped. Must be non-NULL. * * For drivers converting from directly using drm_panel: The expected * usage pattern is that during either encoder module probe or DSI * host attach, a drm_panel will be looked up through * drm_of_find_panel_or_bridge(). drm_panel_bridge_add() is used to * wrap that panel in the new bridge, and the result can then be * passed to drm_bridge_attach(). The drm_panel_prepare() and related * functions can be dropped from the encoder driver (they're now * called by the KMS helpers before calling into the encoder), along * with connector creation. When done with the bridge (after * drm_mode_config_cleanup() if the bridge has already been attached), then * drm_panel_bridge_remove() to free it. 
* * The connector type is set to @panel->connector_type, which must be set to a * known type. Calling this function with a panel whose connector type is * DRM_MODE_CONNECTOR_Unknown will return ERR_PTR(-EINVAL). * * See devm_drm_panel_bridge_add() for an automatically managed version of this * function. */ struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel) { if (WARN_ON(panel->connector_type == DRM_MODE_CONNECTOR_Unknown)) return ERR_PTR(-EINVAL); return drm_panel_bridge_add_typed(panel, panel->connector_type); } EXPORT_SYMBOL(drm_panel_bridge_add); /** * drm_panel_bridge_add_typed - Creates a &drm_bridge and &drm_connector with * an explicit connector type. * @panel: The drm_panel being wrapped. Must be non-NULL. * @connector_type: The connector type (DRM_MODE_CONNECTOR_*) * * This is just like drm_panel_bridge_add(), but forces the connector type to * @connector_type instead of infering it from the panel. * * This function is deprecated and should not be used in new drivers. Use * drm_panel_bridge_add() instead, and fix panel drivers as necessary if they * don't report a connector type. */ struct drm_bridge *drm_panel_bridge_add_typed(struct drm_panel *panel, u32 connector_type) { struct panel_bridge *panel_bridge; if (!panel) return ERR_PTR(-EINVAL); panel_bridge = devm_kzalloc(panel->dev, sizeof(*panel_bridge), GFP_KERNEL); if (!panel_bridge) return ERR_PTR(-ENOMEM); panel_bridge->connector_type = connector_type; panel_bridge->panel = panel; panel_bridge->bridge.funcs = &panel_bridge_bridge_funcs; #ifdef CONFIG_OF panel_bridge->bridge.of_node = panel->dev->of_node; #endif panel_bridge->bridge.ops = DRM_BRIDGE_OP_MODES; panel_bridge->bridge.type = connector_type; drm_bridge_add(&panel_bridge->bridge); return &panel_bridge->bridge; } EXPORT_SYMBOL(drm_panel_bridge_add_typed); /** * drm_panel_bridge_remove - Unregisters and frees a drm_bridge * created by drm_panel_bridge_add(). * * @bridge: The drm_bridge being freed. 
*/ void drm_panel_bridge_remove(struct drm_bridge *bridge) { struct panel_bridge *panel_bridge; if (!bridge) return; if (bridge->funcs != &panel_bridge_bridge_funcs) return; panel_bridge = drm_bridge_to_panel_bridge(bridge); drm_bridge_remove(bridge); devm_kfree(panel_bridge->panel->dev, bridge); } EXPORT_SYMBOL(drm_panel_bridge_remove); /** * drm_panel_bridge_set_orientation - Set the connector's panel orientation * from the bridge that can be transformed to panel bridge. * * @connector: The connector to be set panel orientation. * @bridge: The drm_bridge to be transformed to panel bridge. * * Returns 0 on success, negative errno on failure. */ int drm_panel_bridge_set_orientation(struct drm_connector *connector, struct drm_bridge *bridge) { struct panel_bridge *panel_bridge; panel_bridge = drm_bridge_to_panel_bridge(bridge); return drm_connector_set_orientation_from_panel(connector, panel_bridge->panel); } EXPORT_SYMBOL(drm_panel_bridge_set_orientation); static void devm_drm_panel_bridge_release(struct device *dev, void *res) { struct drm_bridge **bridge = res; drm_panel_bridge_remove(*bridge); } /** * devm_drm_panel_bridge_add - Creates a managed &drm_bridge and &drm_connector * that just calls the appropriate functions from &drm_panel. * @dev: device to tie the bridge lifetime to * @panel: The drm_panel being wrapped. Must be non-NULL. * * This is the managed version of drm_panel_bridge_add() which automatically * calls drm_panel_bridge_remove() when @dev is unbound. */ struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev, struct drm_panel *panel) { if (WARN_ON(panel->connector_type == DRM_MODE_CONNECTOR_Unknown)) return ERR_PTR(-EINVAL); return devm_drm_panel_bridge_add_typed(dev, panel, panel->connector_type); } EXPORT_SYMBOL(devm_drm_panel_bridge_add); /** * devm_drm_panel_bridge_add_typed - Creates a managed &drm_bridge and * &drm_connector with an explicit connector type. 
/* drmm action: tear down the panel bridge when the DRM device goes away. */
static void drmm_drm_panel_bridge_release(struct drm_device *drm, void *ptr)
{
	struct drm_bridge *bridge = ptr;

	drm_panel_bridge_remove(bridge);
}
*/ struct drm_bridge *drmm_panel_bridge_add(struct drm_device *drm, struct drm_panel *panel) { struct drm_bridge *bridge; int ret; bridge = drm_panel_bridge_add_typed(panel, panel->connector_type); if (IS_ERR(bridge)) return bridge; ret = drmm_add_action_or_reset(drm, drmm_drm_panel_bridge_release, bridge); if (ret) return ERR_PTR(ret); bridge->pre_enable_prev_first = panel->prepare_prev_first; return bridge; } EXPORT_SYMBOL(drmm_panel_bridge_add); /** * drm_panel_bridge_connector - return the connector for the panel bridge * @bridge: The drm_bridge. * * drm_panel_bridge creates the connector. * This function gives external access to the connector. * * Returns: Pointer to drm_connector */ struct drm_connector *drm_panel_bridge_connector(struct drm_bridge *bridge) { struct panel_bridge *panel_bridge; panel_bridge = drm_bridge_to_panel_bridge(bridge); return &panel_bridge->connector; } EXPORT_SYMBOL(drm_panel_bridge_connector); #ifdef CONFIG_OF /** * devm_drm_of_get_bridge - Return next bridge in the chain * @dev: device to tie the bridge lifetime to * @np: device tree node containing encoder output ports * @port: port in the device tree node * @endpoint: endpoint in the device tree node * * Given a DT node's port and endpoint number, finds the connected node * and returns the associated bridge if any, or creates and returns a * drm panel bridge instance if a panel is connected. * * Returns a pointer to the bridge if successful, or an error pointer * otherwise. 
/**
 * drmm_of_get_bridge - Return next bridge in the chain
 * @drm: device to tie the bridge lifetime to
 * @np: device tree node containing encoder output ports
 * @port: port in the device tree node
 * @endpoint: endpoint in the device tree node
 *
 * Given a DT node's port and endpoint number, finds the connected node
 * and returns the associated bridge if any, or creates and returns a
 * drm panel bridge instance if a panel is connected.
 *
 * Returns a drmm managed pointer to the bridge if successful, or an error
 * pointer otherwise.
 */
struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm,
				      struct device_node *np,
				      u32 port, u32 endpoint)
{
	struct drm_panel *panel;
	struct drm_bridge *bridge;
	int err;

	err = drm_of_find_panel_or_bridge(np, port, endpoint, &panel, &bridge);
	if (err)
		return ERR_PTR(err);

	/* A bare panel gets wrapped in a DRM-managed panel bridge */
	return panel ? drmm_panel_bridge_add(drm, panel) : bridge;
}
linux-master
drivers/gpu/drm/bridge/panel.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2018, The Linux Foundation. All rights reserved. * datasheet: https://www.ti.com/lit/ds/symlink/sn65dsi86.pdf */ #include <linux/atomic.h> #include <linux/auxiliary_bus.h> #include <linux/bitfield.h> #include <linux/bits.h> #include <linux/clk.h> #include <linux/debugfs.h> #include <linux/gpio/consumer.h> #include <linux/gpio/driver.h> #include <linux/i2c.h> #include <linux/iopoll.h> #include <linux/module.h> #include <linux/of_graph.h> #include <linux/pm_runtime.h> #include <linux/pwm.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <asm/unaligned.h> #include <drm/display/drm_dp_aux_bus.h> #include <drm/display/drm_dp_helper.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_bridge_connector.h> #include <drm/drm_edid.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #define SN_DEVICE_REV_REG 0x08 #define SN_DPPLL_SRC_REG 0x0A #define DPPLL_CLK_SRC_DSICLK BIT(0) #define REFCLK_FREQ_MASK GENMASK(3, 1) #define REFCLK_FREQ(x) ((x) << 1) #define DPPLL_SRC_DP_PLL_LOCK BIT(7) #define SN_PLL_ENABLE_REG 0x0D #define SN_DSI_LANES_REG 0x10 #define CHA_DSI_LANES_MASK GENMASK(4, 3) #define CHA_DSI_LANES(x) ((x) << 3) #define SN_DSIA_CLK_FREQ_REG 0x12 #define SN_CHA_ACTIVE_LINE_LENGTH_LOW_REG 0x20 #define SN_CHA_VERTICAL_DISPLAY_SIZE_LOW_REG 0x24 #define SN_CHA_HSYNC_PULSE_WIDTH_LOW_REG 0x2C #define SN_CHA_HSYNC_PULSE_WIDTH_HIGH_REG 0x2D #define CHA_HSYNC_POLARITY BIT(7) #define SN_CHA_VSYNC_PULSE_WIDTH_LOW_REG 0x30 #define SN_CHA_VSYNC_PULSE_WIDTH_HIGH_REG 0x31 #define CHA_VSYNC_POLARITY BIT(7) #define SN_CHA_HORIZONTAL_BACK_PORCH_REG 0x34 #define SN_CHA_VERTICAL_BACK_PORCH_REG 0x36 #define SN_CHA_HORIZONTAL_FRONT_PORCH_REG 0x38 #define SN_CHA_VERTICAL_FRONT_PORCH_REG 0x3A #define SN_LN_ASSIGN_REG 0x59 #define LN_ASSIGN_WIDTH 2 #define 
SN_ENH_FRAME_REG 0x5A #define VSTREAM_ENABLE BIT(3) #define LN_POLRS_OFFSET 4 #define LN_POLRS_MASK 0xf0 #define SN_DATA_FORMAT_REG 0x5B #define BPP_18_RGB BIT(0) #define SN_HPD_DISABLE_REG 0x5C #define HPD_DISABLE BIT(0) #define HPD_DEBOUNCED_STATE BIT(4) #define SN_GPIO_IO_REG 0x5E #define SN_GPIO_INPUT_SHIFT 4 #define SN_GPIO_OUTPUT_SHIFT 0 #define SN_GPIO_CTRL_REG 0x5F #define SN_GPIO_MUX_INPUT 0 #define SN_GPIO_MUX_OUTPUT 1 #define SN_GPIO_MUX_SPECIAL 2 #define SN_GPIO_MUX_MASK 0x3 #define SN_AUX_WDATA_REG(x) (0x64 + (x)) #define SN_AUX_ADDR_19_16_REG 0x74 #define SN_AUX_ADDR_15_8_REG 0x75 #define SN_AUX_ADDR_7_0_REG 0x76 #define SN_AUX_ADDR_MASK GENMASK(19, 0) #define SN_AUX_LENGTH_REG 0x77 #define SN_AUX_CMD_REG 0x78 #define AUX_CMD_SEND BIT(0) #define AUX_CMD_REQ(x) ((x) << 4) #define SN_AUX_RDATA_REG(x) (0x79 + (x)) #define SN_SSC_CONFIG_REG 0x93 #define DP_NUM_LANES_MASK GENMASK(5, 4) #define DP_NUM_LANES(x) ((x) << 4) #define SN_DATARATE_CONFIG_REG 0x94 #define DP_DATARATE_MASK GENMASK(7, 5) #define DP_DATARATE(x) ((x) << 5) #define SN_TRAINING_SETTING_REG 0x95 #define SCRAMBLE_DISABLE BIT(4) #define SN_ML_TX_MODE_REG 0x96 #define ML_TX_MAIN_LINK_OFF 0 #define ML_TX_NORMAL_MODE BIT(0) #define SN_PWM_PRE_DIV_REG 0xA0 #define SN_BACKLIGHT_SCALE_REG 0xA1 #define BACKLIGHT_SCALE_MAX 0xFFFF #define SN_BACKLIGHT_REG 0xA3 #define SN_PWM_EN_INV_REG 0xA5 #define SN_PWM_INV_MASK BIT(0) #define SN_PWM_EN_MASK BIT(1) #define SN_AUX_CMD_STATUS_REG 0xF4 #define AUX_IRQ_STATUS_AUX_RPLY_TOUT BIT(3) #define AUX_IRQ_STATUS_AUX_SHORT BIT(5) #define AUX_IRQ_STATUS_NAT_I2C_FAIL BIT(6) #define MIN_DSI_CLK_FREQ_MHZ 40 /* fudge factor required to account for 8b/10b encoding */ #define DP_CLK_FUDGE_NUM 10 #define DP_CLK_FUDGE_DEN 8 /* Matches DP_AUX_MAX_PAYLOAD_BYTES (for now) */ #define SN_AUX_MAX_PAYLOAD_BYTES 16 #define SN_REGULATOR_SUPPLY_NUM 4 #define SN_MAX_DP_LANES 4 #define SN_NUM_GPIOS 4 #define SN_GPIO_PHYSICAL_OFFSET 1 #define SN_LINK_TRAINING_TRIES 10 #define 
SN_PWM_GPIO_IDX 3 /* 4th GPIO */ /** * struct ti_sn65dsi86 - Platform data for ti-sn65dsi86 driver. * @bridge_aux: AUX-bus sub device for MIPI-to-eDP bridge functionality. * @gpio_aux: AUX-bus sub device for GPIO controller functionality. * @aux_aux: AUX-bus sub device for eDP AUX channel functionality. * @pwm_aux: AUX-bus sub device for PWM controller functionality. * * @dev: Pointer to the top level (i2c) device. * @regmap: Regmap for accessing i2c. * @aux: Our aux channel. * @bridge: Our bridge. * @connector: Our connector. * @host_node: Remote DSI node. * @dsi: Our MIPI DSI source. * @refclk: Our reference clock. * @next_bridge: The bridge on the eDP side. * @enable_gpio: The GPIO we toggle to enable the bridge. * @supplies: Data for bulk enabling/disabling our regulators. * @dp_lanes: Count of dp_lanes we're using. * @ln_assign: Value to program to the LN_ASSIGN register. * @ln_polrs: Value for the 4-bit LN_POLRS field of SN_ENH_FRAME_REG. * @comms_enabled: If true then communication over the aux channel is enabled. * @comms_mutex: Protects modification of comms_enabled. * * @gchip: If we expose our GPIOs, this is used. * @gchip_output: A cache of whether we've set GPIOs to output. This * serves double-duty of keeping track of the direction and * also keeping track of whether we've incremented the * pm_runtime reference count for this pin, which we do * whenever a pin is configured as an output. This is a * bitmap so we can do atomic ops on it without an extra * lock so concurrent users of our 4 GPIOs don't stomp on * each other's read-modify-write. * * @pchip: pwm_chip if the PWM is exposed. * @pwm_enabled: Used to track if the PWM signal is currently enabled. * @pwm_pin_busy: Track if GPIO4 is currently requested for GPIO or PWM. * @pwm_refclk_freq: Cache for the reference clock input to the PWM. 
 */
struct ti_sn65dsi86 {
	struct auxiliary_device		*bridge_aux;
	struct auxiliary_device		*gpio_aux;
	struct auxiliary_device		*aux_aux;
	struct auxiliary_device		*pwm_aux;

	struct device			*dev;
	struct regmap			*regmap;
	struct drm_dp_aux		aux;
	struct drm_bridge		bridge;
	struct drm_connector		*connector;
	struct device_node		*host_node;
	struct mipi_dsi_device		*dsi;
	struct clk			*refclk;
	struct drm_bridge		*next_bridge;
	struct gpio_desc		*enable_gpio;
	struct regulator_bulk_data	supplies[SN_REGULATOR_SUPPLY_NUM];
	int				dp_lanes;
	u8				ln_assign;
	u8				ln_polrs;
	bool				comms_enabled;
	struct mutex			comms_mutex;

#if defined(CONFIG_OF_GPIO)
	struct gpio_chip		gchip;
	DECLARE_BITMAP(gchip_output, SN_NUM_GPIOS);
#endif
#if defined(CONFIG_PWM)
	struct pwm_chip			pchip;
	bool				pwm_enabled;
	atomic_t			pwm_pin_busy;
#endif
	unsigned int			pwm_refclk_freq;
};

/* Treat the entire register space as volatile: never serve reads from cache. */
static const struct regmap_range ti_sn65dsi86_volatile_ranges[] = {
	{ .range_min = 0, .range_max = 0xFF },
};

static const struct regmap_access_table ti_sn_bridge_volatile_table = {
	.yes_ranges = ti_sn65dsi86_volatile_ranges,
	.n_yes_ranges = ARRAY_SIZE(ti_sn65dsi86_volatile_ranges),
};

static const struct regmap_config ti_sn65dsi86_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.volatile_table = &ti_sn_bridge_volatile_table,
	.cache_type = REGCACHE_NONE,
	.max_register = 0xFF,
};

/* Read a 16-bit value stored little-endian in two consecutive 8-bit registers. */
static int __maybe_unused ti_sn65dsi86_read_u16(struct ti_sn65dsi86 *pdata,
						unsigned int reg, u16 *val)
{
	u8 buf[2];
	int ret;

	ret = regmap_bulk_read(pdata->regmap, reg, buf, ARRAY_SIZE(buf));
	if (ret)
		return ret;

	*val = buf[0] | (buf[1] << 8);

	return 0;
}

/* Write a 16-bit value little-endian into two consecutive 8-bit registers. */
static void ti_sn65dsi86_write_u16(struct ti_sn65dsi86 *pdata,
				   unsigned int reg, u16 val)
{
	u8 buf[2] = { val & 0xff, val >> 8 };

	regmap_bulk_write(pdata->regmap, reg, buf, ARRAY_SIZE(buf));
}

/* Derive the per-lane DSI clock (kHz) from the currently committed mode. */
static u32 ti_sn_bridge_get_dsi_freq(struct ti_sn65dsi86 *pdata)
{
	u32 bit_rate_khz, clk_freq_khz;
	struct drm_display_mode *mode =
		&pdata->bridge.encoder->crtc->state->adjusted_mode;

	bit_rate_khz = mode->clock *
			mipi_dsi_pixel_format_to_bpp(pdata->dsi->format);
	/* DSI is double data rate: divide by 2x the lane count. */
	clk_freq_khz = bit_rate_khz / (pdata->dsi->lanes * 2);

	return clk_freq_khz;
}

/* clk frequencies supported by bridge in Hz in case derived from REFCLK pin */
static const u32 ti_sn_bridge_refclk_lut[] = {
	12000000, 19200000, 26000000, 27000000, 38400000,
};

/* clk frequencies supported by bridge in Hz in case derived from DACP/N pin */
static const u32 ti_sn_bridge_dsiclk_lut[] = {
	468000000, 384000000, 416000000, 486000000, 460800000,
};

/*
 * Program the REFCLK_FREQ field of SN_DPPLL_SRC_REG to match whichever
 * reference the bridge is actually using (dedicated REFCLK pin or the
 * DSI clock), and cache the resulting PWM reference frequency.
 */
static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata)
{
	int i;
	u32 refclk_rate;
	const u32 *refclk_lut;
	size_t refclk_lut_size;

	if (pdata->refclk) {
		refclk_rate = clk_get_rate(pdata->refclk);
		refclk_lut = ti_sn_bridge_refclk_lut;
		refclk_lut_size = ARRAY_SIZE(ti_sn_bridge_refclk_lut);
		clk_prepare_enable(pdata->refclk);
	} else {
		refclk_rate = ti_sn_bridge_get_dsi_freq(pdata) * 1000;
		refclk_lut = ti_sn_bridge_dsiclk_lut;
		refclk_lut_size = ARRAY_SIZE(ti_sn_bridge_dsiclk_lut);
	}

	/* i == refclk_lut_size after the loop means no exact match was found */
	for (i = 0; i < refclk_lut_size; i++)
		if (refclk_lut[i] == refclk_rate)
			break;

	/* avoid buffer overflow and "1" is the default rate in the datasheet. */
	if (i >= refclk_lut_size)
		i = 1;
	regmap_update_bits(pdata->regmap, SN_DPPLL_SRC_REG, REFCLK_FREQ_MASK,
			   REFCLK_FREQ(i));

	/*
	 * The PWM refclk is based on the value written to SN_DPPLL_SRC_REG,
	 * regardless of its actual sourcing.
	 */
	pdata->pwm_refclk_freq = ti_sn_bridge_refclk_lut[i];
}

/* Bring up register access over i2c/aux; caller must hold a PM reference. */
static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata)
{
	mutex_lock(&pdata->comms_mutex);

	/* configure bridge ref_clk */
	ti_sn_bridge_set_refclk_freq(pdata);

	/*
	 * HPD on this bridge chip is a bit useless.  This is an eDP bridge
	 * so the HPD is an internal signal that's only there to signal that
	 * the panel is done powering up.  ...but the bridge chip debounces
	 * this signal by between 100 ms and 400 ms (depending on process,
	 * voltage, and temperature--I measured it at about 200 ms).  One
	 * particular panel asserted HPD 84 ms after it was powered on meaning
	 * that we saw HPD 284 ms after power on.  ...but the same panel said
	 * that instead of looking at HPD you could just hardcode a delay of
	 * 200 ms.  We'll assume that the panel driver will have the hardcoded
	 * delay in its prepare and always disable HPD.
	 *
	 * If HPD somehow makes sense on some future panel we'll have to
	 * change this to be conditional on someone specifying that HPD should
	 * be used.
	 */
	regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE,
			   HPD_DISABLE);

	pdata->comms_enabled = true;

	mutex_unlock(&pdata->comms_mutex);
}

static void ti_sn65dsi86_disable_comms(struct ti_sn65dsi86 *pdata)
{
	mutex_lock(&pdata->comms_mutex);

	pdata->comms_enabled = false;
	clk_disable_unprepare(pdata->refclk);

	mutex_unlock(&pdata->comms_mutex);
}

/* Runtime-PM resume: power rails up, enable pin high, optionally comms. */
static int __maybe_unused ti_sn65dsi86_resume(struct device *dev)
{
	struct ti_sn65dsi86 *pdata = dev_get_drvdata(dev);
	int ret;

	ret = regulator_bulk_enable(SN_REGULATOR_SUPPLY_NUM, pdata->supplies);
	if (ret) {
		DRM_ERROR("failed to enable supplies %d\n", ret);
		return ret;
	}

	/* td2: min 100 us after regulators before enabling the GPIO */
	usleep_range(100, 110);

	gpiod_set_value_cansleep(pdata->enable_gpio, 1);

	/*
	 * If we have a reference clock we can enable communication w/ the
	 * panel (including the aux channel) w/out any need for an input clock
	 * so we can do it in resume which lets us read the EDID before
	 * pre_enable(). Without a reference clock we need the MIPI reference
	 * clock so reading early doesn't work.
	 */
	if (pdata->refclk)
		ti_sn65dsi86_enable_comms(pdata);

	return ret;
}

/* Runtime-PM suspend: mirror of ti_sn65dsi86_resume(), in reverse order. */
static int __maybe_unused ti_sn65dsi86_suspend(struct device *dev)
{
	struct ti_sn65dsi86 *pdata = dev_get_drvdata(dev);
	int ret;

	if (pdata->refclk)
		ti_sn65dsi86_disable_comms(pdata);

	gpiod_set_value_cansleep(pdata->enable_gpio, 0);

	ret = regulator_bulk_disable(SN_REGULATOR_SUPPLY_NUM, pdata->supplies);
	if (ret)
		DRM_ERROR("failed to disable supplies %d\n", ret);

	return ret;
}

static const struct dev_pm_ops ti_sn65dsi86_pm_ops = {
	SET_RUNTIME_PM_OPS(ti_sn65dsi86_suspend, ti_sn65dsi86_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

/* debugfs "status" file: dump the chip's IRQ status registers. */
static int status_show(struct seq_file *s, void *data)
{
	struct ti_sn65dsi86 *pdata = s->private;
	unsigned int reg, val;

	seq_puts(s, "STATUS REGISTERS:\n");

	pm_runtime_get_sync(pdata->dev);

	/* IRQ Status Registers, see Table 31 in datasheet */
	for (reg = 0xf0; reg <= 0xf8; reg++) {
		regmap_read(pdata->regmap, reg, &val);
		seq_printf(s, "[0x%02x] = 0x%08x\n", reg, val);
	}

	pm_runtime_put_autosuspend(pdata->dev);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(status);

static void ti_sn65dsi86_debugfs_remove(void *data)
{
	debugfs_remove_recursive(data);
}

/* Create the per-device debugfs directory and its "status" file. */
static void ti_sn65dsi86_debugfs_init(struct ti_sn65dsi86 *pdata)
{
	struct device *dev = pdata->dev;
	struct dentry *debugfs;
	int ret;

	debugfs = debugfs_create_dir(dev_name(dev), NULL);

	/*
	 * We might get an error back if debugfs wasn't enabled in the kernel
	 * so let's just silently return upon failure.
	 */
	if (IS_ERR_OR_NULL(debugfs))
		return;

	ret = devm_add_action_or_reset(dev, ti_sn65dsi86_debugfs_remove, debugfs);
	if (ret)
		return;

	debugfs_create_file("status", 0600, debugfs, pdata, &status_fops);
}

/* -----------------------------------------------------------------------------
 * Auxiliary Devices (*not* AUX)
 */

static void ti_sn65dsi86_uninit_aux(void *data)
{
	auxiliary_device_uninit(data);
}

static void ti_sn65dsi86_delete_aux(void *data)
{
	auxiliary_device_delete(data);
}

static void ti_sn65dsi86_aux_device_release(struct device *dev)
{
	struct auxiliary_device *aux = container_of(dev, struct auxiliary_device, dev);

	kfree(aux);
}

/*
 * Allocate, init and register one auxiliary-bus sub-device; teardown at
 * every stage is tied to @dev via devm actions, so the caller only needs
 * to check the return value.
 */
static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
				       struct auxiliary_device **aux_out,
				       const char *name)
{
	struct device *dev = pdata->dev;
	struct auxiliary_device *aux;
	int ret;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL);
	if (!aux)
		return -ENOMEM;

	aux->name = name;
	aux->dev.parent = dev;
	aux->dev.release = ti_sn65dsi86_aux_device_release;
	device_set_of_node_from_dev(&aux->dev, dev);
	ret = auxiliary_device_init(aux);
	if (ret) {
		/* Not yet initted: release callback won't run, free directly. */
		kfree(aux);
		return ret;
	}
	ret = devm_add_action_or_reset(dev, ti_sn65dsi86_uninit_aux, aux);
	if (ret)
		return ret;

	ret = auxiliary_device_add(aux);
	if (ret)
		return ret;
	ret = devm_add_action_or_reset(dev, ti_sn65dsi86_delete_aux, aux);
	if (!ret)
		*aux_out = aux;

	return ret;
}

/* -----------------------------------------------------------------------------
 * AUX Adapter
 */

static struct ti_sn65dsi86 *aux_to_ti_sn65dsi86(struct drm_dp_aux *aux)
{
	return container_of(aux, struct ti_sn65dsi86, aux);
}

/*
 * Perform one DP AUX transaction (native or i2c-over-AUX) through the
 * bridge's AUX registers.  Returns the number of bytes transferred or a
 * negative error code.
 */
static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux,
				  struct drm_dp_aux_msg *msg)
{
	struct ti_sn65dsi86 *pdata = aux_to_ti_sn65dsi86(aux);
	u32 request = msg->request & ~(DP_AUX_I2C_MOT | DP_AUX_I2C_WRITE_STATUS_UPDATE);
	u32 request_val = AUX_CMD_REQ(msg->request);
	u8 *buf = msg->buffer;
	unsigned int len = msg->size;
	unsigned int val;
	int ret;
	u8 addr_len[SN_AUX_LENGTH_REG + 1 - SN_AUX_ADDR_19_16_REG];

	if (len > SN_AUX_MAX_PAYLOAD_BYTES)
		return -EINVAL;

	pm_runtime_get_sync(pdata->dev);
	mutex_lock(&pdata->comms_mutex);

	/*
	 * If someone tries to do a DDC over AUX transaction before pre_enable()
	 * on a device without a dedicated reference clock then we just can't
	 * do it. Fail right away. This prevents non-refclk users from reading
	 * the EDID before enabling the panel but such is life.
	 */
	if (!pdata->comms_enabled) {
		ret = -EIO;
		goto exit;
	}

	switch (request) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		regmap_write(pdata->regmap, SN_AUX_CMD_REG, request_val);
		/* Assume it's good */
		msg->reply = 0;
		break;
	default:
		ret = -EINVAL;
		goto exit;
	}

	/* Address (20 bits) and length pack into 4 consecutive registers. */
	BUILD_BUG_ON(sizeof(addr_len) != sizeof(__be32));
	put_unaligned_be32((msg->address & SN_AUX_ADDR_MASK) << 8 | len,
			   addr_len);
	regmap_bulk_write(pdata->regmap, SN_AUX_ADDR_19_16_REG, addr_len,
			  ARRAY_SIZE(addr_len));

	if (request == DP_AUX_NATIVE_WRITE || request == DP_AUX_I2C_WRITE)
		regmap_bulk_write(pdata->regmap, SN_AUX_WDATA_REG(0), buf, len);

	/* Clear old status bits before start so we don't get confused */
	regmap_write(pdata->regmap, SN_AUX_CMD_STATUS_REG,
		     AUX_IRQ_STATUS_NAT_I2C_FAIL |
		     AUX_IRQ_STATUS_AUX_RPLY_TOUT |
		     AUX_IRQ_STATUS_AUX_SHORT);

	regmap_write(pdata->regmap, SN_AUX_CMD_REG, request_val | AUX_CMD_SEND);

	/* Zero delay loop because i2c transactions are slow already */
	ret = regmap_read_poll_timeout(pdata->regmap, SN_AUX_CMD_REG, val,
				       !(val & AUX_CMD_SEND), 0, 50 * 1000);
	if (ret)
		goto exit;

	ret = regmap_read(pdata->regmap, SN_AUX_CMD_STATUS_REG, &val);
	if (ret)
		goto exit;

	if (val & AUX_IRQ_STATUS_AUX_RPLY_TOUT) {
		/*
		 * The hardware tried the message seven times per the DP spec
		 * but it hit a timeout. We ignore defers here because they're
		 * handled in hardware.
	 */
	return ti_sn65dsi86_add_aux_device(pdata, &pdata->bridge_aux, "bridge");
}

static const struct auxiliary_device_id ti_sn_aux_id_table[] = {
	{ .name = "ti_sn65dsi86.aux", },
	{},
};

static struct auxiliary_driver ti_sn_aux_driver = {
	.name = "aux",
	.probe = ti_sn_aux_probe,
	.id_table = ti_sn_aux_id_table,
};

/*------------------------------------------------------------------------------
 * DRM Bridge
 */

static struct ti_sn65dsi86 *bridge_to_ti_sn65dsi86(struct drm_bridge *bridge)
{
	return container_of(bridge, struct ti_sn65dsi86, bridge);
}

/* Register ourselves as a DSI peripheral on the remote DSI host and attach. */
static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata)
{
	int val;
	struct mipi_dsi_host *host;
	struct mipi_dsi_device *dsi;
	struct device *dev = pdata->dev;
	const struct mipi_dsi_device_info info = { .type = "ti_sn_bridge",
						   .channel = 0,
						   .node = NULL,
	};

	host = of_find_mipi_dsi_host_by_node(pdata->host_node);
	if (!host)
		return -EPROBE_DEFER;

	dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
	if (IS_ERR(dsi))
		return PTR_ERR(dsi);

	/* TODO: setting to 4 MIPI lanes always for now */
	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO;

	/* check if continuous dsi clock is required or not */
	pm_runtime_get_sync(dev);
	regmap_read(pdata->regmap, SN_DPPLL_SRC_REG, &val);
	pm_runtime_put_autosuspend(dev);
	if (!(val & DPPLL_CLK_SRC_DSICLK))
		dsi->mode_flags |= MIPI_DSI_CLOCK_NON_CONTINUOUS;

	pdata->dsi = dsi;

	return devm_mipi_dsi_attach(dev, dsi);
}

/* drm_bridge_funcs.attach: register AUX, chain the next bridge, connector. */
static int ti_sn_bridge_attach(struct drm_bridge *bridge,
			       enum drm_bridge_attach_flags flags)
{
	struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
	int ret;

	pdata->aux.drm_dev = bridge->dev;
	ret = drm_dp_aux_register(&pdata->aux);
	if (ret < 0) {
		drm_err(bridge->dev, "Failed to register DP AUX channel: %d\n", ret);
		return ret;
	}

	/*
	 * Attach the next bridge.
	 * We never want the next bridge to *also* create a connector.
	 */
	ret = drm_bridge_attach(bridge->encoder, pdata->next_bridge,
				&pdata->bridge, flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret < 0)
		goto err_initted_aux;

	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
		return 0;

	/* Caller wants us to provide the connector. */
	pdata->connector = drm_bridge_connector_init(pdata->bridge.dev,
						     pdata->bridge.encoder);
	if (IS_ERR(pdata->connector)) {
		ret = PTR_ERR(pdata->connector);
		goto err_initted_aux;
	}

	drm_connector_attach_encoder(pdata->connector, pdata->bridge.encoder);

	return 0;

err_initted_aux:
	drm_dp_aux_unregister(&pdata->aux);
	return ret;
}

static void ti_sn_bridge_detach(struct drm_bridge *bridge)
{
	drm_dp_aux_unregister(&bridge_to_ti_sn65dsi86(bridge)->aux);
}

static enum drm_mode_status
ti_sn_bridge_mode_valid(struct drm_bridge *bridge,
			const struct drm_display_info *info,
			const struct drm_display_mode *mode)
{
	/* maximum supported resolution is 4K at 60 fps */
	if (mode->clock > 594000)
		return MODE_CLOCK_HIGH;

	/*
	 * The front and back porch registers are 8 bits, and pulse width
	 * registers are 15 bits, so reject any modes with larger periods.
	 */

	if ((mode->hsync_start - mode->hdisplay) > 0xff)
		return MODE_HBLANK_WIDE;

	if ((mode->vsync_start - mode->vdisplay) > 0xff)
		return MODE_VBLANK_WIDE;

	if ((mode->hsync_end - mode->hsync_start) > 0x7fff)
		return MODE_HSYNC_WIDE;

	if ((mode->vsync_end - mode->vsync_start) > 0x7fff)
		return MODE_VSYNC_WIDE;

	if ((mode->htotal - mode->hsync_end) > 0xff)
		return MODE_HBLANK_WIDE;

	if ((mode->vtotal - mode->vsync_end) > 0xff)
		return MODE_VBLANK_WIDE;

	return MODE_OK;
}

static void ti_sn_bridge_atomic_disable(struct drm_bridge *bridge,
					struct drm_bridge_state *old_bridge_state)
{
	struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);

	/* disable video stream */
	regmap_update_bits(pdata->regmap, SN_ENH_FRAME_REG, VSTREAM_ENABLE, 0);
}

/* Program SN_DSIA_CLK_FREQ_REG from the committed mode's pixel clock. */
static void ti_sn_bridge_set_dsi_rate(struct ti_sn65dsi86 *pdata)
{
	unsigned int bit_rate_mhz, clk_freq_mhz;
	unsigned int val;
	struct drm_display_mode *mode =
		&pdata->bridge.encoder->crtc->state->adjusted_mode;

	/* set DSIA clk frequency */
	bit_rate_mhz = (mode->clock / 1000) *
			mipi_dsi_pixel_format_to_bpp(pdata->dsi->format);
	clk_freq_mhz = bit_rate_mhz / (pdata->dsi->lanes * 2);

	/* for each increment in val, frequency increases by 5MHz */
	val = (MIN_DSI_CLK_FREQ_MHZ / 5) +
		(((clk_freq_mhz - MIN_DSI_CLK_FREQ_MHZ) / 5) & 0xFF);
	regmap_write(pdata->regmap, SN_DSIA_CLK_FREQ_REG, val);
}

/* Pick 18 bpp for panels reporting <= 6 bits per color, else 24 bpp. */
static unsigned int ti_sn_bridge_get_bpp(struct drm_connector *connector)
{
	if (connector->display_info.bpc <= 6)
		return 18;
	else
		return 24;
}

/*
 * LUT index corresponds to register value and
 * LUT values corresponds to dp data rate supported
 * by the bridge in Mbps unit.
 */
static const unsigned int ti_sn_bridge_dp_rate_lut[] = {
	0, 1620, 2160, 2430, 2700, 3240, 4320, 5400
};

/* Return the index of the smallest LUT rate that can carry the mode. */
static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn65dsi86 *pdata, unsigned int bpp)
{
	unsigned int bit_rate_khz, dp_rate_mhz;
	unsigned int i;
	struct drm_display_mode *mode =
		&pdata->bridge.encoder->crtc->state->adjusted_mode;

	/* Calculate minimum bit rate based on our pixel clock. */
	bit_rate_khz = mode->clock * bpp;

	/* Calculate minimum DP data rate, taking 80% as per DP spec */
	dp_rate_mhz = DIV_ROUND_UP(bit_rate_khz * DP_CLK_FUDGE_NUM,
				   1000 * pdata->dp_lanes * DP_CLK_FUDGE_DEN);

	for (i = 1; i < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut) - 1; i++)
		if (ti_sn_bridge_dp_rate_lut[i] >= dp_rate_mhz)
			break;

	return i;
}

/*
 * Build a bitmask (indexed like ti_sn_bridge_dp_rate_lut) of link rates
 * the sink supports, from either the eDP 1.4 rate table or, on older
 * sinks, DP_MAX_LINK_RATE.
 */
static unsigned int ti_sn_bridge_read_valid_rates(struct ti_sn65dsi86 *pdata)
{
	unsigned int valid_rates = 0;
	unsigned int rate_per_200khz;
	unsigned int rate_mhz;
	u8 dpcd_val;
	int ret;
	int i, j;

	ret = drm_dp_dpcd_readb(&pdata->aux, DP_EDP_DPCD_REV, &dpcd_val);
	if (ret != 1) {
		DRM_DEV_ERROR(pdata->dev,
			      "Can't read eDP rev (%d), assuming 1.1\n", ret);
		dpcd_val = DP_EDP_11;
	}

	if (dpcd_val >= DP_EDP_14) {
		/* eDP 1.4 devices must provide a custom table */
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];

		ret = drm_dp_dpcd_read(&pdata->aux, DP_SUPPORTED_LINK_RATES,
				       sink_rates, sizeof(sink_rates));

		if (ret != sizeof(sink_rates)) {
			DRM_DEV_ERROR(pdata->dev,
				"Can't read supported rate table (%d)\n", ret);

			/* By zeroing we'll fall back to DP_MAX_LINK_RATE. */
			memset(sink_rates, 0, sizeof(sink_rates));
		}

		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			rate_per_200khz = le16_to_cpu(sink_rates[i]);

			/* A zero entry terminates the sink's rate table. */
			if (!rate_per_200khz)
				break;

			rate_mhz = rate_per_200khz * 200 / 1000;
			for (j = 0;
			     j < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut);
			     j++) {
				if (ti_sn_bridge_dp_rate_lut[j] == rate_mhz)
					valid_rates |= BIT(j);
			}
		}

		for (i = 0; i < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut); i++) {
			if (valid_rates & BIT(i))
				return valid_rates;
		}
		DRM_DEV_ERROR(pdata->dev,
			      "No matching eDP rates in table; falling back\n");
	}

	/* On older versions best we can do is use DP_MAX_LINK_RATE */
	ret = drm_dp_dpcd_readb(&pdata->aux, DP_MAX_LINK_RATE, &dpcd_val);
	if (ret != 1) {
		DRM_DEV_ERROR(pdata->dev,
			      "Can't read max rate (%d); assuming 5.4 GHz\n",
			      ret);
		dpcd_val = DP_LINK_BW_5_4;
	}

	/* Each max-rate value implies all the lower standard rates too. */
	switch (dpcd_val) {
	default:
		DRM_DEV_ERROR(pdata->dev,
			      "Unexpected max rate (%#x); assuming 5.4 GHz\n",
			      (int)dpcd_val);
		fallthrough;
	case DP_LINK_BW_5_4:
		valid_rates |= BIT(7);
		fallthrough;
	case DP_LINK_BW_2_7:
		valid_rates |= BIT(4);
		fallthrough;
	case DP_LINK_BW_1_62:
		valid_rates |= BIT(1);
		break;
	}

	return valid_rates;
}

/* Program the CHA_* timing registers from the committed adjusted mode. */
static void ti_sn_bridge_set_video_timings(struct ti_sn65dsi86 *pdata)
{
	struct drm_display_mode *mode =
		&pdata->bridge.encoder->crtc->state->adjusted_mode;
	u8 hsync_polarity = 0, vsync_polarity = 0;

	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		hsync_polarity = CHA_HSYNC_POLARITY;
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		vsync_polarity = CHA_VSYNC_POLARITY;

	ti_sn65dsi86_write_u16(pdata, SN_CHA_ACTIVE_LINE_LENGTH_LOW_REG,
			       mode->hdisplay);
	ti_sn65dsi86_write_u16(pdata, SN_CHA_VERTICAL_DISPLAY_SIZE_LOW_REG,
			       mode->vdisplay);
	regmap_write(pdata->regmap, SN_CHA_HSYNC_PULSE_WIDTH_LOW_REG,
		     (mode->hsync_end - mode->hsync_start) & 0xFF);
	regmap_write(pdata->regmap, SN_CHA_HSYNC_PULSE_WIDTH_HIGH_REG,
		     (((mode->hsync_end - mode->hsync_start) >> 8) & 0x7F) |
		     hsync_polarity);
	regmap_write(pdata->regmap, SN_CHA_VSYNC_PULSE_WIDTH_LOW_REG,
		     (mode->vsync_end - mode->vsync_start) & 0xFF);
	regmap_write(pdata->regmap, SN_CHA_VSYNC_PULSE_WIDTH_HIGH_REG,
		     (((mode->vsync_end - mode->vsync_start) >> 8) & 0x7F) |
		     vsync_polarity);

	regmap_write(pdata->regmap, SN_CHA_HORIZONTAL_BACK_PORCH_REG,
		     (mode->htotal - mode->hsync_end) & 0xFF);
	regmap_write(pdata->regmap, SN_CHA_VERTICAL_BACK_PORCH_REG,
		     (mode->vtotal - mode->vsync_end) & 0xFF);

	regmap_write(pdata->regmap, SN_CHA_HORIZONTAL_FRONT_PORCH_REG,
		     (mode->hsync_start - mode->hdisplay) & 0xFF);
	regmap_write(pdata->regmap, SN_CHA_VERTICAL_FRONT_PORCH_REG,
		     (mode->vsync_start - mode->vdisplay) & 0xFF);

	usleep_range(10000, 10500); /* 10ms delay recommended by spec */
}

/* Query the sink's lane count over AUX; fall back to 4 if the read fails. */
static unsigned int ti_sn_get_max_lanes(struct ti_sn65dsi86 *pdata)
{
	u8 data;
	int ret;

	ret = drm_dp_dpcd_readb(&pdata->aux, DP_MAX_LANE_COUNT, &data);
	if (ret != 1) {
		DRM_DEV_ERROR(pdata->dev,
			      "Can't read lane count (%d); assuming 4\n", ret);
		return 4;
	}

	return data & DP_LANE_COUNT_MASK;
}

/*
 * Lock the DP PLL at the rate selected by @dp_rate_idx and run the
 * chip's semi-automatic link training.  On failure *last_err_str is set
 * to a human-readable reason and the PLL is disabled again.
 */
static int ti_sn_link_training(struct ti_sn65dsi86 *pdata, int dp_rate_idx,
			       const char **last_err_str)
{
	unsigned int val;
	int ret;
	int i;

	/* set dp clk frequency value */
	regmap_update_bits(pdata->regmap, SN_DATARATE_CONFIG_REG,
			   DP_DATARATE_MASK, DP_DATARATE(dp_rate_idx));

	/* enable DP PLL */
	regmap_write(pdata->regmap, SN_PLL_ENABLE_REG, 1);

	ret = regmap_read_poll_timeout(pdata->regmap, SN_DPPLL_SRC_REG, val,
				       val & DPPLL_SRC_DP_PLL_LOCK, 1000,
				       50 * 1000);
	if (ret) {
		*last_err_str = "DP_PLL_LOCK polling failed";
		goto exit;
	}

	/*
	 * We'll try to link train several times.  As part of link training
	 * the bridge chip will write DP_SET_POWER_D0 to DP_SET_POWER.  If
	 * the panel isn't quite ready it might respond NAK here which means
	 * we need to try again.
	 */
	for (i = 0; i < SN_LINK_TRAINING_TRIES; i++) {
		/* Semi auto link training mode */
		regmap_write(pdata->regmap, SN_ML_TX_MODE_REG, 0x0A);
		ret = regmap_read_poll_timeout(pdata->regmap, SN_ML_TX_MODE_REG, val,
					       val == ML_TX_MAIN_LINK_OFF ||
					       val == ML_TX_NORMAL_MODE, 1000,
					       500 * 1000);
		if (ret) {
			*last_err_str = "Training complete polling failed";
		} else if (val == ML_TX_MAIN_LINK_OFF) {
			*last_err_str = "Link training failed, link is off";
			ret = -EIO;
			continue;
		}

		break;
	}

	/* If we saw quite a few retries, add a note about it */
	if (!ret && i > SN_LINK_TRAINING_TRIES / 2)
		DRM_DEV_INFO(pdata->dev, "Link training needed %d retries\n", i);

exit:
	/* Disable the PLL if we failed */
	if (ret)
		regmap_write(pdata->regmap, SN_PLL_ENABLE_REG, 0);

	return ret;
}

/* drm_bridge_funcs.atomic_enable: full bring-up, training, video on. */
static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge,
				       struct drm_bridge_state *old_bridge_state)
{
	struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
	struct drm_connector *connector;
	const char *last_err_str = "No supported DP rate";
	unsigned int valid_rates;
	int dp_rate_idx;
	unsigned int val;
	int ret = -EINVAL;
	int max_dp_lanes;
	unsigned int bpp;

	connector = drm_atomic_get_new_connector_for_encoder(old_bridge_state->base.state,
							     bridge->encoder);
	if (!connector) {
		dev_err_ratelimited(pdata->dev, "Could not get the connector\n");
		return;
	}

	max_dp_lanes = ti_sn_get_max_lanes(pdata);
	pdata->dp_lanes = min(pdata->dp_lanes, max_dp_lanes);

	/* DSI_A lane config */
	val = CHA_DSI_LANES(SN_MAX_DP_LANES - pdata->dsi->lanes);
	regmap_update_bits(pdata->regmap, SN_DSI_LANES_REG,
			   CHA_DSI_LANES_MASK, val);

	regmap_write(pdata->regmap, SN_LN_ASSIGN_REG, pdata->ln_assign);
	regmap_update_bits(pdata->regmap, SN_ENH_FRAME_REG, LN_POLRS_MASK,
			   pdata->ln_polrs << LN_POLRS_OFFSET);

	/* set dsi clk frequency value */
	ti_sn_bridge_set_dsi_rate(pdata);

	/*
	 * The SN65DSI86 only supports ASSR Display Authentication method and
	 * this method is enabled for eDP panels. An eDP panel must support this
	 * authentication method. We need to enable this method in the eDP panel
	 * at DisplayPort address 0x0010A prior to link training.
	 *
	 * As only ASSR is supported by SN65DSI86, for full DisplayPort displays
	 * we need to disable the scrambler.
	 */
	if (pdata->bridge.type == DRM_MODE_CONNECTOR_eDP) {
		drm_dp_dpcd_writeb(&pdata->aux, DP_EDP_CONFIGURATION_SET,
				   DP_ALTERNATE_SCRAMBLER_RESET_ENABLE);
		regmap_update_bits(pdata->regmap, SN_TRAINING_SETTING_REG,
				   SCRAMBLE_DISABLE, 0);
	} else {
		regmap_update_bits(pdata->regmap, SN_TRAINING_SETTING_REG,
				   SCRAMBLE_DISABLE, SCRAMBLE_DISABLE);
	}

	bpp = ti_sn_bridge_get_bpp(connector);
	/* Set the DP output format (18 bpp or 24 bpp) */
	val = bpp == 18 ? BPP_18_RGB : 0;
	regmap_update_bits(pdata->regmap, SN_DATA_FORMAT_REG, BPP_18_RGB, val);

	/* DP lane config */
	val = DP_NUM_LANES(min(pdata->dp_lanes, 3));
	regmap_update_bits(pdata->regmap, SN_SSC_CONFIG_REG, DP_NUM_LANES_MASK,
			   val);

	valid_rates = ti_sn_bridge_read_valid_rates(pdata);

	/* Train until we run out of rates */
	for (dp_rate_idx = ti_sn_bridge_calc_min_dp_rate_idx(pdata, bpp);
	     dp_rate_idx < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut);
	     dp_rate_idx++) {
		if (!(valid_rates & BIT(dp_rate_idx)))
			continue;

		ret = ti_sn_link_training(pdata, dp_rate_idx, &last_err_str);
		if (!ret)
			break;
	}
	if (ret) {
		DRM_DEV_ERROR(pdata->dev, "%s (%d)\n", last_err_str, ret);
		return;
	}

	/* config video parameters */
	ti_sn_bridge_set_video_timings(pdata);

	/* enable video stream */
	regmap_update_bits(pdata->regmap, SN_ENH_FRAME_REG, VSTREAM_ENABLE,
			   VSTREAM_ENABLE);
}

static void ti_sn_bridge_atomic_pre_enable(struct drm_bridge *bridge,
					   struct drm_bridge_state *old_bridge_state)
{
	struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);

	pm_runtime_get_sync(pdata->dev);

	/* Without a dedicated refclk, comms couldn't start until now. */
	if (!pdata->refclk)
		ti_sn65dsi86_enable_comms(pdata);

	/* td7: min 100 us after enable before DSI data */
	usleep_range(100, 110);
}

static void ti_sn_bridge_atomic_post_disable(struct drm_bridge *bridge,
					     struct drm_bridge_state *old_bridge_state)
{
	struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);

	/* semi auto link training mode OFF */
	regmap_write(pdata->regmap, SN_ML_TX_MODE_REG, 0);
	/* Num lanes to 0 as per power sequencing in data sheet */
	regmap_update_bits(pdata->regmap, SN_SSC_CONFIG_REG, DP_NUM_LANES_MASK, 0);
	/* disable DP PLL */
	regmap_write(pdata->regmap, SN_PLL_ENABLE_REG, 0);

	if (!pdata->refclk)
		ti_sn65dsi86_disable_comms(pdata);

	pm_runtime_put_sync(pdata->dev);
}

/* Report connection based on the chip's debounced HPD status bit. */
static enum drm_connector_status ti_sn_bridge_detect(struct drm_bridge *bridge)
{
	struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
	int val = 0;

	pm_runtime_get_sync(pdata->dev);
	regmap_read(pdata->regmap, SN_HPD_DISABLE_REG, &val);
	pm_runtime_put_autosuspend(pdata->dev);

	return val & HPD_DEBOUNCED_STATE ? connector_status_connected
					 : connector_status_disconnected;
}

static struct edid *ti_sn_bridge_get_edid(struct drm_bridge *bridge,
					  struct drm_connector *connector)
{
	struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);

	return drm_get_edid(connector, &pdata->aux.ddc);
}

static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
	.attach = ti_sn_bridge_attach,
	.detach = ti_sn_bridge_detach,
	.mode_valid = ti_sn_bridge_mode_valid,
	.get_edid = ti_sn_bridge_get_edid,
	.detect = ti_sn_bridge_detect,
	.atomic_pre_enable = ti_sn_bridge_atomic_pre_enable,
	.atomic_enable = ti_sn_bridge_atomic_enable,
	.atomic_disable = ti_sn_bridge_atomic_disable,
	.atomic_post_disable = ti_sn_bridge_atomic_post_disable,
	.atomic_reset = drm_atomic_helper_bridge_reset,
	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
};

/* Parse DT data-lanes/lane-polarities into LN_ASSIGN / LN_POLRS values. */
static void ti_sn_bridge_parse_lanes(struct ti_sn65dsi86 *pdata,
				     struct device_node *np)
{
	u32 lane_assignments[SN_MAX_DP_LANES] = { 0, 1, 2, 3 };
	u32 lane_polarities[SN_MAX_DP_LANES] = { };
	struct device_node *endpoint;
	u8 ln_assign = 0;
	u8 ln_polrs = 0;
	int dp_lanes;
	int i;

	/*
	 * Read config from the device tree about lane remapping and lane
	 * polarities.
These are optional and we assume identity map and * normal polarity if nothing is specified. It's OK to specify just * data-lanes but not lane-polarities but not vice versa. * * Error checking is light (we just make sure we don't crash or * buffer overrun) and we assume dts is well formed and specifying * mappings that the hardware supports. */ endpoint = of_graph_get_endpoint_by_regs(np, 1, -1); dp_lanes = drm_of_get_data_lanes_count(endpoint, 1, SN_MAX_DP_LANES); if (dp_lanes > 0) { of_property_read_u32_array(endpoint, "data-lanes", lane_assignments, dp_lanes); of_property_read_u32_array(endpoint, "lane-polarities", lane_polarities, dp_lanes); } else { dp_lanes = SN_MAX_DP_LANES; } of_node_put(endpoint); /* * Convert into register format. Loop over all lanes even if * data-lanes had fewer elements so that we nicely initialize * the LN_ASSIGN register. */ for (i = SN_MAX_DP_LANES - 1; i >= 0; i--) { ln_assign = ln_assign << LN_ASSIGN_WIDTH | lane_assignments[i]; ln_polrs = ln_polrs << 1 | lane_polarities[i]; } /* Stash in our struct for when we power on */ pdata->dp_lanes = dp_lanes; pdata->ln_assign = ln_assign; pdata->ln_polrs = ln_polrs; } static int ti_sn_bridge_parse_dsi_host(struct ti_sn65dsi86 *pdata) { struct device_node *np = pdata->dev->of_node; pdata->host_node = of_graph_get_remote_node(np, 0, 0); if (!pdata->host_node) { DRM_ERROR("remote dsi host node not found\n"); return -ENODEV; } return 0; } static int ti_sn_bridge_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id) { struct ti_sn65dsi86 *pdata = dev_get_drvdata(adev->dev.parent); struct device_node *np = pdata->dev->of_node; int ret; pdata->next_bridge = devm_drm_of_get_bridge(pdata->dev, np, 1, 0); if (IS_ERR(pdata->next_bridge)) return dev_err_probe(pdata->dev, PTR_ERR(pdata->next_bridge), "failed to create panel bridge\n"); ti_sn_bridge_parse_lanes(pdata, np); ret = ti_sn_bridge_parse_dsi_host(pdata); if (ret) return ret; pdata->bridge.funcs = &ti_sn_bridge_funcs; 
pdata->bridge.of_node = np; pdata->bridge.type = pdata->next_bridge->type == DRM_MODE_CONNECTOR_DisplayPort ? DRM_MODE_CONNECTOR_DisplayPort : DRM_MODE_CONNECTOR_eDP; if (pdata->bridge.type == DRM_MODE_CONNECTOR_DisplayPort) pdata->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT; drm_bridge_add(&pdata->bridge); ret = ti_sn_attach_host(pdata); if (ret) { dev_err_probe(pdata->dev, ret, "failed to attach dsi host\n"); goto err_remove_bridge; } return 0; err_remove_bridge: drm_bridge_remove(&pdata->bridge); return ret; } static void ti_sn_bridge_remove(struct auxiliary_device *adev) { struct ti_sn65dsi86 *pdata = dev_get_drvdata(adev->dev.parent); if (!pdata) return; drm_bridge_remove(&pdata->bridge); of_node_put(pdata->host_node); } static const struct auxiliary_device_id ti_sn_bridge_id_table[] = { { .name = "ti_sn65dsi86.bridge", }, {}, }; static struct auxiliary_driver ti_sn_bridge_driver = { .name = "bridge", .probe = ti_sn_bridge_probe, .remove = ti_sn_bridge_remove, .id_table = ti_sn_bridge_id_table, }; /* ----------------------------------------------------------------------------- * PWM Controller */ #if defined(CONFIG_PWM) static int ti_sn_pwm_pin_request(struct ti_sn65dsi86 *pdata) { return atomic_xchg(&pdata->pwm_pin_busy, 1) ? 
-EBUSY : 0; } static void ti_sn_pwm_pin_release(struct ti_sn65dsi86 *pdata) { atomic_set(&pdata->pwm_pin_busy, 0); } static struct ti_sn65dsi86 *pwm_chip_to_ti_sn_bridge(struct pwm_chip *chip) { return container_of(chip, struct ti_sn65dsi86, pchip); } static int ti_sn_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm) { struct ti_sn65dsi86 *pdata = pwm_chip_to_ti_sn_bridge(chip); return ti_sn_pwm_pin_request(pdata); } static void ti_sn_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) { struct ti_sn65dsi86 *pdata = pwm_chip_to_ti_sn_bridge(chip); ti_sn_pwm_pin_release(pdata); } /* * Limitations: * - The PWM signal is not driven when the chip is powered down, or in its * reset state and the driver does not implement the "suspend state" * described in the documentation. In order to save power, state->enabled is * interpreted as denoting if the signal is expected to be valid, and is used * to determine if the chip needs to be kept powered. * - Changing both period and duty_cycle is not done atomically, neither is the * multi-byte register updates, so the output might briefly be undefined * during update. */ static int ti_sn_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, const struct pwm_state *state) { struct ti_sn65dsi86 *pdata = pwm_chip_to_ti_sn_bridge(chip); unsigned int pwm_en_inv; unsigned int backlight; unsigned int pre_div; unsigned int scale; u64 period_max; u64 period; int ret; if (!pdata->pwm_enabled) { ret = pm_runtime_get_sync(pdata->dev); if (ret < 0) { pm_runtime_put_sync(pdata->dev); return ret; } } if (state->enabled) { if (!pdata->pwm_enabled) { /* * The chip might have been powered down while we * didn't hold a PM runtime reference, so mux in the * PWM function on the GPIO pin again. 
*/ ret = regmap_update_bits(pdata->regmap, SN_GPIO_CTRL_REG, SN_GPIO_MUX_MASK << (2 * SN_PWM_GPIO_IDX), SN_GPIO_MUX_SPECIAL << (2 * SN_PWM_GPIO_IDX)); if (ret) { dev_err(pdata->dev, "failed to mux in PWM function\n"); goto out; } } /* * Per the datasheet the PWM frequency is given by: * * REFCLK_FREQ * PWM_FREQ = ----------------------------------- * PWM_PRE_DIV * BACKLIGHT_SCALE + 1 * * However, after careful review the author is convinced that * the documentation has lost some parenthesis around * "BACKLIGHT_SCALE + 1". * * With the period T_pwm = 1/PWM_FREQ this can be written: * * T_pwm * REFCLK_FREQ = PWM_PRE_DIV * (BACKLIGHT_SCALE + 1) * * In order to keep BACKLIGHT_SCALE within its 16 bits, * PWM_PRE_DIV must be: * * T_pwm * REFCLK_FREQ * PWM_PRE_DIV >= ------------------------- * BACKLIGHT_SCALE_MAX + 1 * * To simplify the search and to favour higher resolution of * the duty cycle over accuracy of the period, the lowest * possible PWM_PRE_DIV is used. Finally the scale is * calculated as: * * T_pwm * REFCLK_FREQ * BACKLIGHT_SCALE = ---------------------- - 1 * PWM_PRE_DIV * * Here T_pwm is represented in seconds, so appropriate scaling * to nanoseconds is necessary. 
*/ /* Minimum T_pwm is 1 / REFCLK_FREQ */ if (state->period <= NSEC_PER_SEC / pdata->pwm_refclk_freq) { ret = -EINVAL; goto out; } /* * Maximum T_pwm is 255 * (65535 + 1) / REFCLK_FREQ * Limit period to this to avoid overflows */ period_max = div_u64((u64)NSEC_PER_SEC * 255 * (65535 + 1), pdata->pwm_refclk_freq); period = min(state->period, period_max); pre_div = DIV64_U64_ROUND_UP(period * pdata->pwm_refclk_freq, (u64)NSEC_PER_SEC * (BACKLIGHT_SCALE_MAX + 1)); scale = div64_u64(period * pdata->pwm_refclk_freq, (u64)NSEC_PER_SEC * pre_div) - 1; /* * The documentation has the duty ratio given as: * * duty BACKLIGHT * ------- = --------------------- * period BACKLIGHT_SCALE + 1 * * Solve for BACKLIGHT, substituting BACKLIGHT_SCALE according * to definition above and adjusting for nanosecond * representation of duty cycle gives us: */ backlight = div64_u64(state->duty_cycle * pdata->pwm_refclk_freq, (u64)NSEC_PER_SEC * pre_div); if (backlight > scale) backlight = scale; ret = regmap_write(pdata->regmap, SN_PWM_PRE_DIV_REG, pre_div); if (ret) { dev_err(pdata->dev, "failed to update PWM_PRE_DIV\n"); goto out; } ti_sn65dsi86_write_u16(pdata, SN_BACKLIGHT_SCALE_REG, scale); ti_sn65dsi86_write_u16(pdata, SN_BACKLIGHT_REG, backlight); } pwm_en_inv = FIELD_PREP(SN_PWM_EN_MASK, state->enabled) | FIELD_PREP(SN_PWM_INV_MASK, state->polarity == PWM_POLARITY_INVERSED); ret = regmap_write(pdata->regmap, SN_PWM_EN_INV_REG, pwm_en_inv); if (ret) { dev_err(pdata->dev, "failed to update PWM_EN/PWM_INV\n"); goto out; } pdata->pwm_enabled = state->enabled; out: if (!pdata->pwm_enabled) pm_runtime_put_sync(pdata->dev); return ret; } static int ti_sn_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_state *state) { struct ti_sn65dsi86 *pdata = pwm_chip_to_ti_sn_bridge(chip); unsigned int pwm_en_inv; unsigned int pre_div; u16 backlight; u16 scale; int ret; ret = regmap_read(pdata->regmap, SN_PWM_EN_INV_REG, &pwm_en_inv); if (ret) return ret; ret = 
ti_sn65dsi86_read_u16(pdata, SN_BACKLIGHT_SCALE_REG, &scale); if (ret) return ret; ret = ti_sn65dsi86_read_u16(pdata, SN_BACKLIGHT_REG, &backlight); if (ret) return ret; ret = regmap_read(pdata->regmap, SN_PWM_PRE_DIV_REG, &pre_div); if (ret) return ret; state->enabled = FIELD_GET(SN_PWM_EN_MASK, pwm_en_inv); if (FIELD_GET(SN_PWM_INV_MASK, pwm_en_inv)) state->polarity = PWM_POLARITY_INVERSED; else state->polarity = PWM_POLARITY_NORMAL; state->period = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC * pre_div * (scale + 1), pdata->pwm_refclk_freq); state->duty_cycle = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC * pre_div * backlight, pdata->pwm_refclk_freq); if (state->duty_cycle > state->period) state->duty_cycle = state->period; return 0; } static const struct pwm_ops ti_sn_pwm_ops = { .request = ti_sn_pwm_request, .free = ti_sn_pwm_free, .apply = ti_sn_pwm_apply, .get_state = ti_sn_pwm_get_state, .owner = THIS_MODULE, }; static int ti_sn_pwm_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id) { struct ti_sn65dsi86 *pdata = dev_get_drvdata(adev->dev.parent); pdata->pchip.dev = pdata->dev; pdata->pchip.ops = &ti_sn_pwm_ops; pdata->pchip.npwm = 1; pdata->pchip.of_xlate = of_pwm_single_xlate; pdata->pchip.of_pwm_n_cells = 1; return pwmchip_add(&pdata->pchip); } static void ti_sn_pwm_remove(struct auxiliary_device *adev) { struct ti_sn65dsi86 *pdata = dev_get_drvdata(adev->dev.parent); pwmchip_remove(&pdata->pchip); if (pdata->pwm_enabled) pm_runtime_put_sync(pdata->dev); } static const struct auxiliary_device_id ti_sn_pwm_id_table[] = { { .name = "ti_sn65dsi86.pwm", }, {}, }; static struct auxiliary_driver ti_sn_pwm_driver = { .name = "pwm", .probe = ti_sn_pwm_probe, .remove = ti_sn_pwm_remove, .id_table = ti_sn_pwm_id_table, }; static int __init ti_sn_pwm_register(void) { return auxiliary_driver_register(&ti_sn_pwm_driver); } static void ti_sn_pwm_unregister(void) { auxiliary_driver_unregister(&ti_sn_pwm_driver); } #else static inline int 
ti_sn_pwm_pin_request(struct ti_sn65dsi86 *pdata) { return 0; } static inline void ti_sn_pwm_pin_release(struct ti_sn65dsi86 *pdata) {} static inline int ti_sn_pwm_register(void) { return 0; } static inline void ti_sn_pwm_unregister(void) {} #endif /* ----------------------------------------------------------------------------- * GPIO Controller */ #if defined(CONFIG_OF_GPIO) static int tn_sn_bridge_of_xlate(struct gpio_chip *chip, const struct of_phandle_args *gpiospec, u32 *flags) { if (WARN_ON(gpiospec->args_count < chip->of_gpio_n_cells)) return -EINVAL; if (gpiospec->args[0] > chip->ngpio || gpiospec->args[0] < 1) return -EINVAL; if (flags) *flags = gpiospec->args[1]; return gpiospec->args[0] - SN_GPIO_PHYSICAL_OFFSET; } static int ti_sn_bridge_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) { struct ti_sn65dsi86 *pdata = gpiochip_get_data(chip); /* * We already have to keep track of the direction because we use * that to figure out whether we've powered the device. We can * just return that rather than (maybe) powering up the device * to ask its direction. */ return test_bit(offset, pdata->gchip_output) ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN; } static int ti_sn_bridge_gpio_get(struct gpio_chip *chip, unsigned int offset) { struct ti_sn65dsi86 *pdata = gpiochip_get_data(chip); unsigned int val; int ret; /* * When the pin is an input we don't forcibly keep the bridge * powered--we just power it on to read the pin. NOTE: part of * the reason this works is that the bridge defaults (when * powered back on) to all 4 GPIOs being configured as GPIO input. * Also note that if something else is keeping the chip powered the * pm_runtime functions are lightweight increments of a refcount. 
*/ pm_runtime_get_sync(pdata->dev); ret = regmap_read(pdata->regmap, SN_GPIO_IO_REG, &val); pm_runtime_put_autosuspend(pdata->dev); if (ret) return ret; return !!(val & BIT(SN_GPIO_INPUT_SHIFT + offset)); } static void ti_sn_bridge_gpio_set(struct gpio_chip *chip, unsigned int offset, int val) { struct ti_sn65dsi86 *pdata = gpiochip_get_data(chip); int ret; if (!test_bit(offset, pdata->gchip_output)) { dev_err(pdata->dev, "Ignoring GPIO set while input\n"); return; } val &= 1; ret = regmap_update_bits(pdata->regmap, SN_GPIO_IO_REG, BIT(SN_GPIO_OUTPUT_SHIFT + offset), val << (SN_GPIO_OUTPUT_SHIFT + offset)); if (ret) dev_warn(pdata->dev, "Failed to set bridge GPIO %u: %d\n", offset, ret); } static int ti_sn_bridge_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) { struct ti_sn65dsi86 *pdata = gpiochip_get_data(chip); int shift = offset * 2; int ret; if (!test_and_clear_bit(offset, pdata->gchip_output)) return 0; ret = regmap_update_bits(pdata->regmap, SN_GPIO_CTRL_REG, SN_GPIO_MUX_MASK << shift, SN_GPIO_MUX_INPUT << shift); if (ret) { set_bit(offset, pdata->gchip_output); return ret; } /* * NOTE: if nobody else is powering the device this may fully power * it off and when it comes back it will have lost all state, but * that's OK because the default is input and we're now an input. 
*/ pm_runtime_put_autosuspend(pdata->dev); return 0; } static int ti_sn_bridge_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int val) { struct ti_sn65dsi86 *pdata = gpiochip_get_data(chip); int shift = offset * 2; int ret; if (test_and_set_bit(offset, pdata->gchip_output)) return 0; pm_runtime_get_sync(pdata->dev); /* Set value first to avoid glitching */ ti_sn_bridge_gpio_set(chip, offset, val); /* Set direction */ ret = regmap_update_bits(pdata->regmap, SN_GPIO_CTRL_REG, SN_GPIO_MUX_MASK << shift, SN_GPIO_MUX_OUTPUT << shift); if (ret) { clear_bit(offset, pdata->gchip_output); pm_runtime_put_autosuspend(pdata->dev); } return ret; } static int ti_sn_bridge_gpio_request(struct gpio_chip *chip, unsigned int offset) { struct ti_sn65dsi86 *pdata = gpiochip_get_data(chip); if (offset == SN_PWM_GPIO_IDX) return ti_sn_pwm_pin_request(pdata); return 0; } static void ti_sn_bridge_gpio_free(struct gpio_chip *chip, unsigned int offset) { struct ti_sn65dsi86 *pdata = gpiochip_get_data(chip); /* We won't keep pm_runtime if we're input, so switch there on free */ ti_sn_bridge_gpio_direction_input(chip, offset); if (offset == SN_PWM_GPIO_IDX) ti_sn_pwm_pin_release(pdata); } static const char * const ti_sn_bridge_gpio_names[SN_NUM_GPIOS] = { "GPIO1", "GPIO2", "GPIO3", "GPIO4" }; static int ti_sn_gpio_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id) { struct ti_sn65dsi86 *pdata = dev_get_drvdata(adev->dev.parent); int ret; /* Only init if someone is going to use us as a GPIO controller */ if (!of_property_read_bool(pdata->dev->of_node, "gpio-controller")) return 0; pdata->gchip.label = dev_name(pdata->dev); pdata->gchip.parent = pdata->dev; pdata->gchip.owner = THIS_MODULE; pdata->gchip.of_xlate = tn_sn_bridge_of_xlate; pdata->gchip.of_gpio_n_cells = 2; pdata->gchip.request = ti_sn_bridge_gpio_request; pdata->gchip.free = ti_sn_bridge_gpio_free; pdata->gchip.get_direction = ti_sn_bridge_gpio_get_direction; pdata->gchip.direction_input 
= ti_sn_bridge_gpio_direction_input; pdata->gchip.direction_output = ti_sn_bridge_gpio_direction_output; pdata->gchip.get = ti_sn_bridge_gpio_get; pdata->gchip.set = ti_sn_bridge_gpio_set; pdata->gchip.can_sleep = true; pdata->gchip.names = ti_sn_bridge_gpio_names; pdata->gchip.ngpio = SN_NUM_GPIOS; pdata->gchip.base = -1; ret = devm_gpiochip_add_data(&adev->dev, &pdata->gchip, pdata); if (ret) dev_err(pdata->dev, "can't add gpio chip\n"); return ret; } static const struct auxiliary_device_id ti_sn_gpio_id_table[] = { { .name = "ti_sn65dsi86.gpio", }, {}, }; MODULE_DEVICE_TABLE(auxiliary, ti_sn_gpio_id_table); static struct auxiliary_driver ti_sn_gpio_driver = { .name = "gpio", .probe = ti_sn_gpio_probe, .id_table = ti_sn_gpio_id_table, }; static int __init ti_sn_gpio_register(void) { return auxiliary_driver_register(&ti_sn_gpio_driver); } static void ti_sn_gpio_unregister(void) { auxiliary_driver_unregister(&ti_sn_gpio_driver); } #else static inline int ti_sn_gpio_register(void) { return 0; } static inline void ti_sn_gpio_unregister(void) {} #endif /* ----------------------------------------------------------------------------- * Probe & Remove */ static void ti_sn65dsi86_runtime_disable(void *data) { pm_runtime_dont_use_autosuspend(data); pm_runtime_disable(data); } static int ti_sn65dsi86_parse_regulators(struct ti_sn65dsi86 *pdata) { unsigned int i; const char * const ti_sn_bridge_supply_names[] = { "vcca", "vcc", "vccio", "vpll", }; for (i = 0; i < SN_REGULATOR_SUPPLY_NUM; i++) pdata->supplies[i].supply = ti_sn_bridge_supply_names[i]; return devm_regulator_bulk_get(pdata->dev, SN_REGULATOR_SUPPLY_NUM, pdata->supplies); } static int ti_sn65dsi86_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct ti_sn65dsi86 *pdata; int ret; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { DRM_ERROR("device doesn't support I2C\n"); return -ENODEV; } pdata = devm_kzalloc(dev, sizeof(struct ti_sn65dsi86), GFP_KERNEL); if (!pdata) return 
-ENOMEM; dev_set_drvdata(dev, pdata); pdata->dev = dev; mutex_init(&pdata->comms_mutex); pdata->regmap = devm_regmap_init_i2c(client, &ti_sn65dsi86_regmap_config); if (IS_ERR(pdata->regmap)) return dev_err_probe(dev, PTR_ERR(pdata->regmap), "regmap i2c init failed\n"); pdata->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW); if (IS_ERR(pdata->enable_gpio)) return dev_err_probe(dev, PTR_ERR(pdata->enable_gpio), "failed to get enable gpio from DT\n"); ret = ti_sn65dsi86_parse_regulators(pdata); if (ret) return dev_err_probe(dev, ret, "failed to parse regulators\n"); pdata->refclk = devm_clk_get_optional(dev, "refclk"); if (IS_ERR(pdata->refclk)) return dev_err_probe(dev, PTR_ERR(pdata->refclk), "failed to get reference clock\n"); pm_runtime_enable(dev); pm_runtime_set_autosuspend_delay(pdata->dev, 500); pm_runtime_use_autosuspend(pdata->dev); ret = devm_add_action_or_reset(dev, ti_sn65dsi86_runtime_disable, dev); if (ret) return ret; ti_sn65dsi86_debugfs_init(pdata); /* * Break ourselves up into a collection of aux devices. The only real * motiviation here is to solve the chicken-and-egg problem of probe * ordering. The bridge wants the panel to be there when it probes. * The panel wants its HPD GPIO (provided by sn65dsi86 on some boards) * when it probes. The panel and maybe backlight might want the DDC * bus or the pwm_chip. Having sub-devices allows the some sub devices * to finish probing even if others return -EPROBE_DEFER and gets us * around the problems. */ if (IS_ENABLED(CONFIG_OF_GPIO)) { ret = ti_sn65dsi86_add_aux_device(pdata, &pdata->gpio_aux, "gpio"); if (ret) return ret; } if (IS_ENABLED(CONFIG_PWM)) { ret = ti_sn65dsi86_add_aux_device(pdata, &pdata->pwm_aux, "pwm"); if (ret) return ret; } /* * NOTE: At the end of the AUX channel probe we'll add the aux device * for the bridge. This is because the bridge can't be used until the * AUX channel is there and this is a very simple solution to the * dependency problem. 
*/ return ti_sn65dsi86_add_aux_device(pdata, &pdata->aux_aux, "aux"); } static struct i2c_device_id ti_sn65dsi86_id[] = { { "ti,sn65dsi86", 0}, {}, }; MODULE_DEVICE_TABLE(i2c, ti_sn65dsi86_id); static const struct of_device_id ti_sn65dsi86_match_table[] = { {.compatible = "ti,sn65dsi86"}, {}, }; MODULE_DEVICE_TABLE(of, ti_sn65dsi86_match_table); static struct i2c_driver ti_sn65dsi86_driver = { .driver = { .name = "ti_sn65dsi86", .of_match_table = ti_sn65dsi86_match_table, .pm = &ti_sn65dsi86_pm_ops, }, .probe = ti_sn65dsi86_probe, .id_table = ti_sn65dsi86_id, }; static int __init ti_sn65dsi86_init(void) { int ret; ret = i2c_add_driver(&ti_sn65dsi86_driver); if (ret) return ret; ret = ti_sn_gpio_register(); if (ret) goto err_main_was_registered; ret = ti_sn_pwm_register(); if (ret) goto err_gpio_was_registered; ret = auxiliary_driver_register(&ti_sn_aux_driver); if (ret) goto err_pwm_was_registered; ret = auxiliary_driver_register(&ti_sn_bridge_driver); if (ret) goto err_aux_was_registered; return 0; err_aux_was_registered: auxiliary_driver_unregister(&ti_sn_aux_driver); err_pwm_was_registered: ti_sn_pwm_unregister(); err_gpio_was_registered: ti_sn_gpio_unregister(); err_main_was_registered: i2c_del_driver(&ti_sn65dsi86_driver); return ret; } module_init(ti_sn65dsi86_init); static void __exit ti_sn65dsi86_exit(void) { auxiliary_driver_unregister(&ti_sn_bridge_driver); auxiliary_driver_unregister(&ti_sn_aux_driver); ti_sn_pwm_unregister(); ti_sn_gpio_unregister(); i2c_del_driver(&ti_sn65dsi86_driver); } module_exit(ti_sn65dsi86_exit); MODULE_AUTHOR("Sandeep Panda <[email protected]>"); MODULE_DESCRIPTION("sn65dsi86 DSI to eDP bridge driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/bridge/ti-sn65dsi86.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2016 MediaTek Inc. */ #include <linux/delay.h> #include <linux/err.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/module.h> #include <linux/of_graph.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <drm/display/drm_dp_aux_bus.h> #include <drm/display/drm_dp_helper.h> #include <drm/drm_atomic_state_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_edid.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> #include <drm/drm_print.h> #define PAGE0_AUXCH_CFG3 0x76 #define AUXCH_CFG3_RESET 0xff #define PAGE0_SWAUX_ADDR_7_0 0x7d #define PAGE0_SWAUX_ADDR_15_8 0x7e #define PAGE0_SWAUX_ADDR_23_16 0x7f #define SWAUX_ADDR_MASK GENMASK(19, 0) #define PAGE0_SWAUX_LENGTH 0x80 #define SWAUX_LENGTH_MASK GENMASK(3, 0) #define SWAUX_NO_PAYLOAD BIT(7) #define PAGE0_SWAUX_WDATA 0x81 #define PAGE0_SWAUX_RDATA 0x82 #define PAGE0_SWAUX_CTRL 0x83 #define SWAUX_SEND BIT(0) #define PAGE0_SWAUX_STATUS 0x84 #define SWAUX_M_MASK GENMASK(4, 0) #define SWAUX_STATUS_MASK GENMASK(7, 5) #define SWAUX_STATUS_NACK (0x1 << 5) #define SWAUX_STATUS_DEFER (0x2 << 5) #define SWAUX_STATUS_ACKM (0x3 << 5) #define SWAUX_STATUS_INVALID (0x4 << 5) #define SWAUX_STATUS_I2C_NACK (0x5 << 5) #define SWAUX_STATUS_I2C_DEFER (0x6 << 5) #define SWAUX_STATUS_TIMEOUT (0x7 << 5) #define PAGE2_GPIO_H 0xa7 #define PS_GPIO9 BIT(1) #define PAGE2_I2C_BYPASS 0xea #define I2C_BYPASS_EN 0xd0 #define PAGE2_MCS_EN 0xf3 #define MCS_EN BIT(0) #define PAGE3_SET_ADD 0xfe #define VDO_CTL_ADD 0x13 #define VDO_DIS 0x18 #define VDO_EN 0x1c #define NUM_MIPI_LANES 4 #define COMMON_PS8640_REGMAP_CONFIG \ .reg_bits = 8, \ .val_bits = 8, \ .cache_type = REGCACHE_NONE /* * PS8640 uses multiple addresses: * page[0]: for DP control * page[1]: for VIDEO Bridge * page[2]: for control top * page[3]: for DSI Link Control1 * page[4]: for MIPI Phy * page[5]: for VPLL * 
page[6]: for DSI Link Control2 * page[7]: for SPI ROM mapping */ enum page_addr_offset { PAGE0_DP_CNTL = 0, PAGE1_VDO_BDG, PAGE2_TOP_CNTL, PAGE3_DSI_CNTL1, PAGE4_MIPI_PHY, PAGE5_VPLL, PAGE6_DSI_CNTL2, PAGE7_SPI_CNTL, MAX_DEVS }; enum ps8640_vdo_control { DISABLE = VDO_DIS, ENABLE = VDO_EN, }; struct ps8640 { struct drm_bridge bridge; struct drm_bridge *panel_bridge; struct drm_dp_aux aux; struct mipi_dsi_device *dsi; struct i2c_client *page[MAX_DEVS]; struct regmap *regmap[MAX_DEVS]; struct regulator_bulk_data supplies[2]; struct gpio_desc *gpio_reset; struct gpio_desc *gpio_powerdown; struct device_link *link; bool pre_enabled; bool need_post_hpd_delay; }; static const struct regmap_config ps8640_regmap_config[] = { [PAGE0_DP_CNTL] = { COMMON_PS8640_REGMAP_CONFIG, .max_register = 0xbf, }, [PAGE1_VDO_BDG] = { COMMON_PS8640_REGMAP_CONFIG, .max_register = 0xff, }, [PAGE2_TOP_CNTL] = { COMMON_PS8640_REGMAP_CONFIG, .max_register = 0xff, }, [PAGE3_DSI_CNTL1] = { COMMON_PS8640_REGMAP_CONFIG, .max_register = 0xff, }, [PAGE4_MIPI_PHY] = { COMMON_PS8640_REGMAP_CONFIG, .max_register = 0xff, }, [PAGE5_VPLL] = { COMMON_PS8640_REGMAP_CONFIG, .max_register = 0x7f, }, [PAGE6_DSI_CNTL2] = { COMMON_PS8640_REGMAP_CONFIG, .max_register = 0xff, }, [PAGE7_SPI_CNTL] = { COMMON_PS8640_REGMAP_CONFIG, .max_register = 0xff, }, }; static inline struct ps8640 *bridge_to_ps8640(struct drm_bridge *e) { return container_of(e, struct ps8640, bridge); } static inline struct ps8640 *aux_to_ps8640(struct drm_dp_aux *aux) { return container_of(aux, struct ps8640, aux); } static int _ps8640_wait_hpd_asserted(struct ps8640 *ps_bridge, unsigned long wait_us) { struct regmap *map = ps_bridge->regmap[PAGE2_TOP_CNTL]; int status; int ret; /* * Apparently something about the firmware in the chip signals that * HPD goes high by reporting GPIO9 as high (even though HPD isn't * actually connected to GPIO9). 
*/ ret = regmap_read_poll_timeout(map, PAGE2_GPIO_H, status, status & PS_GPIO9, 20000, wait_us); /* * The first time we see HPD go high after a reset we delay an extra * 50 ms. The best guess is that the MCU is doing "stuff" during this * time (maybe talking to the panel) and we don't want to interrupt it. * * No locking is done around "need_post_hpd_delay". If we're here we * know we're holding a PM Runtime reference and the only other place * that touches this is PM Runtime resume. */ if (!ret && ps_bridge->need_post_hpd_delay) { ps_bridge->need_post_hpd_delay = false; msleep(50); } return ret; } static int ps8640_wait_hpd_asserted(struct drm_dp_aux *aux, unsigned long wait_us) { struct ps8640 *ps_bridge = aux_to_ps8640(aux); struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev; int ret; /* * Note that this function is called by code that has already powered * the panel. We have to power ourselves up but we don't need to worry * about powering the panel. */ pm_runtime_get_sync(dev); ret = _ps8640_wait_hpd_asserted(ps_bridge, wait_us); pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return ret; } static ssize_t ps8640_aux_transfer_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { struct ps8640 *ps_bridge = aux_to_ps8640(aux); struct regmap *map = ps_bridge->regmap[PAGE0_DP_CNTL]; struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev; unsigned int len = msg->size; unsigned int data; unsigned int base; int ret; u8 request = msg->request & ~(DP_AUX_I2C_MOT | DP_AUX_I2C_WRITE_STATUS_UPDATE); u8 *buf = msg->buffer; u8 addr_len[PAGE0_SWAUX_LENGTH + 1 - PAGE0_SWAUX_ADDR_7_0]; u8 i; bool is_native_aux = false; if (len > DP_AUX_MAX_PAYLOAD_BYTES) return -EINVAL; if (msg->address & ~SWAUX_ADDR_MASK) return -EINVAL; switch (request) { case DP_AUX_NATIVE_WRITE: case DP_AUX_NATIVE_READ: is_native_aux = true; fallthrough; case DP_AUX_I2C_WRITE: case DP_AUX_I2C_READ: break; default: return -EINVAL; } ret = regmap_write(map, PAGE0_AUXCH_CFG3, 
AUXCH_CFG3_RESET); if (ret) { DRM_DEV_ERROR(dev, "failed to write PAGE0_AUXCH_CFG3: %d\n", ret); return ret; } /* Assume it's good */ msg->reply = 0; base = PAGE0_SWAUX_ADDR_7_0; addr_len[PAGE0_SWAUX_ADDR_7_0 - base] = msg->address; addr_len[PAGE0_SWAUX_ADDR_15_8 - base] = msg->address >> 8; addr_len[PAGE0_SWAUX_ADDR_23_16 - base] = (msg->address >> 16) | (msg->request << 4); addr_len[PAGE0_SWAUX_LENGTH - base] = (len == 0) ? SWAUX_NO_PAYLOAD : ((len - 1) & SWAUX_LENGTH_MASK); regmap_bulk_write(map, PAGE0_SWAUX_ADDR_7_0, addr_len, ARRAY_SIZE(addr_len)); if (len && (request == DP_AUX_NATIVE_WRITE || request == DP_AUX_I2C_WRITE)) { /* Write to the internal FIFO buffer */ for (i = 0; i < len; i++) { ret = regmap_write(map, PAGE0_SWAUX_WDATA, buf[i]); if (ret) { DRM_DEV_ERROR(dev, "failed to write WDATA: %d\n", ret); return ret; } } } regmap_write(map, PAGE0_SWAUX_CTRL, SWAUX_SEND); /* Zero delay loop because i2c transactions are slow already */ regmap_read_poll_timeout(map, PAGE0_SWAUX_CTRL, data, !(data & SWAUX_SEND), 0, 50 * 1000); regmap_read(map, PAGE0_SWAUX_STATUS, &data); if (ret) { DRM_DEV_ERROR(dev, "failed to read PAGE0_SWAUX_STATUS: %d\n", ret); return ret; } switch (data & SWAUX_STATUS_MASK) { case SWAUX_STATUS_NACK: case SWAUX_STATUS_I2C_NACK: /* * The programming guide is not clear about whether a I2C NACK * would trigger SWAUX_STATUS_NACK or SWAUX_STATUS_I2C_NACK. So * we handle both cases together. 
*/ if (is_native_aux) msg->reply |= DP_AUX_NATIVE_REPLY_NACK; else msg->reply |= DP_AUX_I2C_REPLY_NACK; fallthrough; case SWAUX_STATUS_ACKM: len = data & SWAUX_M_MASK; break; case SWAUX_STATUS_DEFER: case SWAUX_STATUS_I2C_DEFER: if (is_native_aux) msg->reply |= DP_AUX_NATIVE_REPLY_DEFER; else msg->reply |= DP_AUX_I2C_REPLY_DEFER; len = data & SWAUX_M_MASK; break; case SWAUX_STATUS_INVALID: return -EOPNOTSUPP; case SWAUX_STATUS_TIMEOUT: return -ETIMEDOUT; } if (len && (request == DP_AUX_NATIVE_READ || request == DP_AUX_I2C_READ)) { /* Read from the internal FIFO buffer */ for (i = 0; i < len; i++) { ret = regmap_read(map, PAGE0_SWAUX_RDATA, &data); if (ret) { DRM_DEV_ERROR(dev, "failed to read RDATA: %d\n", ret); return ret; } buf[i] = data; } } return len; } static ssize_t ps8640_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { struct ps8640 *ps_bridge = aux_to_ps8640(aux); struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev; int ret; pm_runtime_get_sync(dev); ret = ps8640_aux_transfer_msg(aux, msg); pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); return ret; } static void ps8640_bridge_vdo_control(struct ps8640 *ps_bridge, const enum ps8640_vdo_control ctrl) { struct regmap *map = ps_bridge->regmap[PAGE3_DSI_CNTL1]; struct device *dev = &ps_bridge->page[PAGE3_DSI_CNTL1]->dev; u8 vdo_ctrl_buf[] = { VDO_CTL_ADD, ctrl }; int ret; ret = regmap_bulk_write(map, PAGE3_SET_ADD, vdo_ctrl_buf, sizeof(vdo_ctrl_buf)); if (ret < 0) dev_err(dev, "failed to %sable VDO: %d\n", ctrl == ENABLE ? 
"en" : "dis", ret); } static int __maybe_unused ps8640_resume(struct device *dev) { struct ps8640 *ps_bridge = dev_get_drvdata(dev); int ret; ret = regulator_bulk_enable(ARRAY_SIZE(ps_bridge->supplies), ps_bridge->supplies); if (ret < 0) { dev_err(dev, "cannot enable regulators %d\n", ret); return ret; } gpiod_set_value(ps_bridge->gpio_powerdown, 0); gpiod_set_value(ps_bridge->gpio_reset, 1); usleep_range(2000, 2500); gpiod_set_value(ps_bridge->gpio_reset, 0); /* Double reset for T4 and T5 */ msleep(50); gpiod_set_value(ps_bridge->gpio_reset, 1); msleep(50); gpiod_set_value(ps_bridge->gpio_reset, 0); /* We just reset things, so we need a delay after the first HPD */ ps_bridge->need_post_hpd_delay = true; /* * Mystery 200 ms delay for the "MCU to be ready". It's unclear if * this is truly necessary since the MCU will already signal that * things are "good to go" by signaling HPD on "gpio 9". See * _ps8640_wait_hpd_asserted(). For now we'll keep this mystery delay * just in case. */ msleep(200); return 0; } static int __maybe_unused ps8640_suspend(struct device *dev) { struct ps8640 *ps_bridge = dev_get_drvdata(dev); int ret; gpiod_set_value(ps_bridge->gpio_reset, 1); gpiod_set_value(ps_bridge->gpio_powerdown, 1); ret = regulator_bulk_disable(ARRAY_SIZE(ps_bridge->supplies), ps_bridge->supplies); if (ret < 0) dev_err(dev, "cannot disable regulators %d\n", ret); return ret; } static const struct dev_pm_ops ps8640_pm_ops = { SET_RUNTIME_PM_OPS(ps8640_suspend, ps8640_resume, NULL) SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; static void ps8640_atomic_pre_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct ps8640 *ps_bridge = bridge_to_ps8640(bridge); struct regmap *map = ps_bridge->regmap[PAGE2_TOP_CNTL]; struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev; int ret; pm_runtime_get_sync(dev); ret = _ps8640_wait_hpd_asserted(ps_bridge, 200 * 1000); if (ret < 0) dev_warn(dev, "HPD didn't go high: 
%d\n", ret); /* * The Manufacturer Command Set (MCS) is a device dependent interface * intended for factory programming of the display module default * parameters. Once the display module is configured, the MCS shall be * disabled by the manufacturer. Once disabled, all MCS commands are * ignored by the display interface. */ ret = regmap_update_bits(map, PAGE2_MCS_EN, MCS_EN, 0); if (ret < 0) dev_warn(dev, "failed write PAGE2_MCS_EN: %d\n", ret); /* Switch access edp panel's edid through i2c */ ret = regmap_write(map, PAGE2_I2C_BYPASS, I2C_BYPASS_EN); if (ret < 0) dev_warn(dev, "failed write PAGE2_MCS_EN: %d\n", ret); ps8640_bridge_vdo_control(ps_bridge, ENABLE); ps_bridge->pre_enabled = true; } static void ps8640_atomic_post_disable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct ps8640 *ps_bridge = bridge_to_ps8640(bridge); ps_bridge->pre_enabled = false; ps8640_bridge_vdo_control(ps_bridge, DISABLE); pm_runtime_put_sync_suspend(&ps_bridge->page[PAGE0_DP_CNTL]->dev); } static int ps8640_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct ps8640 *ps_bridge = bridge_to_ps8640(bridge); struct device *dev = &ps_bridge->page[0]->dev; int ret; if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) return -EINVAL; ps_bridge->aux.drm_dev = bridge->dev; ret = drm_dp_aux_register(&ps_bridge->aux); if (ret) { dev_err(dev, "failed to register DP AUX channel: %d\n", ret); return ret; } ps_bridge->link = device_link_add(bridge->dev->dev, dev, DL_FLAG_STATELESS); if (!ps_bridge->link) { dev_err(dev, "failed to create device link"); ret = -EINVAL; goto err_devlink; } /* Attach the panel-bridge to the dsi bridge */ ret = drm_bridge_attach(bridge->encoder, ps_bridge->panel_bridge, &ps_bridge->bridge, flags); if (ret) goto err_bridge_attach; return 0; err_bridge_attach: device_link_del(ps_bridge->link); err_devlink: drm_dp_aux_unregister(&ps_bridge->aux); return ret; } static void ps8640_bridge_detach(struct drm_bridge 
*bridge) { struct ps8640 *ps_bridge = bridge_to_ps8640(bridge); drm_dp_aux_unregister(&ps_bridge->aux); if (ps_bridge->link) device_link_del(ps_bridge->link); } static void ps8640_runtime_disable(void *data) { pm_runtime_dont_use_autosuspend(data); pm_runtime_disable(data); } static const struct drm_bridge_funcs ps8640_bridge_funcs = { .attach = ps8640_bridge_attach, .detach = ps8640_bridge_detach, .atomic_post_disable = ps8640_atomic_post_disable, .atomic_pre_enable = ps8640_atomic_pre_enable, .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_reset = drm_atomic_helper_bridge_reset, }; static int ps8640_bridge_get_dsi_resources(struct device *dev, struct ps8640 *ps_bridge) { struct device_node *in_ep, *dsi_node; struct mipi_dsi_device *dsi; struct mipi_dsi_host *host; const struct mipi_dsi_device_info info = { .type = "ps8640", .channel = 0, .node = NULL, }; /* port@0 is ps8640 dsi input port */ in_ep = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1); if (!in_ep) return -ENODEV; dsi_node = of_graph_get_remote_port_parent(in_ep); of_node_put(in_ep); if (!dsi_node) return -ENODEV; host = of_find_mipi_dsi_host_by_node(dsi_node); of_node_put(dsi_node); if (!host) return -EPROBE_DEFER; dsi = devm_mipi_dsi_device_register_full(dev, host, &info); if (IS_ERR(dsi)) { dev_err(dev, "failed to create dsi device\n"); return PTR_ERR(dsi); } ps_bridge->dsi = dsi; dsi->host = host; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE; dsi->format = MIPI_DSI_FMT_RGB888; dsi->lanes = NUM_MIPI_LANES; return 0; } static int ps8640_bridge_link_panel(struct drm_dp_aux *aux) { struct ps8640 *ps_bridge = aux_to_ps8640(aux); struct device *dev = aux->dev; struct device_node *np = dev->of_node; int ret; /* * NOTE about returning -EPROBE_DEFER from this function: if we * return an error (most relevant to -EPROBE_DEFER) it will only * be passed out to ps8640_probe() if it called this 
directly (AKA the * panel isn't under the "aux-bus" node). That should be fine because * if the panel is under "aux-bus" it's guaranteed to have probed by * the time this function has been called. */ /* port@1 is ps8640 output port */ ps_bridge->panel_bridge = devm_drm_of_get_bridge(dev, np, 1, 0); if (IS_ERR(ps_bridge->panel_bridge)) return PTR_ERR(ps_bridge->panel_bridge); ret = devm_drm_bridge_add(dev, &ps_bridge->bridge); if (ret) return ret; return devm_mipi_dsi_attach(dev, ps_bridge->dsi); } static int ps8640_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct ps8640 *ps_bridge; int ret; u32 i; ps_bridge = devm_kzalloc(dev, sizeof(*ps_bridge), GFP_KERNEL); if (!ps_bridge) return -ENOMEM; ps_bridge->supplies[0].supply = "vdd12"; ps_bridge->supplies[1].supply = "vdd33"; ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ps_bridge->supplies), ps_bridge->supplies); if (ret) return ret; ps_bridge->gpio_powerdown = devm_gpiod_get(&client->dev, "powerdown", GPIOD_OUT_HIGH); if (IS_ERR(ps_bridge->gpio_powerdown)) return PTR_ERR(ps_bridge->gpio_powerdown); /* * Assert the reset to avoid the bridge being initialized prematurely */ ps_bridge->gpio_reset = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(ps_bridge->gpio_reset)) return PTR_ERR(ps_bridge->gpio_reset); ps_bridge->bridge.funcs = &ps8640_bridge_funcs; ps_bridge->bridge.of_node = dev->of_node; ps_bridge->bridge.type = DRM_MODE_CONNECTOR_eDP; /* * Get MIPI DSI resources early. These can return -EPROBE_DEFER so * we want to get them out of the way sooner. 
*/ ret = ps8640_bridge_get_dsi_resources(&client->dev, ps_bridge); if (ret) return ret; ps_bridge->page[PAGE0_DP_CNTL] = client; ps_bridge->regmap[PAGE0_DP_CNTL] = devm_regmap_init_i2c(client, ps8640_regmap_config); if (IS_ERR(ps_bridge->regmap[PAGE0_DP_CNTL])) return PTR_ERR(ps_bridge->regmap[PAGE0_DP_CNTL]); for (i = 1; i < ARRAY_SIZE(ps_bridge->page); i++) { ps_bridge->page[i] = devm_i2c_new_dummy_device(&client->dev, client->adapter, client->addr + i); if (IS_ERR(ps_bridge->page[i])) return PTR_ERR(ps_bridge->page[i]); ps_bridge->regmap[i] = devm_regmap_init_i2c(ps_bridge->page[i], ps8640_regmap_config + i); if (IS_ERR(ps_bridge->regmap[i])) return PTR_ERR(ps_bridge->regmap[i]); } i2c_set_clientdata(client, ps_bridge); ps_bridge->aux.name = "parade-ps8640-aux"; ps_bridge->aux.dev = dev; ps_bridge->aux.transfer = ps8640_aux_transfer; ps_bridge->aux.wait_hpd_asserted = ps8640_wait_hpd_asserted; drm_dp_aux_init(&ps_bridge->aux); pm_runtime_enable(dev); /* * Powering on ps8640 takes ~300ms. To avoid wasting time on power * cycling ps8640 too often, set autosuspend_delay to 2000ms to ensure * the bridge wouldn't suspend in between each _aux_transfer_msg() call * during EDID read (~20ms in my experiment) and in between the last * _aux_transfer_msg() call during EDID read and the _pre_enable() call * (~100ms in my experiment). */ pm_runtime_set_autosuspend_delay(dev, 2000); pm_runtime_use_autosuspend(dev); pm_suspend_ignore_children(dev, true); ret = devm_add_action_or_reset(dev, ps8640_runtime_disable, dev); if (ret) return ret; ret = devm_of_dp_aux_populate_bus(&ps_bridge->aux, ps8640_bridge_link_panel); /* * If devm_of_dp_aux_populate_bus() returns -ENODEV then it's up to * usa to call ps8640_bridge_link_panel() directly. NOTE: in this case * the function is allowed to -EPROBE_DEFER. 
*/ if (ret == -ENODEV) return ps8640_bridge_link_panel(&ps_bridge->aux); return ret; } static const struct of_device_id ps8640_match[] = { { .compatible = "parade,ps8640" }, { } }; MODULE_DEVICE_TABLE(of, ps8640_match); static struct i2c_driver ps8640_driver = { .probe = ps8640_probe, .driver = { .name = "ps8640", .of_match_table = ps8640_match, .pm = &ps8640_pm_ops, }, }; module_i2c_driver(ps8640_driver); MODULE_AUTHOR("Jitao Shi <[email protected]>"); MODULE_AUTHOR("CK Hu <[email protected]>"); MODULE_AUTHOR("Enric Balletbo i Serra <[email protected]>"); MODULE_DESCRIPTION("PARADE ps8640 DSI-eDP converter driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/bridge/parade-ps8640.c
// SPDX-License-Identifier: GPL-2.0-only /* * Chrontel CH7033 Video Encoder Driver * * Copyright (C) 2019,2020 Lubomir Rintel */ #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/module.h> #include <linux/regmap.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_edid.h> #include <drm/drm_of.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> /* Page 0, Register 0x07 */ enum { DRI_PD = BIT(3), IO_PD = BIT(5), }; /* Page 0, Register 0x08 */ enum { DRI_PDDRI = GENMASK(7, 4), PDDAC = GENMASK(3, 1), PANEN = BIT(0), }; /* Page 0, Register 0x09 */ enum { DPD = BIT(7), GCKOFF = BIT(6), TV_BP = BIT(5), SCLPD = BIT(4), SDPD = BIT(3), VGA_PD = BIT(2), HDBKPD = BIT(1), HDMI_PD = BIT(0), }; /* Page 0, Register 0x0a */ enum { MEMINIT = BIT(7), MEMIDLE = BIT(6), MEMPD = BIT(5), STOP = BIT(4), LVDS_PD = BIT(3), HD_DVIB = BIT(2), HDCP_PD = BIT(1), MCU_PD = BIT(0), }; /* Page 0, Register 0x18 */ enum { IDF = GENMASK(7, 4), INTEN = BIT(3), SWAP = GENMASK(2, 0), }; enum { BYTE_SWAP_RGB = 0, BYTE_SWAP_RBG = 1, BYTE_SWAP_GRB = 2, BYTE_SWAP_GBR = 3, BYTE_SWAP_BRG = 4, BYTE_SWAP_BGR = 5, }; /* Page 0, Register 0x19 */ enum { HPO_I = BIT(5), VPO_I = BIT(4), DEPO_I = BIT(3), CRYS_EN = BIT(2), GCLKFREQ = GENMASK(2, 0), }; /* Page 0, Register 0x2e */ enum { HFLIP = BIT(7), VFLIP = BIT(6), DEPO_O = BIT(5), HPO_O = BIT(4), VPO_O = BIT(3), TE = GENMASK(2, 0), }; /* Page 0, Register 0x2b */ enum { SWAPS = GENMASK(7, 4), VFMT = GENMASK(3, 0), }; /* Page 0, Register 0x54 */ enum { COMP_BP = BIT(7), DAC_EN_T = BIT(6), HWO_HDMI_HI = GENMASK(5, 3), HOO_HDMI_HI = GENMASK(2, 0), }; /* Page 0, Register 0x57 */ enum { FLDSEN = BIT(7), VWO_HDMI_HI = GENMASK(5, 3), VOO_HDMI_HI = GENMASK(2, 0), }; /* Page 0, Register 0x7e */ enum { HDMI_LVDS_SEL = BIT(7), DE_GEN = BIT(6), PWM_INDEX_HI = BIT(5), USE_DE = BIT(4), R_INT = GENMASK(3, 0), }; /* Page 1, Register 0x07 */ enum { BPCKSEL = BIT(7), DRI_CMFB_EN = BIT(6), CEC_PUEN = BIT(5), CEC_T = 
BIT(3), CKINV = BIT(2), CK_TVINV = BIT(1), DRI_CKS2 = BIT(0), }; /* Page 1, Register 0x08 */ enum { DACG = BIT(6), DACKTST = BIT(5), DEDGEB = BIT(4), SYO = BIT(3), DRI_IT_LVDS = GENMASK(2, 1), DISPON = BIT(0), }; /* Page 1, Register 0x0c */ enum { DRI_PLL_CP = GENMASK(7, 6), DRI_PLL_DIVSEL = BIT(5), DRI_PLL_N1_1 = BIT(4), DRI_PLL_N1_0 = BIT(3), DRI_PLL_N3_1 = BIT(2), DRI_PLL_N3_0 = BIT(1), DRI_PLL_CKTSTEN = BIT(0), }; /* Page 1, Register 0x6b */ enum { VCO3CS = GENMASK(7, 6), ICPGBK2_0 = GENMASK(5, 3), DRI_VCO357SC = BIT(2), PDPLL2 = BIT(1), DRI_PD_SER = BIT(0), }; /* Page 1, Register 0x6c */ enum { PLL2N11 = GENMASK(7, 4), PLL2N5_4 = BIT(3), PLL2N5_TOP = BIT(2), DRI_PLL_PD = BIT(1), PD_I2CM = BIT(0), }; /* Page 3, Register 0x28 */ enum { DIFF_EN = GENMASK(7, 6), CORREC_EN = GENMASK(5, 4), VGACLK_BP = BIT(3), HM_LV_SEL = BIT(2), HD_VGA_SEL = BIT(1), }; /* Page 3, Register 0x2a */ enum { LVDSCLK_BP = BIT(7), HDTVCLK_BP = BIT(6), HDMICLK_BP = BIT(5), HDTV_BP = BIT(4), HDMI_BP = BIT(3), THRWL = GENMASK(2, 0), }; /* Page 4, Register 0x52 */ enum { PGM_ARSTB = BIT(7), MCU_ARSTB = BIT(6), MCU_RETB = BIT(2), RESETIB = BIT(1), RESETDB = BIT(0), }; struct ch7033_priv { struct regmap *regmap; struct drm_bridge *next_bridge; struct drm_bridge bridge; struct drm_connector connector; }; #define conn_to_ch7033_priv(x) \ container_of(x, struct ch7033_priv, connector) #define bridge_to_ch7033_priv(x) \ container_of(x, struct ch7033_priv, bridge) static enum drm_connector_status ch7033_connector_detect( struct drm_connector *connector, bool force) { struct ch7033_priv *priv = conn_to_ch7033_priv(connector); return drm_bridge_detect(priv->next_bridge); } static const struct drm_connector_funcs ch7033_connector_funcs = { .reset = drm_atomic_helper_connector_reset, .fill_modes = drm_helper_probe_single_connector_modes, .detect = ch7033_connector_detect, .destroy = drm_connector_cleanup, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = 
drm_atomic_helper_connector_destroy_state, }; static int ch7033_connector_get_modes(struct drm_connector *connector) { struct ch7033_priv *priv = conn_to_ch7033_priv(connector); struct edid *edid; int ret; edid = drm_bridge_get_edid(priv->next_bridge, connector); drm_connector_update_edid_property(connector, edid); if (edid) { ret = drm_add_edid_modes(connector, edid); kfree(edid); } else { ret = drm_add_modes_noedid(connector, 1920, 1080); drm_set_preferred_mode(connector, 1024, 768); } return ret; } static struct drm_encoder *ch7033_connector_best_encoder( struct drm_connector *connector) { struct ch7033_priv *priv = conn_to_ch7033_priv(connector); return priv->bridge.encoder; } static const struct drm_connector_helper_funcs ch7033_connector_helper_funcs = { .get_modes = ch7033_connector_get_modes, .best_encoder = ch7033_connector_best_encoder, }; static void ch7033_hpd_event(void *arg, enum drm_connector_status status) { struct ch7033_priv *priv = arg; if (priv->bridge.dev) drm_helper_hpd_irq_event(priv->connector.dev); } static int ch7033_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge); struct drm_connector *connector = &priv->connector; int ret; ret = drm_bridge_attach(bridge->encoder, priv->next_bridge, bridge, DRM_BRIDGE_ATTACH_NO_CONNECTOR); if (ret) return ret; if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) return 0; if (priv->next_bridge->ops & DRM_BRIDGE_OP_DETECT) { connector->polled = DRM_CONNECTOR_POLL_HPD; } else { connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; } if (priv->next_bridge->ops & DRM_BRIDGE_OP_HPD) { drm_bridge_hpd_enable(priv->next_bridge, ch7033_hpd_event, priv); } drm_connector_helper_add(connector, &ch7033_connector_helper_funcs); ret = drm_connector_init_with_ddc(bridge->dev, &priv->connector, &ch7033_connector_funcs, priv->next_bridge->type, priv->next_bridge->ddc); if (ret) { DRM_ERROR("Failed to initialize 
connector\n"); return ret; } return drm_connector_attach_encoder(&priv->connector, bridge->encoder); } static void ch7033_bridge_detach(struct drm_bridge *bridge) { struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge); if (priv->next_bridge->ops & DRM_BRIDGE_OP_HPD) drm_bridge_hpd_disable(priv->next_bridge); drm_connector_cleanup(&priv->connector); } static enum drm_mode_status ch7033_bridge_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode) { if (mode->clock > 165000) return MODE_CLOCK_HIGH; if (mode->hdisplay >= 1920) return MODE_BAD_HVALUE; if (mode->vdisplay >= 1080) return MODE_BAD_VVALUE; return MODE_OK; } static void ch7033_bridge_disable(struct drm_bridge *bridge) { struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge); regmap_write(priv->regmap, 0x03, 0x04); regmap_update_bits(priv->regmap, 0x52, RESETDB, 0x00); } static void ch7033_bridge_enable(struct drm_bridge *bridge) { struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge); regmap_write(priv->regmap, 0x03, 0x04); regmap_update_bits(priv->regmap, 0x52, RESETDB, RESETDB); } static void ch7033_bridge_mode_set(struct drm_bridge *bridge, const struct drm_display_mode *mode, const struct drm_display_mode *adjusted_mode) { struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge); int hbporch = mode->hsync_start - mode->hdisplay; int hsynclen = mode->hsync_end - mode->hsync_start; int vbporch = mode->vsync_start - mode->vdisplay; int vsynclen = mode->vsync_end - mode->vsync_start; /* * Page 4 */ regmap_write(priv->regmap, 0x03, 0x04); /* Turn everything off to set all the registers to their defaults. */ regmap_write(priv->regmap, 0x52, 0x00); /* Bring I/O block up. */ regmap_write(priv->regmap, 0x52, RESETIB); /* * Page 0 */ regmap_write(priv->regmap, 0x03, 0x00); /* Bring up parts we need from the power down. 
*/ regmap_update_bits(priv->regmap, 0x07, DRI_PD | IO_PD, 0); regmap_update_bits(priv->regmap, 0x08, DRI_PDDRI | PDDAC | PANEN, 0); regmap_update_bits(priv->regmap, 0x09, DPD | GCKOFF | HDMI_PD | VGA_PD, 0); regmap_update_bits(priv->regmap, 0x0a, HD_DVIB, 0); /* Horizontal input timing. */ regmap_write(priv->regmap, 0x0b, (mode->htotal >> 8) << 3 | (mode->hdisplay >> 8)); regmap_write(priv->regmap, 0x0c, mode->hdisplay); regmap_write(priv->regmap, 0x0d, mode->htotal); regmap_write(priv->regmap, 0x0e, (hsynclen >> 8) << 3 | (hbporch >> 8)); regmap_write(priv->regmap, 0x0f, hbporch); regmap_write(priv->regmap, 0x10, hsynclen); /* Vertical input timing. */ regmap_write(priv->regmap, 0x11, (mode->vtotal >> 8) << 3 | (mode->vdisplay >> 8)); regmap_write(priv->regmap, 0x12, mode->vdisplay); regmap_write(priv->regmap, 0x13, mode->vtotal); regmap_write(priv->regmap, 0x14, ((vsynclen >> 8) << 3) | (vbporch >> 8)); regmap_write(priv->regmap, 0x15, vbporch); regmap_write(priv->regmap, 0x16, vsynclen); /* Input color swap. */ regmap_update_bits(priv->regmap, 0x18, SWAP, BYTE_SWAP_BGR); /* Input clock and sync polarity. */ regmap_update_bits(priv->regmap, 0x19, 0x1, mode->clock >> 16); regmap_update_bits(priv->regmap, 0x19, HPO_I | VPO_I | GCLKFREQ, (mode->flags & DRM_MODE_FLAG_PHSYNC) ? HPO_I : 0 | (mode->flags & DRM_MODE_FLAG_PVSYNC) ? VPO_I : 0 | mode->clock >> 16); regmap_write(priv->regmap, 0x1a, mode->clock >> 8); regmap_write(priv->regmap, 0x1b, mode->clock); /* Horizontal output timing. */ regmap_write(priv->regmap, 0x1f, (mode->htotal >> 8) << 3 | (mode->hdisplay >> 8)); regmap_write(priv->regmap, 0x20, mode->hdisplay); regmap_write(priv->regmap, 0x21, mode->htotal); /* Vertical output timing. 
*/ regmap_write(priv->regmap, 0x25, (mode->vtotal >> 8) << 3 | (mode->vdisplay >> 8)); regmap_write(priv->regmap, 0x26, mode->vdisplay); regmap_write(priv->regmap, 0x27, mode->vtotal); /* VGA channel bypass */ regmap_update_bits(priv->regmap, 0x2b, VFMT, 9); /* Output sync polarity. */ regmap_update_bits(priv->regmap, 0x2e, HPO_O | VPO_O, (mode->flags & DRM_MODE_FLAG_PHSYNC) ? HPO_O : 0 | (mode->flags & DRM_MODE_FLAG_PVSYNC) ? VPO_O : 0); /* HDMI horizontal output timing. */ regmap_update_bits(priv->regmap, 0x54, HWO_HDMI_HI | HOO_HDMI_HI, (hsynclen >> 8) << 3 | (hbporch >> 8)); regmap_write(priv->regmap, 0x55, hbporch); regmap_write(priv->regmap, 0x56, hsynclen); /* HDMI vertical output timing. */ regmap_update_bits(priv->regmap, 0x57, VWO_HDMI_HI | VOO_HDMI_HI, (vsynclen >> 8) << 3 | (vbporch >> 8)); regmap_write(priv->regmap, 0x58, vbporch); regmap_write(priv->regmap, 0x59, vsynclen); /* Pick HDMI, not LVDS. */ regmap_update_bits(priv->regmap, 0x7e, HDMI_LVDS_SEL, HDMI_LVDS_SEL); /* * Page 1 */ regmap_write(priv->regmap, 0x03, 0x01); /* No idea what these do, but VGA is wobbly and blinky without them. */ regmap_update_bits(priv->regmap, 0x07, CKINV, CKINV); regmap_update_bits(priv->regmap, 0x08, DISPON, DISPON); /* DRI PLL */ regmap_update_bits(priv->regmap, 0x0c, DRI_PLL_DIVSEL, DRI_PLL_DIVSEL); if (mode->clock <= 40000) { regmap_update_bits(priv->regmap, 0x0c, DRI_PLL_N1_1 | DRI_PLL_N1_0 | DRI_PLL_N3_1 | DRI_PLL_N3_0, 0); } else if (mode->clock < 80000) { regmap_update_bits(priv->regmap, 0x0c, DRI_PLL_N1_1 | DRI_PLL_N1_0 | DRI_PLL_N3_1 | DRI_PLL_N3_0, DRI_PLL_N3_0 | DRI_PLL_N1_0); } else { regmap_update_bits(priv->regmap, 0x0c, DRI_PLL_N1_1 | DRI_PLL_N1_0 | DRI_PLL_N3_1 | DRI_PLL_N3_0, DRI_PLL_N3_1 | DRI_PLL_N1_1); } /* This seems to be color calibration for VGA. 
*/ regmap_write(priv->regmap, 0x64, 0x29); /* LSB Blue */ regmap_write(priv->regmap, 0x65, 0x29); /* LSB Green */ regmap_write(priv->regmap, 0x66, 0x29); /* LSB Red */ regmap_write(priv->regmap, 0x67, 0x00); /* MSB Blue */ regmap_write(priv->regmap, 0x68, 0x00); /* MSB Green */ regmap_write(priv->regmap, 0x69, 0x00); /* MSB Red */ regmap_update_bits(priv->regmap, 0x6b, DRI_PD_SER, 0x00); regmap_update_bits(priv->regmap, 0x6c, DRI_PLL_PD, 0x00); /* * Page 3 */ regmap_write(priv->regmap, 0x03, 0x03); /* More bypasses and apparently another HDMI/LVDS selector. */ regmap_update_bits(priv->regmap, 0x28, VGACLK_BP | HM_LV_SEL, VGACLK_BP | HM_LV_SEL); regmap_update_bits(priv->regmap, 0x2a, HDMICLK_BP | HDMI_BP, HDMICLK_BP | HDMI_BP); /* * Page 4 */ regmap_write(priv->regmap, 0x03, 0x04); /* Output clock. */ regmap_write(priv->regmap, 0x10, mode->clock >> 16); regmap_write(priv->regmap, 0x11, mode->clock >> 8); regmap_write(priv->regmap, 0x12, mode->clock); } static const struct drm_bridge_funcs ch7033_bridge_funcs = { .attach = ch7033_bridge_attach, .detach = ch7033_bridge_detach, .mode_valid = ch7033_bridge_mode_valid, .disable = ch7033_bridge_disable, .enable = ch7033_bridge_enable, .mode_set = ch7033_bridge_mode_set, }; static const struct regmap_config ch7033_regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = 0x7f, }; static int ch7033_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct ch7033_priv *priv; unsigned int val; int ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; dev_set_drvdata(dev, priv); ret = drm_of_find_panel_or_bridge(dev->of_node, 1, -1, NULL, &priv->next_bridge); if (ret) return ret; priv->regmap = devm_regmap_init_i2c(client, &ch7033_regmap_config); if (IS_ERR(priv->regmap)) { dev_err(&client->dev, "regmap init failed\n"); return PTR_ERR(priv->regmap); } ret = regmap_read(priv->regmap, 0x00, &val); if (ret < 0) { dev_err(&client->dev, "error reading the model id: %d\n", 
ret); return ret; } if ((val & 0xf7) != 0x56) { dev_err(&client->dev, "the device is not a ch7033\n"); return -ENODEV; } regmap_write(priv->regmap, 0x03, 0x04); ret = regmap_read(priv->regmap, 0x51, &val); if (ret < 0) { dev_err(&client->dev, "error reading the model id: %d\n", ret); return ret; } if ((val & 0x0f) != 3) { dev_err(&client->dev, "unknown revision %u\n", val); return -ENODEV; } INIT_LIST_HEAD(&priv->bridge.list); priv->bridge.funcs = &ch7033_bridge_funcs; priv->bridge.of_node = dev->of_node; drm_bridge_add(&priv->bridge); dev_info(dev, "Chrontel CH7033 Video Encoder\n"); return 0; } static void ch7033_remove(struct i2c_client *client) { struct device *dev = &client->dev; struct ch7033_priv *priv = dev_get_drvdata(dev); drm_bridge_remove(&priv->bridge); } static const struct of_device_id ch7033_dt_ids[] = { { .compatible = "chrontel,ch7033", }, { } }; MODULE_DEVICE_TABLE(of, ch7033_dt_ids); static const struct i2c_device_id ch7033_ids[] = { { "ch7033", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ch7033_ids); static struct i2c_driver ch7033_driver = { .probe = ch7033_probe, .remove = ch7033_remove, .driver = { .name = "ch7033", .of_match_table = ch7033_dt_ids, }, .id_table = ch7033_ids, }; module_i2c_driver(ch7033_driver); MODULE_AUTHOR("Lubomir Rintel <[email protected]>"); MODULE_DESCRIPTION("Chrontel CH7033 Video Encoder Driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/bridge/chrontel-ch7033.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2018 Samsung Electronics Co., Ltd * * Authors: * Andrzej Hajda <[email protected]> * Maciej Purski <[email protected]> */ #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/of_graph.h> #include <linux/regulator/consumer.h> #include <video/mipi_display.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_of.h> #include <drm/drm_print.h> #define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end)) #define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end)) /* PPI layer registers */ #define PPI_STARTPPI 0x0104 /* START control bit */ #define PPI_LPTXTIMECNT 0x0114 /* LPTX timing signal */ #define PPI_LANEENABLE 0x0134 /* Enables each lane */ #define PPI_TX_RX_TA 0x013C /* BTA timing parameters */ #define PPI_D0S_CLRSIPOCOUNT 0x0164 /* Assertion timer for Lane 0 */ #define PPI_D1S_CLRSIPOCOUNT 0x0168 /* Assertion timer for Lane 1 */ #define PPI_D2S_CLRSIPOCOUNT 0x016C /* Assertion timer for Lane 2 */ #define PPI_D3S_CLRSIPOCOUNT 0x0170 /* Assertion timer for Lane 3 */ #define PPI_START_FUNCTION 1 /* DSI layer registers */ #define DSI_STARTDSI 0x0204 /* START control bit of DSI-TX */ #define DSI_LANEENABLE 0x0210 /* Enables each lane */ #define DSI_RX_START 1 /* Video path registers */ #define VP_CTRL 0x0450 /* Video Path Control */ #define VP_CTRL_MSF BIT(0) /* Magic square in RGB666 */ #define VP_CTRL_VTGEN BIT(4) /* Use chip clock for timing */ #define VP_CTRL_EVTMODE BIT(5) /* Event mode */ #define VP_CTRL_RGB888 BIT(8) /* RGB888 mode */ #define VP_CTRL_VSDELAY(v) FLD_VAL(v, 31, 20) /* VSYNC delay */ #define VP_CTRL_HSPOL BIT(17) /* Polarity of HSYNC signal */ #define VP_CTRL_DEPOL BIT(18) /* Polarity of DE signal */ #define VP_CTRL_VSPOL BIT(19) /* Polarity of VSYNC signal */ #define VP_HTIM1 0x0454 /* Horizontal Timing Control 1 */ #define VP_HTIM1_HBP(v) FLD_VAL(v, 
24, 16) #define VP_HTIM1_HSYNC(v) FLD_VAL(v, 8, 0) #define VP_HTIM2 0x0458 /* Horizontal Timing Control 2 */ #define VP_HTIM2_HFP(v) FLD_VAL(v, 24, 16) #define VP_HTIM2_HACT(v) FLD_VAL(v, 10, 0) #define VP_VTIM1 0x045C /* Vertical Timing Control 1 */ #define VP_VTIM1_VBP(v) FLD_VAL(v, 23, 16) #define VP_VTIM1_VSYNC(v) FLD_VAL(v, 7, 0) #define VP_VTIM2 0x0460 /* Vertical Timing Control 2 */ #define VP_VTIM2_VFP(v) FLD_VAL(v, 23, 16) #define VP_VTIM2_VACT(v) FLD_VAL(v, 10, 0) #define VP_VFUEN 0x0464 /* Video Frame Timing Update Enable */ /* LVDS registers */ #define LV_MX0003 0x0480 /* Mux input bit 0 to 3 */ #define LV_MX0407 0x0484 /* Mux input bit 4 to 7 */ #define LV_MX0811 0x0488 /* Mux input bit 8 to 11 */ #define LV_MX1215 0x048C /* Mux input bit 12 to 15 */ #define LV_MX1619 0x0490 /* Mux input bit 16 to 19 */ #define LV_MX2023 0x0494 /* Mux input bit 20 to 23 */ #define LV_MX2427 0x0498 /* Mux input bit 24 to 27 */ #define LV_MX(b0, b1, b2, b3) (FLD_VAL(b0, 4, 0) | FLD_VAL(b1, 12, 8) | \ FLD_VAL(b2, 20, 16) | FLD_VAL(b3, 28, 24)) /* Input bit numbers used in mux registers */ enum { LVI_R0, LVI_R1, LVI_R2, LVI_R3, LVI_R4, LVI_R5, LVI_R6, LVI_R7, LVI_G0, LVI_G1, LVI_G2, LVI_G3, LVI_G4, LVI_G5, LVI_G6, LVI_G7, LVI_B0, LVI_B1, LVI_B2, LVI_B3, LVI_B4, LVI_B5, LVI_B6, LVI_B7, LVI_HS, LVI_VS, LVI_DE, LVI_L0 }; #define LV_CFG 0x049C /* LVDS Configuration */ #define LV_PHY0 0x04A0 /* LVDS PHY 0 */ #define LV_PHY0_RST(v) FLD_VAL(v, 22, 22) /* PHY reset */ #define LV_PHY0_IS(v) FLD_VAL(v, 15, 14) #define LV_PHY0_ND(v) FLD_VAL(v, 4, 0) /* Frequency range select */ #define LV_PHY0_PRBS_ON(v) FLD_VAL(v, 20, 16) /* Clock/Data Flag pins */ /* System registers */ #define SYS_RST 0x0504 /* System Reset */ #define SYS_ID 0x0580 /* System ID */ #define SYS_RST_I2CS BIT(0) /* Reset I2C-Slave controller */ #define SYS_RST_I2CM BIT(1) /* Reset I2C-Master controller */ #define SYS_RST_LCD BIT(2) /* Reset LCD controller */ #define SYS_RST_BM BIT(3) /* Reset Bus Management controller 
*/ #define SYS_RST_DSIRX BIT(4) /* Reset DSI-RX and App controller */ #define SYS_RST_REG BIT(5) /* Reset Register module */ #define LPX_PERIOD 2 #define TTA_SURE 3 #define TTA_GET 0x20000 /* Lane enable PPI and DSI register bits */ #define LANEENABLE_CLEN BIT(0) #define LANEENABLE_L0EN BIT(1) #define LANEENABLE_L1EN BIT(2) #define LANEENABLE_L2EN BIT(3) #define LANEENABLE_L3EN BIT(4) /* LVCFG fields */ #define LV_CFG_LVEN BIT(0) #define LV_CFG_LVDLINK BIT(1) #define LV_CFG_CLKPOL1 BIT(2) #define LV_CFG_CLKPOL2 BIT(3) static const char * const tc358764_supplies[] = { "vddc", "vddio", "vddlvds" }; struct tc358764 { struct device *dev; struct drm_bridge bridge; struct drm_bridge *next_bridge; struct regulator_bulk_data supplies[ARRAY_SIZE(tc358764_supplies)]; struct gpio_desc *gpio_reset; int error; }; static int tc358764_clear_error(struct tc358764 *ctx) { int ret = ctx->error; ctx->error = 0; return ret; } static void tc358764_read(struct tc358764 *ctx, u16 addr, u32 *val) { struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); ssize_t ret; if (ctx->error) return; cpu_to_le16s(&addr); ret = mipi_dsi_generic_read(dsi, &addr, sizeof(addr), val, sizeof(*val)); if (ret >= 0) le32_to_cpus(val); dev_dbg(ctx->dev, "read: addr=0x%04x data=0x%08x\n", addr, *val); } static void tc358764_write(struct tc358764 *ctx, u16 addr, u32 val) { struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); ssize_t ret; u8 data[6]; if (ctx->error) return; data[0] = addr; data[1] = addr >> 8; data[2] = val; data[3] = val >> 8; data[4] = val >> 16; data[5] = val >> 24; ret = mipi_dsi_generic_write(dsi, data, sizeof(data)); if (ret < 0) ctx->error = ret; } static inline struct tc358764 *bridge_to_tc358764(struct drm_bridge *bridge) { return container_of(bridge, struct tc358764, bridge); } static int tc358764_init(struct tc358764 *ctx) { u32 v = 0; tc358764_read(ctx, SYS_ID, &v); if (ctx->error) return tc358764_clear_error(ctx); dev_info(ctx->dev, "ID: %#x\n", v); /* configure PPI 
counters */ tc358764_write(ctx, PPI_TX_RX_TA, TTA_GET | TTA_SURE); tc358764_write(ctx, PPI_LPTXTIMECNT, LPX_PERIOD); tc358764_write(ctx, PPI_D0S_CLRSIPOCOUNT, 5); tc358764_write(ctx, PPI_D1S_CLRSIPOCOUNT, 5); tc358764_write(ctx, PPI_D2S_CLRSIPOCOUNT, 5); tc358764_write(ctx, PPI_D3S_CLRSIPOCOUNT, 5); /* enable four data lanes and clock lane */ tc358764_write(ctx, PPI_LANEENABLE, LANEENABLE_L3EN | LANEENABLE_L2EN | LANEENABLE_L1EN | LANEENABLE_L0EN | LANEENABLE_CLEN); tc358764_write(ctx, DSI_LANEENABLE, LANEENABLE_L3EN | LANEENABLE_L2EN | LANEENABLE_L1EN | LANEENABLE_L0EN | LANEENABLE_CLEN); /* start */ tc358764_write(ctx, PPI_STARTPPI, PPI_START_FUNCTION); tc358764_write(ctx, DSI_STARTDSI, DSI_RX_START); /* configure video path */ tc358764_write(ctx, VP_CTRL, VP_CTRL_VSDELAY(15) | VP_CTRL_RGB888 | VP_CTRL_EVTMODE | VP_CTRL_HSPOL | VP_CTRL_VSPOL); /* reset PHY */ tc358764_write(ctx, LV_PHY0, LV_PHY0_RST(1) | LV_PHY0_PRBS_ON(4) | LV_PHY0_IS(2) | LV_PHY0_ND(6)); tc358764_write(ctx, LV_PHY0, LV_PHY0_PRBS_ON(4) | LV_PHY0_IS(2) | LV_PHY0_ND(6)); /* reset bridge */ tc358764_write(ctx, SYS_RST, SYS_RST_LCD); /* set bit order */ tc358764_write(ctx, LV_MX0003, LV_MX(LVI_R0, LVI_R1, LVI_R2, LVI_R3)); tc358764_write(ctx, LV_MX0407, LV_MX(LVI_R4, LVI_R7, LVI_R5, LVI_G0)); tc358764_write(ctx, LV_MX0811, LV_MX(LVI_G1, LVI_G2, LVI_G6, LVI_G7)); tc358764_write(ctx, LV_MX1215, LV_MX(LVI_G3, LVI_G4, LVI_G5, LVI_B0)); tc358764_write(ctx, LV_MX1619, LV_MX(LVI_B6, LVI_B7, LVI_B1, LVI_B2)); tc358764_write(ctx, LV_MX2023, LV_MX(LVI_B3, LVI_B4, LVI_B5, LVI_L0)); tc358764_write(ctx, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_R6)); tc358764_write(ctx, LV_CFG, LV_CFG_CLKPOL2 | LV_CFG_CLKPOL1 | LV_CFG_LVEN); return tc358764_clear_error(ctx); } static void tc358764_reset(struct tc358764 *ctx) { gpiod_set_value(ctx->gpio_reset, 1); usleep_range(1000, 2000); gpiod_set_value(ctx->gpio_reset, 0); usleep_range(1000, 2000); } static void tc358764_post_disable(struct drm_bridge *bridge) { struct 
tc358764 *ctx = bridge_to_tc358764(bridge); int ret; tc358764_reset(ctx); usleep_range(10000, 15000); ret = regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies); if (ret < 0) dev_err(ctx->dev, "error disabling regulators (%d)\n", ret); } static void tc358764_pre_enable(struct drm_bridge *bridge) { struct tc358764 *ctx = bridge_to_tc358764(bridge); int ret; ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies); if (ret < 0) dev_err(ctx->dev, "error enabling regulators (%d)\n", ret); usleep_range(10000, 15000); tc358764_reset(ctx); ret = tc358764_init(ctx); if (ret < 0) dev_err(ctx->dev, "error initializing bridge (%d)\n", ret); } static int tc358764_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct tc358764 *ctx = bridge_to_tc358764(bridge); return drm_bridge_attach(bridge->encoder, ctx->next_bridge, bridge, flags); } static const struct drm_bridge_funcs tc358764_bridge_funcs = { .post_disable = tc358764_post_disable, .pre_enable = tc358764_pre_enable, .attach = tc358764_attach, }; static int tc358764_parse_dt(struct tc358764 *ctx) { struct device *dev = ctx->dev; ctx->gpio_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(ctx->gpio_reset)) { dev_err(dev, "no reset GPIO pin provided\n"); return PTR_ERR(ctx->gpio_reset); } ctx->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0); if (IS_ERR(ctx->next_bridge)) return PTR_ERR(ctx->next_bridge); return 0; } static int tc358764_configure_regulators(struct tc358764 *ctx) { int i, ret; for (i = 0; i < ARRAY_SIZE(ctx->supplies); ++i) ctx->supplies[i].supply = tc358764_supplies[i]; ret = devm_regulator_bulk_get(ctx->dev, ARRAY_SIZE(ctx->supplies), ctx->supplies); if (ret < 0) dev_err(ctx->dev, "failed to get regulators: %d\n", ret); return ret; } static int tc358764_probe(struct mipi_dsi_device *dsi) { struct device *dev = &dsi->dev; struct tc358764 *ctx; int ret; ctx = devm_kzalloc(dev, sizeof(struct tc358764), GFP_KERNEL); if (!ctx) return 
-ENOMEM; mipi_dsi_set_drvdata(dsi, ctx); ctx->dev = dev; dsi->lanes = 4; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_VIDEO_AUTO_VERT | MIPI_DSI_MODE_LPM; ret = tc358764_parse_dt(ctx); if (ret < 0) return ret; ret = tc358764_configure_regulators(ctx); if (ret < 0) return ret; ctx->bridge.funcs = &tc358764_bridge_funcs; ctx->bridge.of_node = dev->of_node; ctx->bridge.pre_enable_prev_first = true; drm_bridge_add(&ctx->bridge); ret = mipi_dsi_attach(dsi); if (ret < 0) { drm_bridge_remove(&ctx->bridge); dev_err(dev, "failed to attach dsi\n"); } return ret; } static void tc358764_remove(struct mipi_dsi_device *dsi) { struct tc358764 *ctx = mipi_dsi_get_drvdata(dsi); mipi_dsi_detach(dsi); drm_bridge_remove(&ctx->bridge); } static const struct of_device_id tc358764_of_match[] = { { .compatible = "toshiba,tc358764" }, { } }; MODULE_DEVICE_TABLE(of, tc358764_of_match); static struct mipi_dsi_driver tc358764_driver = { .probe = tc358764_probe, .remove = tc358764_remove, .driver = { .name = "tc358764", .owner = THIS_MODULE, .of_match_table = tc358764_of_match, }, }; module_mipi_dsi_driver(tc358764_driver); MODULE_AUTHOR("Andrzej Hajda <[email protected]>"); MODULE_AUTHOR("Maciej Purski <[email protected]>"); MODULE_DESCRIPTION("MIPI-DSI based Driver for TC358764 DSI/LVDS Bridge"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/bridge/tc358764.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2018, The Linux Foundation. All rights reserved. * Copyright (c) 2019-2020. Linaro Limited. */ #include <linux/firmware.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/of_graph.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/wait.h> #include <linux/workqueue.h> #include <sound/hdmi-codec.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #define EDID_BLOCK_SIZE 128 #define EDID_NUM_BLOCKS 2 #define FW_FILE "lt9611uxc_fw.bin" struct lt9611uxc { struct device *dev; struct drm_bridge bridge; struct drm_connector connector; struct regmap *regmap; /* Protects all accesses to registers by stopping the on-chip MCU */ struct mutex ocm_lock; struct wait_queue_head wq; struct work_struct work; struct device_node *dsi0_node; struct device_node *dsi1_node; struct mipi_dsi_device *dsi0; struct mipi_dsi_device *dsi1; struct platform_device *audio_pdev; struct gpio_desc *reset_gpio; struct gpio_desc *enable_gpio; struct regulator_bulk_data supplies[2]; struct i2c_client *client; bool hpd_supported; bool edid_read; /* can be accessed from different threads, so protect this with ocm_lock */ bool hdmi_connected; uint8_t fw_version; }; #define LT9611_PAGE_CONTROL 0xff static const struct regmap_range_cfg lt9611uxc_ranges[] = { { .name = "register_range", .range_min = 0, .range_max = 0xd0ff, .selector_reg = LT9611_PAGE_CONTROL, .selector_mask = 0xff, .selector_shift = 0, .window_start = 0, .window_len = 0x100, }, }; static const struct regmap_config lt9611uxc_regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = 0xffff, .ranges = lt9611uxc_ranges, .num_ranges = ARRAY_SIZE(lt9611uxc_ranges), }; struct lt9611uxc_mode { u16 hdisplay; u16 
vdisplay; u8 vrefresh; }; /* * This chip supports only a fixed set of modes. * Enumerate them here to check whether the mode is supported. */ static struct lt9611uxc_mode lt9611uxc_modes[] = { { 1920, 1080, 60 }, { 1920, 1080, 30 }, { 1920, 1080, 25 }, { 1366, 768, 60 }, { 1360, 768, 60 }, { 1280, 1024, 60 }, { 1280, 800, 60 }, { 1280, 720, 60 }, { 1280, 720, 50 }, { 1280, 720, 30 }, { 1152, 864, 60 }, { 1024, 768, 60 }, { 800, 600, 60 }, { 720, 576, 50 }, { 720, 480, 60 }, { 640, 480, 60 }, }; static struct lt9611uxc *bridge_to_lt9611uxc(struct drm_bridge *bridge) { return container_of(bridge, struct lt9611uxc, bridge); } static struct lt9611uxc *connector_to_lt9611uxc(struct drm_connector *connector) { return container_of(connector, struct lt9611uxc, connector); } static void lt9611uxc_lock(struct lt9611uxc *lt9611uxc) { mutex_lock(&lt9611uxc->ocm_lock); regmap_write(lt9611uxc->regmap, 0x80ee, 0x01); } static void lt9611uxc_unlock(struct lt9611uxc *lt9611uxc) { regmap_write(lt9611uxc->regmap, 0x80ee, 0x00); msleep(50); mutex_unlock(&lt9611uxc->ocm_lock); } static irqreturn_t lt9611uxc_irq_thread_handler(int irq, void *dev_id) { struct lt9611uxc *lt9611uxc = dev_id; unsigned int irq_status = 0; unsigned int hpd_status = 0; lt9611uxc_lock(lt9611uxc); regmap_read(lt9611uxc->regmap, 0xb022, &irq_status); regmap_read(lt9611uxc->regmap, 0xb023, &hpd_status); if (irq_status) regmap_write(lt9611uxc->regmap, 0xb022, 0); if (irq_status & BIT(0)) { lt9611uxc->edid_read = !!(hpd_status & BIT(0)); wake_up_all(&lt9611uxc->wq); } if (irq_status & BIT(1)) { lt9611uxc->hdmi_connected = hpd_status & BIT(1); schedule_work(&lt9611uxc->work); } lt9611uxc_unlock(lt9611uxc); return IRQ_HANDLED; } static void lt9611uxc_hpd_work(struct work_struct *work) { struct lt9611uxc *lt9611uxc = container_of(work, struct lt9611uxc, work); bool connected; if (lt9611uxc->connector.dev) { if (lt9611uxc->connector.dev->mode_config.funcs) drm_kms_helper_hotplug_event(lt9611uxc->connector.dev); } else { 
mutex_lock(&lt9611uxc->ocm_lock); connected = lt9611uxc->hdmi_connected; mutex_unlock(&lt9611uxc->ocm_lock); drm_bridge_hpd_notify(&lt9611uxc->bridge, connected ? connector_status_connected : connector_status_disconnected); } } static void lt9611uxc_reset(struct lt9611uxc *lt9611uxc) { gpiod_set_value_cansleep(lt9611uxc->reset_gpio, 1); msleep(20); gpiod_set_value_cansleep(lt9611uxc->reset_gpio, 0); msleep(20); gpiod_set_value_cansleep(lt9611uxc->reset_gpio, 1); msleep(300); } static void lt9611uxc_assert_5v(struct lt9611uxc *lt9611uxc) { if (!lt9611uxc->enable_gpio) return; gpiod_set_value_cansleep(lt9611uxc->enable_gpio, 1); msleep(20); } static int lt9611uxc_regulator_init(struct lt9611uxc *lt9611uxc) { int ret; lt9611uxc->supplies[0].supply = "vdd"; lt9611uxc->supplies[1].supply = "vcc"; ret = devm_regulator_bulk_get(lt9611uxc->dev, 2, lt9611uxc->supplies); if (ret < 0) return ret; return regulator_set_load(lt9611uxc->supplies[0].consumer, 200000); } static int lt9611uxc_regulator_enable(struct lt9611uxc *lt9611uxc) { int ret; ret = regulator_enable(lt9611uxc->supplies[0].consumer); if (ret < 0) return ret; usleep_range(1000, 10000); /* 50000 according to dtsi */ ret = regulator_enable(lt9611uxc->supplies[1].consumer); if (ret < 0) { regulator_disable(lt9611uxc->supplies[0].consumer); return ret; } return 0; } static struct lt9611uxc_mode *lt9611uxc_find_mode(const struct drm_display_mode *mode) { int i; for (i = 0; i < ARRAY_SIZE(lt9611uxc_modes); i++) { if (lt9611uxc_modes[i].hdisplay == mode->hdisplay && lt9611uxc_modes[i].vdisplay == mode->vdisplay && lt9611uxc_modes[i].vrefresh == drm_mode_vrefresh(mode)) { return &lt9611uxc_modes[i]; } } return NULL; } static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc, struct device_node *dsi_node) { const struct mipi_dsi_device_info info = { "lt9611uxc", 0, NULL }; struct mipi_dsi_device *dsi; struct mipi_dsi_host *host; struct device *dev = lt9611uxc->dev; int ret; host = 
of_find_mipi_dsi_host_by_node(dsi_node); if (!host) { dev_err(dev, "failed to find dsi host\n"); return ERR_PTR(-EPROBE_DEFER); } dsi = devm_mipi_dsi_device_register_full(dev, host, &info); if (IS_ERR(dsi)) { dev_err(dev, "failed to create dsi device\n"); return dsi; } dsi->lanes = 4; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_VIDEO_HSE; ret = devm_mipi_dsi_attach(dev, dsi); if (ret < 0) { dev_err(dev, "failed to attach dsi to host\n"); return ERR_PTR(ret); } return dsi; } static int lt9611uxc_connector_get_modes(struct drm_connector *connector) { struct lt9611uxc *lt9611uxc = connector_to_lt9611uxc(connector); unsigned int count; struct edid *edid; edid = lt9611uxc->bridge.funcs->get_edid(&lt9611uxc->bridge, connector); drm_connector_update_edid_property(connector, edid); count = drm_add_edid_modes(connector, edid); kfree(edid); return count; } static enum drm_connector_status lt9611uxc_connector_detect(struct drm_connector *connector, bool force) { struct lt9611uxc *lt9611uxc = connector_to_lt9611uxc(connector); return lt9611uxc->bridge.funcs->detect(&lt9611uxc->bridge); } static enum drm_mode_status lt9611uxc_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct lt9611uxc_mode *lt9611uxc_mode = lt9611uxc_find_mode(mode); return lt9611uxc_mode ? 
MODE_OK : MODE_BAD; } static const struct drm_connector_helper_funcs lt9611uxc_bridge_connector_helper_funcs = { .get_modes = lt9611uxc_connector_get_modes, .mode_valid = lt9611uxc_connector_mode_valid, }; static const struct drm_connector_funcs lt9611uxc_bridge_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .detect = lt9611uxc_connector_detect, .destroy = drm_connector_cleanup, .reset = drm_atomic_helper_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; static int lt9611uxc_connector_init(struct drm_bridge *bridge, struct lt9611uxc *lt9611uxc) { int ret; if (!bridge->encoder) { DRM_ERROR("Parent encoder object not found"); return -ENODEV; } lt9611uxc->connector.polled = DRM_CONNECTOR_POLL_HPD; drm_connector_helper_add(&lt9611uxc->connector, &lt9611uxc_bridge_connector_helper_funcs); ret = drm_connector_init(bridge->dev, &lt9611uxc->connector, &lt9611uxc_bridge_connector_funcs, DRM_MODE_CONNECTOR_HDMIA); if (ret) { DRM_ERROR("Failed to initialize connector with drm\n"); return ret; } return drm_connector_attach_encoder(&lt9611uxc->connector, bridge->encoder); } static int lt9611uxc_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge); int ret; if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) { ret = lt9611uxc_connector_init(bridge, lt9611uxc); if (ret < 0) return ret; } return 0; } static enum drm_mode_status lt9611uxc_bridge_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode) { struct lt9611uxc_mode *lt9611uxc_mode; lt9611uxc_mode = lt9611uxc_find_mode(mode); return lt9611uxc_mode ? 
MODE_OK : MODE_BAD; } static void lt9611uxc_video_setup(struct lt9611uxc *lt9611uxc, const struct drm_display_mode *mode) { u32 h_total, hactive, hsync_len, hfront_porch; u32 v_total, vactive, vsync_len, vfront_porch; h_total = mode->htotal; v_total = mode->vtotal; hactive = mode->hdisplay; hsync_len = mode->hsync_end - mode->hsync_start; hfront_porch = mode->hsync_start - mode->hdisplay; vactive = mode->vdisplay; vsync_len = mode->vsync_end - mode->vsync_start; vfront_porch = mode->vsync_start - mode->vdisplay; regmap_write(lt9611uxc->regmap, 0xd00d, (u8)(v_total / 256)); regmap_write(lt9611uxc->regmap, 0xd00e, (u8)(v_total % 256)); regmap_write(lt9611uxc->regmap, 0xd00f, (u8)(vactive / 256)); regmap_write(lt9611uxc->regmap, 0xd010, (u8)(vactive % 256)); regmap_write(lt9611uxc->regmap, 0xd011, (u8)(h_total / 256)); regmap_write(lt9611uxc->regmap, 0xd012, (u8)(h_total % 256)); regmap_write(lt9611uxc->regmap, 0xd013, (u8)(hactive / 256)); regmap_write(lt9611uxc->regmap, 0xd014, (u8)(hactive % 256)); regmap_write(lt9611uxc->regmap, 0xd015, (u8)(vsync_len % 256)); regmap_update_bits(lt9611uxc->regmap, 0xd016, 0xf, (u8)(hsync_len / 256)); regmap_write(lt9611uxc->regmap, 0xd017, (u8)(hsync_len % 256)); regmap_update_bits(lt9611uxc->regmap, 0xd018, 0xf, (u8)(vfront_porch / 256)); regmap_write(lt9611uxc->regmap, 0xd019, (u8)(vfront_porch % 256)); regmap_update_bits(lt9611uxc->regmap, 0xd01a, 0xf, (u8)(hfront_porch / 256)); regmap_write(lt9611uxc->regmap, 0xd01b, (u8)(hfront_porch % 256)); } static void lt9611uxc_bridge_mode_set(struct drm_bridge *bridge, const struct drm_display_mode *mode, const struct drm_display_mode *adj_mode) { struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge); lt9611uxc_lock(lt9611uxc); lt9611uxc_video_setup(lt9611uxc, mode); lt9611uxc_unlock(lt9611uxc); } static enum drm_connector_status lt9611uxc_bridge_detect(struct drm_bridge *bridge) { struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge); unsigned int reg_val = 0; int ret; bool 
connected = true; lt9611uxc_lock(lt9611uxc); if (lt9611uxc->hpd_supported) { ret = regmap_read(lt9611uxc->regmap, 0xb023, &reg_val); if (ret) dev_err(lt9611uxc->dev, "failed to read hpd status: %d\n", ret); else connected = reg_val & BIT(1); } lt9611uxc->hdmi_connected = connected; lt9611uxc_unlock(lt9611uxc); return connected ? connector_status_connected : connector_status_disconnected; } static int lt9611uxc_wait_for_edid(struct lt9611uxc *lt9611uxc) { return wait_event_interruptible_timeout(lt9611uxc->wq, lt9611uxc->edid_read, msecs_to_jiffies(500)); } static int lt9611uxc_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len) { struct lt9611uxc *lt9611uxc = data; int ret; if (len > EDID_BLOCK_SIZE) return -EINVAL; if (block >= EDID_NUM_BLOCKS) return -EINVAL; lt9611uxc_lock(lt9611uxc); regmap_write(lt9611uxc->regmap, 0xb00b, 0x10); regmap_write(lt9611uxc->regmap, 0xb00a, block * EDID_BLOCK_SIZE); ret = regmap_noinc_read(lt9611uxc->regmap, 0xb0b0, buf, len); if (ret) dev_err(lt9611uxc->dev, "edid read failed: %d\n", ret); lt9611uxc_unlock(lt9611uxc); return 0; }; static struct edid *lt9611uxc_bridge_get_edid(struct drm_bridge *bridge, struct drm_connector *connector) { struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge); int ret; ret = lt9611uxc_wait_for_edid(lt9611uxc); if (ret < 0) { dev_err(lt9611uxc->dev, "wait for EDID failed: %d\n", ret); return NULL; } else if (ret == 0) { dev_err(lt9611uxc->dev, "wait for EDID timeout\n"); return NULL; } return drm_do_get_edid(connector, lt9611uxc_get_edid_block, lt9611uxc); } static const struct drm_bridge_funcs lt9611uxc_bridge_funcs = { .attach = lt9611uxc_bridge_attach, .mode_valid = lt9611uxc_bridge_mode_valid, .mode_set = lt9611uxc_bridge_mode_set, .detect = lt9611uxc_bridge_detect, .get_edid = lt9611uxc_bridge_get_edid, }; static int lt9611uxc_parse_dt(struct device *dev, struct lt9611uxc *lt9611uxc) { lt9611uxc->dsi0_node = of_graph_get_remote_node(dev->of_node, 0, -1); if 
(!lt9611uxc->dsi0_node) { dev_err(lt9611uxc->dev, "failed to get remote node for primary dsi\n"); return -ENODEV; } lt9611uxc->dsi1_node = of_graph_get_remote_node(dev->of_node, 1, -1); return 0; } static int lt9611uxc_gpio_init(struct lt9611uxc *lt9611uxc) { struct device *dev = lt9611uxc->dev; lt9611uxc->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(lt9611uxc->reset_gpio)) { dev_err(dev, "failed to acquire reset gpio\n"); return PTR_ERR(lt9611uxc->reset_gpio); } lt9611uxc->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW); if (IS_ERR(lt9611uxc->enable_gpio)) { dev_err(dev, "failed to acquire enable gpio\n"); return PTR_ERR(lt9611uxc->enable_gpio); } return 0; } static int lt9611uxc_read_device_rev(struct lt9611uxc *lt9611uxc) { unsigned int rev0, rev1, rev2; int ret; lt9611uxc_lock(lt9611uxc); ret = regmap_read(lt9611uxc->regmap, 0x8100, &rev0); ret |= regmap_read(lt9611uxc->regmap, 0x8101, &rev1); ret |= regmap_read(lt9611uxc->regmap, 0x8102, &rev2); if (ret) dev_err(lt9611uxc->dev, "failed to read revision: %d\n", ret); else dev_info(lt9611uxc->dev, "LT9611 revision: 0x%02x.%02x.%02x\n", rev0, rev1, rev2); lt9611uxc_unlock(lt9611uxc); return ret; } static int lt9611uxc_read_version(struct lt9611uxc *lt9611uxc) { unsigned int rev; int ret; lt9611uxc_lock(lt9611uxc); ret = regmap_read(lt9611uxc->regmap, 0xb021, &rev); if (ret) dev_err(lt9611uxc->dev, "failed to read revision: %d\n", ret); else dev_info(lt9611uxc->dev, "LT9611 version: 0x%02x\n", rev); lt9611uxc_unlock(lt9611uxc); return ret < 0 ? ret : rev; } static int lt9611uxc_hdmi_hw_params(struct device *dev, void *data, struct hdmi_codec_daifmt *fmt, struct hdmi_codec_params *hparms) { /* * LT9611UXC will automatically detect rate and sample size, so no need * to setup anything here. 
*/ return 0; } static void lt9611uxc_audio_shutdown(struct device *dev, void *data) { } static int lt9611uxc_hdmi_i2s_get_dai_id(struct snd_soc_component *component, struct device_node *endpoint) { struct of_endpoint of_ep; int ret; ret = of_graph_parse_endpoint(endpoint, &of_ep); if (ret < 0) return ret; /* * HDMI sound should be located as reg = <2> * Then, it is sound port 0 */ if (of_ep.port == 2) return 0; return -EINVAL; } static const struct hdmi_codec_ops lt9611uxc_codec_ops = { .hw_params = lt9611uxc_hdmi_hw_params, .audio_shutdown = lt9611uxc_audio_shutdown, .get_dai_id = lt9611uxc_hdmi_i2s_get_dai_id, }; static int lt9611uxc_audio_init(struct device *dev, struct lt9611uxc *lt9611uxc) { struct hdmi_codec_pdata codec_data = { .ops = &lt9611uxc_codec_ops, .max_i2s_channels = 2, .i2s = 1, .data = lt9611uxc, }; lt9611uxc->audio_pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO, &codec_data, sizeof(codec_data)); return PTR_ERR_OR_ZERO(lt9611uxc->audio_pdev); } static void lt9611uxc_audio_exit(struct lt9611uxc *lt9611uxc) { if (lt9611uxc->audio_pdev) { platform_device_unregister(lt9611uxc->audio_pdev); lt9611uxc->audio_pdev = NULL; } } #define LT9611UXC_FW_PAGE_SIZE 32 static void lt9611uxc_firmware_write_page(struct lt9611uxc *lt9611uxc, u16 addr, const u8 *buf) { struct reg_sequence seq_write_prepare[] = { REG_SEQ0(0x805a, 0x04), REG_SEQ0(0x805a, 0x00), REG_SEQ0(0x805e, 0xdf), REG_SEQ0(0x805a, 0x20), REG_SEQ0(0x805a, 0x00), REG_SEQ0(0x8058, 0x21), }; struct reg_sequence seq_write_addr[] = { REG_SEQ0(0x805b, (addr >> 16) & 0xff), REG_SEQ0(0x805c, (addr >> 8) & 0xff), REG_SEQ0(0x805d, addr & 0xff), REG_SEQ0(0x805a, 0x10), REG_SEQ0(0x805a, 0x00), }; regmap_write(lt9611uxc->regmap, 0x8108, 0xbf); msleep(20); regmap_write(lt9611uxc->regmap, 0x8108, 0xff); msleep(20); regmap_multi_reg_write(lt9611uxc->regmap, seq_write_prepare, ARRAY_SIZE(seq_write_prepare)); regmap_noinc_write(lt9611uxc->regmap, 0x8059, buf, 
LT9611UXC_FW_PAGE_SIZE); regmap_multi_reg_write(lt9611uxc->regmap, seq_write_addr, ARRAY_SIZE(seq_write_addr)); msleep(20); } static void lt9611uxc_firmware_read_page(struct lt9611uxc *lt9611uxc, u16 addr, char *buf) { struct reg_sequence seq_read_page[] = { REG_SEQ0(0x805a, 0xa0), REG_SEQ0(0x805a, 0x80), REG_SEQ0(0x805b, (addr >> 16) & 0xff), REG_SEQ0(0x805c, (addr >> 8) & 0xff), REG_SEQ0(0x805d, addr & 0xff), REG_SEQ0(0x805a, 0x90), REG_SEQ0(0x805a, 0x80), REG_SEQ0(0x8058, 0x21), }; regmap_multi_reg_write(lt9611uxc->regmap, seq_read_page, ARRAY_SIZE(seq_read_page)); regmap_noinc_read(lt9611uxc->regmap, 0x805f, buf, LT9611UXC_FW_PAGE_SIZE); } static char *lt9611uxc_firmware_read(struct lt9611uxc *lt9611uxc, size_t size) { struct reg_sequence seq_read_setup[] = { REG_SEQ0(0x805a, 0x84), REG_SEQ0(0x805a, 0x80), }; char *readbuf; u16 offset; readbuf = kzalloc(ALIGN(size, 32), GFP_KERNEL); if (!readbuf) return NULL; regmap_multi_reg_write(lt9611uxc->regmap, seq_read_setup, ARRAY_SIZE(seq_read_setup)); for (offset = 0; offset < size; offset += LT9611UXC_FW_PAGE_SIZE) lt9611uxc_firmware_read_page(lt9611uxc, offset, &readbuf[offset]); return readbuf; } static int lt9611uxc_firmware_update(struct lt9611uxc *lt9611uxc) { int ret; u16 offset; size_t remain; char *readbuf; const struct firmware *fw; struct reg_sequence seq_setup[] = { REG_SEQ0(0x805e, 0xdf), REG_SEQ0(0x8058, 0x00), REG_SEQ0(0x8059, 0x50), REG_SEQ0(0x805a, 0x10), REG_SEQ0(0x805a, 0x00), }; struct reg_sequence seq_block_erase[] = { REG_SEQ0(0x805a, 0x04), REG_SEQ0(0x805a, 0x00), REG_SEQ0(0x805b, 0x00), REG_SEQ0(0x805c, 0x00), REG_SEQ0(0x805d, 0x00), REG_SEQ0(0x805a, 0x01), REG_SEQ0(0x805a, 0x00), }; ret = request_firmware(&fw, FW_FILE, lt9611uxc->dev); if (ret < 0) return ret; dev_info(lt9611uxc->dev, "Updating firmware\n"); lt9611uxc_lock(lt9611uxc); regmap_multi_reg_write(lt9611uxc->regmap, seq_setup, ARRAY_SIZE(seq_setup)); /* * Need erase block 2 timess here. Sometimes, block erase can fail. 
* This is a workaroud. */ regmap_multi_reg_write(lt9611uxc->regmap, seq_block_erase, ARRAY_SIZE(seq_block_erase)); msleep(3000); regmap_multi_reg_write(lt9611uxc->regmap, seq_block_erase, ARRAY_SIZE(seq_block_erase)); msleep(3000); for (offset = 0, remain = fw->size; remain >= LT9611UXC_FW_PAGE_SIZE; offset += LT9611UXC_FW_PAGE_SIZE, remain -= LT9611UXC_FW_PAGE_SIZE) lt9611uxc_firmware_write_page(lt9611uxc, offset, fw->data + offset); if (remain > 0) { char buf[LT9611UXC_FW_PAGE_SIZE]; memset(buf, 0xff, LT9611UXC_FW_PAGE_SIZE); memcpy(buf, fw->data + offset, remain); lt9611uxc_firmware_write_page(lt9611uxc, offset, buf); } msleep(20); readbuf = lt9611uxc_firmware_read(lt9611uxc, fw->size); if (!readbuf) { ret = -ENOMEM; goto out; } if (!memcmp(readbuf, fw->data, fw->size)) { dev_err(lt9611uxc->dev, "Firmware update failed\n"); print_hex_dump(KERN_ERR, "fw: ", DUMP_PREFIX_OFFSET, 16, 1, readbuf, fw->size, false); ret = -EINVAL; } else { dev_info(lt9611uxc->dev, "Firmware updates successfully\n"); ret = 0; } kfree(readbuf); out: lt9611uxc_unlock(lt9611uxc); lt9611uxc_reset(lt9611uxc); release_firmware(fw); return ret; } static ssize_t lt9611uxc_firmware_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct lt9611uxc *lt9611uxc = dev_get_drvdata(dev); int ret; ret = lt9611uxc_firmware_update(lt9611uxc); if (ret < 0) return ret; return len; } static ssize_t lt9611uxc_firmware_show(struct device *dev, struct device_attribute *attr, char *buf) { struct lt9611uxc *lt9611uxc = dev_get_drvdata(dev); return sysfs_emit(buf, "%02x\n", lt9611uxc->fw_version); } static DEVICE_ATTR_RW(lt9611uxc_firmware); static struct attribute *lt9611uxc_attrs[] = { &dev_attr_lt9611uxc_firmware.attr, NULL, }; static const struct attribute_group lt9611uxc_attr_group = { .attrs = lt9611uxc_attrs, }; static const struct attribute_group *lt9611uxc_attr_groups[] = { &lt9611uxc_attr_group, NULL, }; static int lt9611uxc_probe(struct i2c_client *client) { struct 
lt9611uxc *lt9611uxc; struct device *dev = &client->dev; int ret; bool fw_updated = false; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { dev_err(dev, "device doesn't support I2C\n"); return -ENODEV; } lt9611uxc = devm_kzalloc(dev, sizeof(*lt9611uxc), GFP_KERNEL); if (!lt9611uxc) return -ENOMEM; lt9611uxc->dev = dev; lt9611uxc->client = client; mutex_init(&lt9611uxc->ocm_lock); lt9611uxc->regmap = devm_regmap_init_i2c(client, &lt9611uxc_regmap_config); if (IS_ERR(lt9611uxc->regmap)) { dev_err(lt9611uxc->dev, "regmap i2c init failed\n"); return PTR_ERR(lt9611uxc->regmap); } ret = lt9611uxc_parse_dt(dev, lt9611uxc); if (ret) { dev_err(dev, "failed to parse device tree\n"); return ret; } ret = lt9611uxc_gpio_init(lt9611uxc); if (ret < 0) goto err_of_put; ret = lt9611uxc_regulator_init(lt9611uxc); if (ret < 0) goto err_of_put; lt9611uxc_assert_5v(lt9611uxc); ret = lt9611uxc_regulator_enable(lt9611uxc); if (ret) goto err_of_put; lt9611uxc_reset(lt9611uxc); ret = lt9611uxc_read_device_rev(lt9611uxc); if (ret) { dev_err(dev, "failed to read chip rev\n"); goto err_disable_regulators; } retry: ret = lt9611uxc_read_version(lt9611uxc); if (ret < 0) { dev_err(dev, "failed to read FW version\n"); goto err_disable_regulators; } else if (ret == 0) { if (!fw_updated) { fw_updated = true; dev_err(dev, "FW version 0, enforcing firmware update\n"); ret = lt9611uxc_firmware_update(lt9611uxc); if (ret < 0) goto err_disable_regulators; else goto retry; } else { dev_err(dev, "FW version 0, update failed\n"); ret = -EOPNOTSUPP; goto err_disable_regulators; } } else if (ret < 0x40) { dev_info(dev, "FW version 0x%x, HPD not supported\n", ret); } else { lt9611uxc->hpd_supported = true; } lt9611uxc->fw_version = ret; init_waitqueue_head(&lt9611uxc->wq); INIT_WORK(&lt9611uxc->work, lt9611uxc_hpd_work); ret = devm_request_threaded_irq(dev, client->irq, NULL, lt9611uxc_irq_thread_handler, IRQF_ONESHOT, "lt9611uxc", lt9611uxc); if (ret) { dev_err(dev, "failed to request irq\n"); 
goto err_disable_regulators; } i2c_set_clientdata(client, lt9611uxc); lt9611uxc->bridge.funcs = &lt9611uxc_bridge_funcs; lt9611uxc->bridge.of_node = client->dev.of_node; lt9611uxc->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID; if (lt9611uxc->hpd_supported) lt9611uxc->bridge.ops |= DRM_BRIDGE_OP_HPD; lt9611uxc->bridge.type = DRM_MODE_CONNECTOR_HDMIA; drm_bridge_add(&lt9611uxc->bridge); /* Attach primary DSI */ lt9611uxc->dsi0 = lt9611uxc_attach_dsi(lt9611uxc, lt9611uxc->dsi0_node); if (IS_ERR(lt9611uxc->dsi0)) { ret = PTR_ERR(lt9611uxc->dsi0); goto err_remove_bridge; } /* Attach secondary DSI, if specified */ if (lt9611uxc->dsi1_node) { lt9611uxc->dsi1 = lt9611uxc_attach_dsi(lt9611uxc, lt9611uxc->dsi1_node); if (IS_ERR(lt9611uxc->dsi1)) { ret = PTR_ERR(lt9611uxc->dsi1); goto err_remove_bridge; } } return lt9611uxc_audio_init(dev, lt9611uxc); err_remove_bridge: drm_bridge_remove(&lt9611uxc->bridge); err_disable_regulators: regulator_bulk_disable(ARRAY_SIZE(lt9611uxc->supplies), lt9611uxc->supplies); err_of_put: of_node_put(lt9611uxc->dsi1_node); of_node_put(lt9611uxc->dsi0_node); return ret; } static void lt9611uxc_remove(struct i2c_client *client) { struct lt9611uxc *lt9611uxc = i2c_get_clientdata(client); disable_irq(client->irq); cancel_work_sync(&lt9611uxc->work); lt9611uxc_audio_exit(lt9611uxc); drm_bridge_remove(&lt9611uxc->bridge); mutex_destroy(&lt9611uxc->ocm_lock); regulator_bulk_disable(ARRAY_SIZE(lt9611uxc->supplies), lt9611uxc->supplies); of_node_put(lt9611uxc->dsi1_node); of_node_put(lt9611uxc->dsi0_node); } static struct i2c_device_id lt9611uxc_id[] = { { "lontium,lt9611uxc", 0 }, { /* sentinel */ } }; static const struct of_device_id lt9611uxc_match_table[] = { { .compatible = "lontium,lt9611uxc" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, lt9611uxc_match_table); static struct i2c_driver lt9611uxc_driver = { .driver = { .name = "lt9611uxc", .of_match_table = lt9611uxc_match_table, .dev_groups = lt9611uxc_attr_groups, }, .probe = 
lt9611uxc_probe, .remove = lt9611uxc_remove, .id_table = lt9611uxc_id, }; module_i2c_driver(lt9611uxc_driver); MODULE_AUTHOR("Dmitry Baryshkov <[email protected]>"); MODULE_LICENSE("GPL v2"); MODULE_FIRMWARE(FW_FILE);
linux-master
drivers/gpu/drm/bridge/lontium-lt9611uxc.c
// SPDX-License-Identifier: GPL-2.0
/*
 * THC63LVD1024 LVDS to parallel data DRM bridge driver.
 *
 * Copyright (C) 2018 Jacopo Mondi <[email protected]>
 */

#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>

#include <drm/drm_bridge.h>
#include <drm/drm_panel.h>

/* OF graph port numbers: two LVDS inputs, two parallel RGB outputs. */
enum thc63_ports {
	THC63_LVDS_IN0,
	THC63_LVDS_IN1,
	THC63_RGB_OUT0,
	THC63_RGB_OUT1,
};

struct thc63_dev {
	struct device *dev;

	struct regulator *vcc;

	struct gpio_desc *pdwn;	/* power-down, active high */
	struct gpio_desc *oe;	/* output enable, active high */

	struct drm_bridge bridge;
	struct drm_bridge *next;

	struct drm_bridge_timings timings;
};

static inline struct thc63_dev *to_thc63(struct drm_bridge *bridge)
{
	return container_of(bridge, struct thc63_dev, bridge);
}

/* Chain the downstream bridge found in thc63_parse_dt() behind this one. */
static int thc63_attach(struct drm_bridge *bridge,
			enum drm_bridge_attach_flags flags)
{
	struct thc63_dev *thc63 = to_thc63(bridge);

	return drm_bridge_attach(bridge->encoder, thc63->next, bridge, flags);
}

static enum drm_mode_status thc63_mode_valid(struct drm_bridge *bridge,
					     const struct drm_display_info *info,
					     const struct drm_display_mode *mode)
{
	struct thc63_dev *thc63 = to_thc63(bridge);
	unsigned int min_freq;
	unsigned int max_freq;

	/*
	 * The THC63LVD1024 pixel rate range is 8 to 135 MHz in all modes but
	 * dual-in, single-out where it is 40 to 150 MHz. As dual-in, dual-out
	 * isn't supported by the driver yet, simply derive the limits from the
	 * input mode.
	 */
	if (thc63->timings.dual_link) {
		min_freq = 40000;
		max_freq = 150000;
	} else {
		min_freq = 8000;
		max_freq = 135000;
	}

	if (mode->clock < min_freq)
		return MODE_CLOCK_LOW;

	if (mode->clock > max_freq)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

/* Enable vcc, then deassert power-down and assert output-enable. */
static void thc63_enable(struct drm_bridge *bridge)
{
	struct thc63_dev *thc63 = to_thc63(bridge);
	int ret;

	ret = regulator_enable(thc63->vcc);
	if (ret) {
		dev_err(thc63->dev,
			"Failed to enable regulator \"vcc\": %d\n", ret);
		return;
	}

	gpiod_set_value(thc63->pdwn, 0);
	gpiod_set_value(thc63->oe, 1);
}

/* Reverse of thc63_enable(): gate outputs, power down, drop vcc. */
static void thc63_disable(struct drm_bridge *bridge)
{
	struct thc63_dev *thc63 = to_thc63(bridge);
	int ret;

	gpiod_set_value(thc63->oe, 0);
	gpiod_set_value(thc63->pdwn, 1);

	ret = regulator_disable(thc63->vcc);
	if (ret)
		dev_err(thc63->dev,
			"Failed to disable regulator \"vcc\": %d\n", ret);
}

static const struct drm_bridge_funcs thc63_bridge_func = {
	.attach = thc63_attach,
	.mode_valid = thc63_mode_valid,
	.enable = thc63_enable,
	.disable = thc63_disable,
};

/*
 * Find the downstream bridge on RGB_OUT0 (mandatory) and detect dual-link
 * operation from the presence of an enabled remote on LVDS_IN1 (optional).
 */
static int thc63_parse_dt(struct thc63_dev *thc63)
{
	struct device_node *endpoint;
	struct device_node *remote;

	endpoint = of_graph_get_endpoint_by_regs(thc63->dev->of_node,
						 THC63_RGB_OUT0, -1);
	if (!endpoint) {
		dev_err(thc63->dev, "Missing endpoint in port@%u\n",
			THC63_RGB_OUT0);
		return -ENODEV;
	}

	remote = of_graph_get_remote_port_parent(endpoint);
	of_node_put(endpoint);
	if (!remote) {
		dev_err(thc63->dev, "Endpoint in port@%u unconnected\n",
			THC63_RGB_OUT0);
		return -ENODEV;
	}

	if (!of_device_is_available(remote)) {
		dev_err(thc63->dev, "port@%u remote endpoint is disabled\n",
			THC63_RGB_OUT0);
		of_node_put(remote);
		return -ENODEV;
	}

	/* Defer until the downstream bridge driver has registered. */
	thc63->next = of_drm_find_bridge(remote);
	of_node_put(remote);
	if (!thc63->next)
		return -EPROBE_DEFER;

	endpoint = of_graph_get_endpoint_by_regs(thc63->dev->of_node,
						 THC63_LVDS_IN1, -1);
	if (endpoint) {
		remote = of_graph_get_remote_port_parent(endpoint);
		of_node_put(endpoint);

		if (remote) {
			if (of_device_is_available(remote))
				thc63->timings.dual_link = true;
			of_node_put(remote);
		}
	}

	dev_dbg(thc63->dev, "operating in %s-link mode\n",
		thc63->timings.dual_link ? "dual" : "single");

	return 0;
}

/* Both GPIOs are optional; request them in their inactive/safe state. */
static int thc63_gpio_init(struct thc63_dev *thc63)
{
	thc63->oe = devm_gpiod_get_optional(thc63->dev, "oe", GPIOD_OUT_LOW);
	if (IS_ERR(thc63->oe)) {
		dev_err(thc63->dev, "Unable to get \"oe-gpios\": %ld\n",
			PTR_ERR(thc63->oe));
		return PTR_ERR(thc63->oe);
	}

	thc63->pdwn = devm_gpiod_get_optional(thc63->dev, "powerdown",
					      GPIOD_OUT_HIGH);
	if (IS_ERR(thc63->pdwn)) {
		dev_err(thc63->dev, "Unable to get \"powerdown-gpios\": %ld\n",
			PTR_ERR(thc63->pdwn));
		return PTR_ERR(thc63->pdwn);
	}

	return 0;
}

static int thc63_probe(struct platform_device *pdev)
{
	struct thc63_dev *thc63;
	int ret;

	thc63 = devm_kzalloc(&pdev->dev, sizeof(*thc63), GFP_KERNEL);
	if (!thc63)
		return -ENOMEM;

	thc63->dev = &pdev->dev;
	platform_set_drvdata(pdev, thc63);

	thc63->vcc = devm_regulator_get(thc63->dev, "vcc");
	if (IS_ERR(thc63->vcc)) {
		if (PTR_ERR(thc63->vcc) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		dev_err(thc63->dev, "Unable to get \"vcc\" supply: %ld\n",
			PTR_ERR(thc63->vcc));
		return PTR_ERR(thc63->vcc);
	}

	ret = thc63_gpio_init(thc63);
	if (ret)
		return ret;

	ret = thc63_parse_dt(thc63);
	if (ret)
		return ret;

	thc63->bridge.driver_private = thc63;
	thc63->bridge.of_node = pdev->dev.of_node;
	thc63->bridge.funcs = &thc63_bridge_func;
	thc63->bridge.timings = &thc63->timings;

	drm_bridge_add(&thc63->bridge);

	return 0;
}

static void thc63_remove(struct platform_device *pdev)
{
	struct thc63_dev *thc63 = platform_get_drvdata(pdev);

	drm_bridge_remove(&thc63->bridge);
}

static const struct of_device_id thc63_match[] = {
	{ .compatible = "thine,thc63lvd1024", },
	{ },
};
MODULE_DEVICE_TABLE(of, thc63_match);

static struct platform_driver thc63_driver = {
	.probe = thc63_probe,
	.remove_new = thc63_remove,
	.driver = {
		.name = "thc63lvd1024",
		.of_match_table = thc63_match,
	},
};
module_platform_driver(thc63_driver);

MODULE_AUTHOR("Jacopo Mondi <[email protected]>");
MODULE_DESCRIPTION("Thine THC63LVD1024 LVDS decoder DRM bridge driver");
MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/bridge/thc63lvd1024.c
// SPDX-License-Identifier: GPL-2.0-only /* * Parade PS8622 eDP/LVDS bridge driver * * Copyright (C) 2014 Google, Inc. */ #include <linux/backlight.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/module.h> #include <linux/of.h> #include <linux/pm.h> #include <linux/regulator/consumer.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_crtc.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> /* Brightness scale on the Parade chip */ #define PS8622_MAX_BRIGHTNESS 0xff /* Timings taken from the version 1.7 datasheet for the PS8622/PS8625 */ #define PS8622_POWER_RISE_T1_MIN_US 10 #define PS8622_POWER_RISE_T1_MAX_US 10000 #define PS8622_RST_HIGH_T2_MIN_US 3000 #define PS8622_RST_HIGH_T2_MAX_US 30000 #define PS8622_PWMO_END_T12_MS 200 #define PS8622_POWER_FALL_T16_MAX_US 10000 #define PS8622_POWER_OFF_T17_MS 500 #if ((PS8622_RST_HIGH_T2_MIN_US + PS8622_POWER_RISE_T1_MAX_US) > \ (PS8622_RST_HIGH_T2_MAX_US + PS8622_POWER_RISE_T1_MIN_US)) #error "T2.min + T1.max must be less than T2.max + T1.min" #endif struct ps8622_bridge { struct i2c_client *client; struct drm_bridge bridge; struct drm_bridge *panel_bridge; struct regulator *v12; struct backlight_device *bl; struct gpio_desc *gpio_slp; struct gpio_desc *gpio_rst; u32 max_lane_count; u32 lane_count; bool enabled; }; static inline struct ps8622_bridge * bridge_to_ps8622(struct drm_bridge *bridge) { return container_of(bridge, struct ps8622_bridge, bridge); } static int ps8622_set(struct i2c_client *client, u8 page, u8 reg, u8 val) { int ret; struct i2c_adapter *adap = client->adapter; struct i2c_msg msg; u8 data[] = {reg, val}; msg.addr = client->addr + page; msg.flags = 0; msg.len = sizeof(data); msg.buf = data; ret = i2c_transfer(adap, &msg, 1); if (ret != 1) pr_warn("PS8622 I2C write (0x%02x,0x%02x,0x%02x) failed: %d\n", client->addr + page, reg, val, 
ret); return !(ret == 1); } static int ps8622_send_config(struct ps8622_bridge *ps8622) { struct i2c_client *cl = ps8622->client; int err = 0; /* HPD low */ err = ps8622_set(cl, 0x02, 0xa1, 0x01); if (err) goto error; /* SW setting: [1:0] SW output 1.2V voltage is lower to 96% */ err = ps8622_set(cl, 0x04, 0x14, 0x01); if (err) goto error; /* RCO SS setting: [5:4] = b01 0.5%, b10 1%, b11 1.5% */ err = ps8622_set(cl, 0x04, 0xe3, 0x20); if (err) goto error; /* [7] RCO SS enable */ err = ps8622_set(cl, 0x04, 0xe2, 0x80); if (err) goto error; /* RPHY Setting * [3:2] CDR tune wait cycle before measure for fine tune * b00: 1us b01: 0.5us b10:2us, b11: 4us */ err = ps8622_set(cl, 0x04, 0x8a, 0x0c); if (err) goto error; /* [3] RFD always on */ err = ps8622_set(cl, 0x04, 0x89, 0x08); if (err) goto error; /* CTN lock in/out: 20000ppm/80000ppm. Lock out 2 times. */ err = ps8622_set(cl, 0x04, 0x71, 0x2d); if (err) goto error; /* 2.7G CDR settings: NOF=40LSB for HBR CDR setting */ err = ps8622_set(cl, 0x04, 0x7d, 0x07); if (err) goto error; /* [1:0] Fmin=+4bands */ err = ps8622_set(cl, 0x04, 0x7b, 0x00); if (err) goto error; /* [7:5] DCO_FTRNG=+-40% */ err = ps8622_set(cl, 0x04, 0x7a, 0xfd); if (err) goto error; /* 1.62G CDR settings: [5:2]NOF=64LSB [1:0]DCO scale is 2/5 */ err = ps8622_set(cl, 0x04, 0xc0, 0x12); if (err) goto error; /* Gitune=-37% */ err = ps8622_set(cl, 0x04, 0xc1, 0x92); if (err) goto error; /* Fbstep=100% */ err = ps8622_set(cl, 0x04, 0xc2, 0x1c); if (err) goto error; /* [7] LOS signal disable */ err = ps8622_set(cl, 0x04, 0x32, 0x80); if (err) goto error; /* RPIO Setting: [7:4] LVDS driver bias current : 75% (250mV swing) */ err = ps8622_set(cl, 0x04, 0x00, 0xb0); if (err) goto error; /* [7:6] Right-bar GPIO output strength is 8mA */ err = ps8622_set(cl, 0x04, 0x15, 0x40); if (err) goto error; /* EQ Training State Machine Setting, RCO calibration start */ err = ps8622_set(cl, 0x04, 0x54, 0x10); if (err) goto error; /* Logic, needs more than 10 I2C command 
*/ /* [4:0] MAX_LANE_COUNT set to max supported lanes */ err = ps8622_set(cl, 0x01, 0x02, 0x80 | ps8622->max_lane_count); if (err) goto error; /* [4:0] LANE_COUNT_SET set to chosen lane count */ err = ps8622_set(cl, 0x01, 0x21, 0x80 | ps8622->lane_count); if (err) goto error; err = ps8622_set(cl, 0x00, 0x52, 0x20); if (err) goto error; /* HPD CP toggle enable */ err = ps8622_set(cl, 0x00, 0xf1, 0x03); if (err) goto error; err = ps8622_set(cl, 0x00, 0x62, 0x41); if (err) goto error; /* Counter number, add 1ms counter delay */ err = ps8622_set(cl, 0x00, 0xf6, 0x01); if (err) goto error; /* [6]PWM function control by DPCD0040f[7], default is PWM block */ err = ps8622_set(cl, 0x00, 0x77, 0x06); if (err) goto error; /* 04h Adjust VTotal toleranceto fix the 30Hz no display issue */ err = ps8622_set(cl, 0x00, 0x4c, 0x04); if (err) goto error; /* DPCD00400='h00, Parade OUI ='h001cf8 */ err = ps8622_set(cl, 0x01, 0xc0, 0x00); if (err) goto error; /* DPCD00401='h1c */ err = ps8622_set(cl, 0x01, 0xc1, 0x1c); if (err) goto error; /* DPCD00402='hf8 */ err = ps8622_set(cl, 0x01, 0xc2, 0xf8); if (err) goto error; /* DPCD403~408 = ASCII code, D2SLV5='h4432534c5635 */ err = ps8622_set(cl, 0x01, 0xc3, 0x44); if (err) goto error; /* DPCD404 */ err = ps8622_set(cl, 0x01, 0xc4, 0x32); if (err) goto error; /* DPCD405 */ err = ps8622_set(cl, 0x01, 0xc5, 0x53); if (err) goto error; /* DPCD406 */ err = ps8622_set(cl, 0x01, 0xc6, 0x4c); if (err) goto error; /* DPCD407 */ err = ps8622_set(cl, 0x01, 0xc7, 0x56); if (err) goto error; /* DPCD408 */ err = ps8622_set(cl, 0x01, 0xc8, 0x35); if (err) goto error; /* DPCD40A, Initial Code major revision '01' */ err = ps8622_set(cl, 0x01, 0xca, 0x01); if (err) goto error; /* DPCD40B, Initial Code minor revision '05' */ err = ps8622_set(cl, 0x01, 0xcb, 0x05); if (err) goto error; if (ps8622->bl) { /* DPCD720, internal PWM */ err = ps8622_set(cl, 0x01, 0xa5, 0xa0); if (err) goto error; /* FFh for 100% brightness, 0h for 0% brightness */ err = 
ps8622_set(cl, 0x01, 0xa7, ps8622->bl->props.brightness); if (err) goto error; } else { /* DPCD720, external PWM */ err = ps8622_set(cl, 0x01, 0xa5, 0x80); if (err) goto error; } /* Set LVDS output as 6bit-VESA mapping, single LVDS channel */ err = ps8622_set(cl, 0x01, 0xcc, 0x13); if (err) goto error; /* Enable SSC set by register */ err = ps8622_set(cl, 0x02, 0xb1, 0x20); if (err) goto error; /* Set SSC enabled and +/-1% central spreading */ err = ps8622_set(cl, 0x04, 0x10, 0x16); if (err) goto error; /* Logic end */ /* MPU Clock source: LC => RCO */ err = ps8622_set(cl, 0x04, 0x59, 0x60); if (err) goto error; /* LC -> RCO */ err = ps8622_set(cl, 0x04, 0x54, 0x14); if (err) goto error; /* HPD high */ err = ps8622_set(cl, 0x02, 0xa1, 0x91); error: return err ? -EIO : 0; } static int ps8622_backlight_update(struct backlight_device *bl) { struct ps8622_bridge *ps8622 = dev_get_drvdata(&bl->dev); int ret, brightness = backlight_get_brightness(bl); if (!ps8622->enabled) return -EINVAL; ret = ps8622_set(ps8622->client, 0x01, 0xa7, brightness); return ret; } static const struct backlight_ops ps8622_backlight_ops = { .update_status = ps8622_backlight_update, }; static void ps8622_pre_enable(struct drm_bridge *bridge) { struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge); int ret; if (ps8622->enabled) return; gpiod_set_value(ps8622->gpio_rst, 0); if (ps8622->v12) { ret = regulator_enable(ps8622->v12); if (ret) DRM_ERROR("fails to enable ps8622->v12"); } gpiod_set_value(ps8622->gpio_slp, 1); /* * T1 is the range of time that it takes for the power to rise after we * enable the lcd/ps8622 fet. T2 is the range of time in which the * data sheet specifies we should deassert the reset pin. * * If it takes T1.max for the power to rise, we need to wait atleast * T2.min before deasserting the reset pin. If it takes T1.min for the * power to rise, we need to wait at most T2.max before deasserting the * reset pin. 
*/ usleep_range(PS8622_RST_HIGH_T2_MIN_US + PS8622_POWER_RISE_T1_MAX_US, PS8622_RST_HIGH_T2_MAX_US + PS8622_POWER_RISE_T1_MIN_US); gpiod_set_value(ps8622->gpio_rst, 1); /* wait 20ms after RST high */ usleep_range(20000, 30000); ret = ps8622_send_config(ps8622); if (ret) { DRM_ERROR("Failed to send config to bridge (%d)\n", ret); return; } ps8622->enabled = true; } static void ps8622_disable(struct drm_bridge *bridge) { /* Delay after panel is disabled */ msleep(PS8622_PWMO_END_T12_MS); } static void ps8622_post_disable(struct drm_bridge *bridge) { struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge); if (!ps8622->enabled) return; ps8622->enabled = false; /* * This doesn't matter if the regulators are turned off, but something * else might keep them on. In that case, we want to assert the slp gpio * to lower power. */ gpiod_set_value(ps8622->gpio_slp, 0); if (ps8622->v12) regulator_disable(ps8622->v12); /* * Sleep for at least the amount of time that it takes the power rail to * fall to prevent asserting the rst gpio from doing anything. 
*/ usleep_range(PS8622_POWER_FALL_T16_MAX_US, 2 * PS8622_POWER_FALL_T16_MAX_US); gpiod_set_value(ps8622->gpio_rst, 0); msleep(PS8622_POWER_OFF_T17_MS); } static int ps8622_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge); return drm_bridge_attach(ps8622->bridge.encoder, ps8622->panel_bridge, &ps8622->bridge, flags); } static const struct drm_bridge_funcs ps8622_bridge_funcs = { .pre_enable = ps8622_pre_enable, .disable = ps8622_disable, .post_disable = ps8622_post_disable, .attach = ps8622_attach, }; static const struct of_device_id ps8622_devices[] = { {.compatible = "parade,ps8622",}, {.compatible = "parade,ps8625",}, {} }; MODULE_DEVICE_TABLE(of, ps8622_devices); static int ps8622_probe(struct i2c_client *client) { const struct i2c_device_id *id = i2c_client_get_device_id(client); struct device *dev = &client->dev; struct ps8622_bridge *ps8622; struct drm_bridge *panel_bridge; int ret; ps8622 = devm_kzalloc(dev, sizeof(*ps8622), GFP_KERNEL); if (!ps8622) return -ENOMEM; panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0); if (IS_ERR(panel_bridge)) return PTR_ERR(panel_bridge); ps8622->panel_bridge = panel_bridge; ps8622->client = client; ps8622->v12 = devm_regulator_get(dev, "vdd12"); if (IS_ERR(ps8622->v12)) { dev_info(dev, "no 1.2v regulator found for PS8622\n"); ps8622->v12 = NULL; } ps8622->gpio_slp = devm_gpiod_get(dev, "sleep", GPIOD_OUT_HIGH); if (IS_ERR(ps8622->gpio_slp)) { ret = PTR_ERR(ps8622->gpio_slp); dev_err(dev, "cannot get gpio_slp %d\n", ret); return ret; } /* * Assert the reset pin high to avoid the bridge being * initialized prematurely */ ps8622->gpio_rst = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(ps8622->gpio_rst)) { ret = PTR_ERR(ps8622->gpio_rst); dev_err(dev, "cannot get gpio_rst %d\n", ret); return ret; } ps8622->max_lane_count = id->driver_data; if (of_property_read_u32(dev->of_node, "lane-count", &ps8622->lane_count)) { 
ps8622->lane_count = ps8622->max_lane_count; } else if (ps8622->lane_count > ps8622->max_lane_count) { dev_info(dev, "lane-count property is too high," "using max_lane_count\n"); ps8622->lane_count = ps8622->max_lane_count; } if (!of_property_read_bool(dev->of_node, "use-external-pwm")) { ps8622->bl = backlight_device_register("ps8622-backlight", dev, ps8622, &ps8622_backlight_ops, NULL); if (IS_ERR(ps8622->bl)) { DRM_ERROR("failed to register backlight\n"); ret = PTR_ERR(ps8622->bl); ps8622->bl = NULL; return ret; } ps8622->bl->props.max_brightness = PS8622_MAX_BRIGHTNESS; ps8622->bl->props.brightness = PS8622_MAX_BRIGHTNESS; } ps8622->bridge.funcs = &ps8622_bridge_funcs; ps8622->bridge.type = DRM_MODE_CONNECTOR_LVDS; ps8622->bridge.of_node = dev->of_node; drm_bridge_add(&ps8622->bridge); i2c_set_clientdata(client, ps8622); return 0; } static void ps8622_remove(struct i2c_client *client) { struct ps8622_bridge *ps8622 = i2c_get_clientdata(client); backlight_device_unregister(ps8622->bl); drm_bridge_remove(&ps8622->bridge); } static const struct i2c_device_id ps8622_i2c_table[] = { /* Device type, max_lane_count */ {"ps8622", 1}, {"ps8625", 2}, {}, }; MODULE_DEVICE_TABLE(i2c, ps8622_i2c_table); static struct i2c_driver ps8622_driver = { .id_table = ps8622_i2c_table, .probe = ps8622_probe, .remove = ps8622_remove, .driver = { .name = "ps8622", .of_match_table = ps8622_devices, }, }; module_i2c_driver(ps8622_driver); MODULE_AUTHOR("Vincent Palatin <[email protected]>"); MODULE_DESCRIPTION("Parade ps8622/ps8625 eDP-LVDS converter driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/bridge/parade-ps8622.c
// SPDX-License-Identifier: GPL-2.0+ /* * i.MX8 NWL MIPI DSI host driver * * Copyright (C) 2017 NXP * Copyright (C) 2020 Purism SPC */ #include <linux/bitfield.h> #include <linux/bits.h> #include <linux/clk.h> #include <linux/irq.h> #include <linux/math64.h> #include <linux/mfd/syscon.h> #include <linux/media-bus-format.h> #include <linux/module.h> #include <linux/mux/consumer.h> #include <linux/of.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/reset.h> #include <linux/sys_soc.h> #include <linux/time64.h> #include <drm/drm_atomic_state_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_of.h> #include <drm/drm_print.h> #include <video/mipi_display.h> #include "nwl-dsi.h" #define DRV_NAME "nwl-dsi" /* i.MX8 NWL quirks */ /* i.MX8MQ errata E11418 */ #define E11418_HS_MODE_QUIRK BIT(0) #define NWL_DSI_MIPI_FIFO_TIMEOUT msecs_to_jiffies(500) enum transfer_direction { DSI_PACKET_SEND, DSI_PACKET_RECEIVE, }; #define NWL_DSI_ENDPOINT_LCDIF 0 #define NWL_DSI_ENDPOINT_DCSS 1 struct nwl_dsi_transfer { const struct mipi_dsi_msg *msg; struct mipi_dsi_packet packet; struct completion completed; int status; /* status of transmission */ enum transfer_direction direction; bool need_bta; u8 cmd; u16 rx_word_count; size_t tx_len; /* in bytes */ size_t rx_len; /* in bytes */ }; struct nwl_dsi { struct drm_bridge bridge; struct mipi_dsi_host dsi_host; struct device *dev; struct phy *phy; union phy_configure_opts phy_cfg; unsigned int quirks; struct regmap *regmap; int irq; /* * The DSI host controller needs this reset sequence according to NWL: * 1. Deassert pclk reset to get access to DSI regs * 2. Configure DSI Host and DPHY and enable DPHY * 3. Deassert ESC and BYTE resets to allow host TX operations) * 4. Send DSI cmds to configure peripheral (handled by panel drv) * 5. 
Deassert DPI reset so DPI receives pixels and starts sending * DSI data * * TODO: Since panel_bridges do their DSI setup in enable we * currently have 4. and 5. swapped. */ struct reset_control *rst_byte; struct reset_control *rst_esc; struct reset_control *rst_dpi; struct reset_control *rst_pclk; struct mux_control *mux; /* DSI clocks */ struct clk *phy_ref_clk; struct clk *rx_esc_clk; struct clk *tx_esc_clk; struct clk *core_clk; /* * hardware bug: the i.MX8MQ needs this clock on during reset * even when not using LCDIF. */ struct clk *lcdif_clk; /* dsi lanes */ u32 lanes; enum mipi_dsi_pixel_format format; struct drm_display_mode mode; unsigned long dsi_mode_flags; int error; struct nwl_dsi_transfer *xfer; }; static const struct regmap_config nwl_dsi_regmap_config = { .reg_bits = 16, .val_bits = 32, .reg_stride = 4, .max_register = NWL_DSI_IRQ_MASK2, .name = DRV_NAME, }; static inline struct nwl_dsi *bridge_to_dsi(struct drm_bridge *bridge) { return container_of(bridge, struct nwl_dsi, bridge); } static int nwl_dsi_clear_error(struct nwl_dsi *dsi) { int ret = dsi->error; dsi->error = 0; return ret; } static void nwl_dsi_write(struct nwl_dsi *dsi, unsigned int reg, u32 val) { int ret; if (dsi->error) return; ret = regmap_write(dsi->regmap, reg, val); if (ret < 0) { DRM_DEV_ERROR(dsi->dev, "Failed to write NWL DSI reg 0x%x: %d\n", reg, ret); dsi->error = ret; } } static u32 nwl_dsi_read(struct nwl_dsi *dsi, u32 reg) { unsigned int val; int ret; if (dsi->error) return 0; ret = regmap_read(dsi->regmap, reg, &val); if (ret < 0) { DRM_DEV_ERROR(dsi->dev, "Failed to read NWL DSI reg 0x%x: %d\n", reg, ret); dsi->error = ret; } return val; } static int nwl_dsi_get_dpi_pixel_format(enum mipi_dsi_pixel_format format) { switch (format) { case MIPI_DSI_FMT_RGB565: return NWL_DSI_PIXEL_FORMAT_16; case MIPI_DSI_FMT_RGB666: return NWL_DSI_PIXEL_FORMAT_18L; case MIPI_DSI_FMT_RGB666_PACKED: return NWL_DSI_PIXEL_FORMAT_18; case MIPI_DSI_FMT_RGB888: return NWL_DSI_PIXEL_FORMAT_24; 
default: return -EINVAL; } } /* * ps2bc - Picoseconds to byte clock cycles */ static u32 ps2bc(struct nwl_dsi *dsi, unsigned long long ps) { u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format); return DIV64_U64_ROUND_UP(ps * dsi->mode.clock * bpp, dsi->lanes * 8ULL * NSEC_PER_SEC); } /* * ui2bc - UI time periods to byte clock cycles */ static u32 ui2bc(unsigned int ui) { return DIV_ROUND_UP(ui, BITS_PER_BYTE); } /* * us2bc - micro seconds to lp clock cycles */ static u32 us2lp(u32 lp_clk_rate, unsigned long us) { return DIV_ROUND_UP(us * lp_clk_rate, USEC_PER_SEC); } static int nwl_dsi_config_host(struct nwl_dsi *dsi) { u32 cycles; struct phy_configure_opts_mipi_dphy *cfg = &dsi->phy_cfg.mipi_dphy; if (dsi->lanes < 1 || dsi->lanes > 4) return -EINVAL; DRM_DEV_DEBUG_DRIVER(dsi->dev, "DSI Lanes %d\n", dsi->lanes); nwl_dsi_write(dsi, NWL_DSI_CFG_NUM_LANES, dsi->lanes - 1); if (dsi->dsi_mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) { nwl_dsi_write(dsi, NWL_DSI_CFG_NONCONTINUOUS_CLK, 0x01); nwl_dsi_write(dsi, NWL_DSI_CFG_AUTOINSERT_EOTP, 0x01); } else { nwl_dsi_write(dsi, NWL_DSI_CFG_NONCONTINUOUS_CLK, 0x00); nwl_dsi_write(dsi, NWL_DSI_CFG_AUTOINSERT_EOTP, 0x00); } /* values in byte clock cycles */ cycles = ui2bc(cfg->clk_pre); DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_pre: 0x%x\n", cycles); nwl_dsi_write(dsi, NWL_DSI_CFG_T_PRE, cycles); cycles = ps2bc(dsi, cfg->lpx + cfg->clk_prepare + cfg->clk_zero); DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap (pre): 0x%x\n", cycles); cycles += ui2bc(cfg->clk_pre); DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_post: 0x%x\n", cycles); nwl_dsi_write(dsi, NWL_DSI_CFG_T_POST, cycles); cycles = ps2bc(dsi, cfg->hs_exit); DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap: 0x%x\n", cycles); nwl_dsi_write(dsi, NWL_DSI_CFG_TX_GAP, cycles); nwl_dsi_write(dsi, NWL_DSI_CFG_EXTRA_CMDS_AFTER_EOTP, 0x01); nwl_dsi_write(dsi, NWL_DSI_CFG_HTX_TO_COUNT, 0x00); nwl_dsi_write(dsi, NWL_DSI_CFG_LRX_H_TO_COUNT, 0x00); nwl_dsi_write(dsi, NWL_DSI_CFG_BTA_H_TO_COUNT, 0x00); /* In LP 
clock cycles */ cycles = us2lp(cfg->lp_clk_rate, cfg->wakeup); DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_twakeup: 0x%x\n", cycles); nwl_dsi_write(dsi, NWL_DSI_CFG_TWAKEUP, cycles); return nwl_dsi_clear_error(dsi); } static int nwl_dsi_config_dpi(struct nwl_dsi *dsi) { u32 mode; int color_format; bool burst_mode; int hfront_porch, hback_porch, vfront_porch, vback_porch; int hsync_len, vsync_len; hfront_porch = dsi->mode.hsync_start - dsi->mode.hdisplay; hsync_len = dsi->mode.hsync_end - dsi->mode.hsync_start; hback_porch = dsi->mode.htotal - dsi->mode.hsync_end; vfront_porch = dsi->mode.vsync_start - dsi->mode.vdisplay; vsync_len = dsi->mode.vsync_end - dsi->mode.vsync_start; vback_porch = dsi->mode.vtotal - dsi->mode.vsync_end; DRM_DEV_DEBUG_DRIVER(dsi->dev, "hfront_porch = %d\n", hfront_porch); DRM_DEV_DEBUG_DRIVER(dsi->dev, "hback_porch = %d\n", hback_porch); DRM_DEV_DEBUG_DRIVER(dsi->dev, "hsync_len = %d\n", hsync_len); DRM_DEV_DEBUG_DRIVER(dsi->dev, "hdisplay = %d\n", dsi->mode.hdisplay); DRM_DEV_DEBUG_DRIVER(dsi->dev, "vfront_porch = %d\n", vfront_porch); DRM_DEV_DEBUG_DRIVER(dsi->dev, "vback_porch = %d\n", vback_porch); DRM_DEV_DEBUG_DRIVER(dsi->dev, "vsync_len = %d\n", vsync_len); DRM_DEV_DEBUG_DRIVER(dsi->dev, "vactive = %d\n", dsi->mode.vdisplay); DRM_DEV_DEBUG_DRIVER(dsi->dev, "clock = %d kHz\n", dsi->mode.clock); color_format = nwl_dsi_get_dpi_pixel_format(dsi->format); if (color_format < 0) { DRM_DEV_ERROR(dsi->dev, "Invalid color format 0x%x\n", dsi->format); return color_format; } DRM_DEV_DEBUG_DRIVER(dsi->dev, "pixel fmt = %d\n", dsi->format); nwl_dsi_write(dsi, NWL_DSI_INTERFACE_COLOR_CODING, NWL_DSI_DPI_24_BIT); nwl_dsi_write(dsi, NWL_DSI_PIXEL_FORMAT, color_format); /* * Adjusting input polarity based on the video mode results in * a black screen so always pick active low: */ nwl_dsi_write(dsi, NWL_DSI_VSYNC_POLARITY, NWL_DSI_VSYNC_POLARITY_ACTIVE_LOW); nwl_dsi_write(dsi, NWL_DSI_HSYNC_POLARITY, NWL_DSI_HSYNC_POLARITY_ACTIVE_LOW); burst_mode = 
(dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_BURST) && !(dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE); if (burst_mode) { nwl_dsi_write(dsi, NWL_DSI_VIDEO_MODE, NWL_DSI_VM_BURST_MODE); nwl_dsi_write(dsi, NWL_DSI_PIXEL_FIFO_SEND_LEVEL, 256); } else { mode = ((dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) ? NWL_DSI_VM_BURST_MODE_WITH_SYNC_PULSES : NWL_DSI_VM_NON_BURST_MODE_WITH_SYNC_EVENTS); nwl_dsi_write(dsi, NWL_DSI_VIDEO_MODE, mode); nwl_dsi_write(dsi, NWL_DSI_PIXEL_FIFO_SEND_LEVEL, dsi->mode.hdisplay); } nwl_dsi_write(dsi, NWL_DSI_HFP, hfront_porch); nwl_dsi_write(dsi, NWL_DSI_HBP, hback_porch); nwl_dsi_write(dsi, NWL_DSI_HSA, hsync_len); nwl_dsi_write(dsi, NWL_DSI_ENABLE_MULT_PKTS, 0x0); nwl_dsi_write(dsi, NWL_DSI_BLLP_MODE, 0x1); nwl_dsi_write(dsi, NWL_DSI_USE_NULL_PKT_BLLP, 0x0); nwl_dsi_write(dsi, NWL_DSI_VC, 0x0); nwl_dsi_write(dsi, NWL_DSI_PIXEL_PAYLOAD_SIZE, dsi->mode.hdisplay); nwl_dsi_write(dsi, NWL_DSI_VACTIVE, dsi->mode.vdisplay - 1); nwl_dsi_write(dsi, NWL_DSI_VBP, vback_porch); nwl_dsi_write(dsi, NWL_DSI_VFP, vfront_porch); return nwl_dsi_clear_error(dsi); } static int nwl_dsi_init_interrupts(struct nwl_dsi *dsi) { u32 irq_enable = ~(u32)(NWL_DSI_TX_PKT_DONE_MASK | NWL_DSI_RX_PKT_HDR_RCVD_MASK | NWL_DSI_TX_FIFO_OVFLW_MASK | NWL_DSI_HS_TX_TIMEOUT_MASK); nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK, irq_enable); nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK2, 0x7); return nwl_dsi_clear_error(dsi); } static int nwl_dsi_host_attach(struct mipi_dsi_host *dsi_host, struct mipi_dsi_device *device) { struct nwl_dsi *dsi = container_of(dsi_host, struct nwl_dsi, dsi_host); struct device *dev = dsi->dev; DRM_DEV_INFO(dev, "lanes=%u, format=0x%x flags=0x%lx\n", device->lanes, device->format, device->mode_flags); if (device->lanes < 1 || device->lanes > 4) return -EINVAL; dsi->lanes = device->lanes; dsi->format = device->format; dsi->dsi_mode_flags = device->mode_flags; return 0; } static bool nwl_dsi_read_packet(struct nwl_dsi *dsi, u32 status) { struct device *dev = 
dsi->dev; struct nwl_dsi_transfer *xfer = dsi->xfer; int err; u8 *payload = xfer->msg->rx_buf; u32 val; u16 word_count; u8 channel; u8 data_type; xfer->status = 0; if (xfer->rx_word_count == 0) { if (!(status & NWL_DSI_RX_PKT_HDR_RCVD)) return false; /* Get the RX header and parse it */ val = nwl_dsi_read(dsi, NWL_DSI_RX_PKT_HEADER); err = nwl_dsi_clear_error(dsi); if (err) xfer->status = err; word_count = NWL_DSI_WC(val); channel = NWL_DSI_RX_VC(val); data_type = NWL_DSI_RX_DT(val); if (channel != xfer->msg->channel) { DRM_DEV_ERROR(dev, "[%02X] Channel mismatch (%u != %u)\n", xfer->cmd, channel, xfer->msg->channel); xfer->status = -EINVAL; return true; } switch (data_type) { case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE: case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE: if (xfer->msg->rx_len > 1) { /* read second byte */ payload[1] = word_count >> 8; ++xfer->rx_len; } fallthrough; case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE: case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE: if (xfer->msg->rx_len > 0) { /* read first byte */ payload[0] = word_count & 0xff; ++xfer->rx_len; } xfer->status = xfer->rx_len; return true; case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT: word_count &= 0xff; DRM_DEV_ERROR(dev, "[%02X] DSI error report: 0x%02x\n", xfer->cmd, word_count); xfer->status = -EPROTO; return true; } if (word_count > xfer->msg->rx_len) { DRM_DEV_ERROR(dev, "[%02X] Receive buffer too small: %zu (< %u)\n", xfer->cmd, xfer->msg->rx_len, word_count); xfer->status = -EINVAL; return true; } xfer->rx_word_count = word_count; } else { /* Set word_count from previous header read */ word_count = xfer->rx_word_count; } /* If RX payload is not yet received, wait for it */ if (!(status & NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD)) return false; /* Read the RX payload */ while (word_count >= 4) { val = nwl_dsi_read(dsi, NWL_DSI_RX_PAYLOAD); payload[0] = (val >> 0) & 0xff; payload[1] = (val >> 8) & 0xff; payload[2] = (val >> 16) & 0xff; payload[3] = (val >> 24) & 0xff; payload += 4; 
xfer->rx_len += 4; word_count -= 4; } if (word_count > 0) { val = nwl_dsi_read(dsi, NWL_DSI_RX_PAYLOAD); switch (word_count) { case 3: payload[2] = (val >> 16) & 0xff; ++xfer->rx_len; fallthrough; case 2: payload[1] = (val >> 8) & 0xff; ++xfer->rx_len; fallthrough; case 1: payload[0] = (val >> 0) & 0xff; ++xfer->rx_len; break; } } xfer->status = xfer->rx_len; err = nwl_dsi_clear_error(dsi); if (err) xfer->status = err; return true; } static void nwl_dsi_finish_transmission(struct nwl_dsi *dsi, u32 status) { struct nwl_dsi_transfer *xfer = dsi->xfer; bool end_packet = false; if (!xfer) return; if (xfer->direction == DSI_PACKET_SEND && status & NWL_DSI_TX_PKT_DONE) { xfer->status = xfer->tx_len; end_packet = true; } else if (status & NWL_DSI_DPHY_DIRECTION && ((status & (NWL_DSI_RX_PKT_HDR_RCVD | NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD)))) { end_packet = nwl_dsi_read_packet(dsi, status); } if (end_packet) complete(&xfer->completed); } static void nwl_dsi_begin_transmission(struct nwl_dsi *dsi) { struct nwl_dsi_transfer *xfer = dsi->xfer; struct mipi_dsi_packet *pkt = &xfer->packet; const u8 *payload; size_t length; u16 word_count; u8 hs_mode; u32 val; u32 hs_workaround = 0; /* Send the payload, if any */ length = pkt->payload_length; payload = pkt->payload; while (length >= 4) { val = *(u32 *)payload; hs_workaround |= !(val & 0xFFFF00); nwl_dsi_write(dsi, NWL_DSI_TX_PAYLOAD, val); payload += 4; length -= 4; } /* Send the rest of the payload */ val = 0; switch (length) { case 3: val |= payload[2] << 16; fallthrough; case 2: val |= payload[1] << 8; hs_workaround |= !(val & 0xFFFF00); fallthrough; case 1: val |= payload[0]; nwl_dsi_write(dsi, NWL_DSI_TX_PAYLOAD, val); break; } xfer->tx_len = pkt->payload_length; /* * Send the header * header[0] = Virtual Channel + Data Type * header[1] = Word Count LSB (LP) or first param (SP) * header[2] = Word Count MSB (LP) or second param (SP) */ word_count = pkt->header[1] | (pkt->header[2] << 8); if (hs_workaround && (dsi->quirks & 
E11418_HS_MODE_QUIRK)) { DRM_DEV_DEBUG_DRIVER(dsi->dev, "Using hs mode workaround for cmd 0x%x\n", xfer->cmd); hs_mode = 1; } else { hs_mode = (xfer->msg->flags & MIPI_DSI_MSG_USE_LPM) ? 0 : 1; } val = NWL_DSI_WC(word_count) | NWL_DSI_TX_VC(xfer->msg->channel) | NWL_DSI_TX_DT(xfer->msg->type) | NWL_DSI_HS_SEL(hs_mode) | NWL_DSI_BTA_TX(xfer->need_bta); nwl_dsi_write(dsi, NWL_DSI_PKT_CONTROL, val); /* Send packet command */ nwl_dsi_write(dsi, NWL_DSI_SEND_PACKET, 0x1); } static ssize_t nwl_dsi_host_transfer(struct mipi_dsi_host *dsi_host, const struct mipi_dsi_msg *msg) { struct nwl_dsi *dsi = container_of(dsi_host, struct nwl_dsi, dsi_host); struct nwl_dsi_transfer xfer; ssize_t ret = 0; /* Create packet to be sent */ dsi->xfer = &xfer; ret = mipi_dsi_create_packet(&xfer.packet, msg); if (ret < 0) { dsi->xfer = NULL; return ret; } if ((msg->type & MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM || msg->type & MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM || msg->type & MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM || msg->type & MIPI_DSI_DCS_READ) && msg->rx_len > 0 && msg->rx_buf) xfer.direction = DSI_PACKET_RECEIVE; else xfer.direction = DSI_PACKET_SEND; xfer.need_bta = (xfer.direction == DSI_PACKET_RECEIVE); xfer.need_bta |= (msg->flags & MIPI_DSI_MSG_REQ_ACK) ? 
1 : 0; xfer.msg = msg; xfer.status = -ETIMEDOUT; xfer.rx_word_count = 0; xfer.rx_len = 0; xfer.cmd = 0x00; if (msg->tx_len > 0) xfer.cmd = ((u8 *)(msg->tx_buf))[0]; init_completion(&xfer.completed); ret = clk_prepare_enable(dsi->rx_esc_clk); if (ret < 0) { DRM_DEV_ERROR(dsi->dev, "Failed to enable rx_esc clk: %zd\n", ret); return ret; } DRM_DEV_DEBUG_DRIVER(dsi->dev, "Enabled rx_esc clk @%lu Hz\n", clk_get_rate(dsi->rx_esc_clk)); /* Initiate the DSI packet transmision */ nwl_dsi_begin_transmission(dsi); if (!wait_for_completion_timeout(&xfer.completed, NWL_DSI_MIPI_FIFO_TIMEOUT)) { DRM_DEV_ERROR(dsi_host->dev, "[%02X] DSI transfer timed out\n", xfer.cmd); ret = -ETIMEDOUT; } else { ret = xfer.status; } clk_disable_unprepare(dsi->rx_esc_clk); return ret; } static const struct mipi_dsi_host_ops nwl_dsi_host_ops = { .attach = nwl_dsi_host_attach, .transfer = nwl_dsi_host_transfer, }; static irqreturn_t nwl_dsi_irq_handler(int irq, void *data) { u32 irq_status; struct nwl_dsi *dsi = data; irq_status = nwl_dsi_read(dsi, NWL_DSI_IRQ_STATUS); if (irq_status & NWL_DSI_TX_FIFO_OVFLW) DRM_DEV_ERROR_RATELIMITED(dsi->dev, "tx fifo overflow\n"); if (irq_status & NWL_DSI_HS_TX_TIMEOUT) DRM_DEV_ERROR_RATELIMITED(dsi->dev, "HS tx timeout\n"); if (irq_status & NWL_DSI_TX_PKT_DONE || irq_status & NWL_DSI_RX_PKT_HDR_RCVD || irq_status & NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD) nwl_dsi_finish_transmission(dsi, irq_status); return IRQ_HANDLED; } static int nwl_dsi_mode_set(struct nwl_dsi *dsi) { struct device *dev = dsi->dev; union phy_configure_opts *phy_cfg = &dsi->phy_cfg; int ret; if (!dsi->lanes) { DRM_DEV_ERROR(dev, "Need DSI lanes: %d\n", dsi->lanes); return -EINVAL; } ret = phy_init(dsi->phy); if (ret < 0) { DRM_DEV_ERROR(dev, "Failed to init DSI phy: %d\n", ret); return ret; } ret = phy_set_mode(dsi->phy, PHY_MODE_MIPI_DPHY); if (ret < 0) { DRM_DEV_ERROR(dev, "Failed to set DSI phy mode: %d\n", ret); goto uninit_phy; } ret = phy_configure(dsi->phy, phy_cfg); if (ret < 0) { 
DRM_DEV_ERROR(dev, "Failed to configure DSI phy: %d\n", ret); goto uninit_phy; } ret = clk_prepare_enable(dsi->tx_esc_clk); if (ret < 0) { DRM_DEV_ERROR(dsi->dev, "Failed to enable tx_esc clk: %d\n", ret); goto uninit_phy; } DRM_DEV_DEBUG_DRIVER(dsi->dev, "Enabled tx_esc clk @%lu Hz\n", clk_get_rate(dsi->tx_esc_clk)); ret = nwl_dsi_config_host(dsi); if (ret < 0) { DRM_DEV_ERROR(dev, "Failed to set up DSI: %d", ret); goto disable_clock; } ret = nwl_dsi_config_dpi(dsi); if (ret < 0) { DRM_DEV_ERROR(dev, "Failed to set up DPI: %d", ret); goto disable_clock; } ret = phy_power_on(dsi->phy); if (ret < 0) { DRM_DEV_ERROR(dev, "Failed to power on DPHY (%d)\n", ret); goto disable_clock; } ret = nwl_dsi_init_interrupts(dsi); if (ret < 0) goto power_off_phy; return ret; power_off_phy: phy_power_off(dsi->phy); disable_clock: clk_disable_unprepare(dsi->tx_esc_clk); uninit_phy: phy_exit(dsi->phy); return ret; } static int nwl_dsi_disable(struct nwl_dsi *dsi) { struct device *dev = dsi->dev; DRM_DEV_DEBUG_DRIVER(dev, "Disabling clocks and phy\n"); phy_power_off(dsi->phy); phy_exit(dsi->phy); /* Disabling the clock before the phy breaks enabling dsi again */ clk_disable_unprepare(dsi->tx_esc_clk); return 0; } static void nwl_dsi_bridge_atomic_disable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct nwl_dsi *dsi = bridge_to_dsi(bridge); int ret; nwl_dsi_disable(dsi); ret = reset_control_assert(dsi->rst_dpi); if (ret < 0) { DRM_DEV_ERROR(dsi->dev, "Failed to assert DPI: %d\n", ret); return; } ret = reset_control_assert(dsi->rst_byte); if (ret < 0) { DRM_DEV_ERROR(dsi->dev, "Failed to assert ESC: %d\n", ret); return; } ret = reset_control_assert(dsi->rst_esc); if (ret < 0) { DRM_DEV_ERROR(dsi->dev, "Failed to assert BYTE: %d\n", ret); return; } ret = reset_control_assert(dsi->rst_pclk); if (ret < 0) { DRM_DEV_ERROR(dsi->dev, "Failed to assert PCLK: %d\n", ret); return; } clk_disable_unprepare(dsi->core_clk); clk_disable_unprepare(dsi->lcdif_clk); 
pm_runtime_put(dsi->dev); } static int nwl_dsi_get_dphy_params(struct nwl_dsi *dsi, const struct drm_display_mode *mode, union phy_configure_opts *phy_opts) { unsigned long rate; int ret; if (dsi->lanes < 1 || dsi->lanes > 4) return -EINVAL; /* * So far the DPHY spec minimal timings work for both mixel * dphy and nwl dsi host */ ret = phy_mipi_dphy_get_default_config(mode->clock * 1000, mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes, &phy_opts->mipi_dphy); if (ret < 0) return ret; rate = clk_get_rate(dsi->tx_esc_clk); DRM_DEV_DEBUG_DRIVER(dsi->dev, "LP clk is @%lu Hz\n", rate); phy_opts->mipi_dphy.lp_clk_rate = rate; return 0; } static enum drm_mode_status nwl_dsi_bridge_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode) { struct nwl_dsi *dsi = bridge_to_dsi(bridge); int bpp = mipi_dsi_pixel_format_to_bpp(dsi->format); if (mode->clock * bpp > 15000000 * dsi->lanes) return MODE_CLOCK_HIGH; if (mode->clock * bpp < 80000 * dsi->lanes) return MODE_CLOCK_LOW; return MODE_OK; } static int nwl_dsi_bridge_atomic_check(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; /* At least LCDIF + NWL needs active high sync */ adjusted_mode->flags |= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC); adjusted_mode->flags &= ~(DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC); /* * Do a full modeset if crtc_state->active is changed to be true. * This ensures our ->mode_set() is called to get the DSI controller * and the PHY ready to send DCS commands, when only the connector's * DPMS is brought out of "Off" status. 
*/ if (crtc_state->active_changed && crtc_state->active) crtc_state->mode_changed = true; return 0; } static void nwl_dsi_bridge_mode_set(struct drm_bridge *bridge, const struct drm_display_mode *mode, const struct drm_display_mode *adjusted_mode) { struct nwl_dsi *dsi = bridge_to_dsi(bridge); struct device *dev = dsi->dev; union phy_configure_opts new_cfg; unsigned long phy_ref_rate; int ret; ret = nwl_dsi_get_dphy_params(dsi, adjusted_mode, &new_cfg); if (ret < 0) return; phy_ref_rate = clk_get_rate(dsi->phy_ref_clk); DRM_DEV_DEBUG_DRIVER(dev, "PHY at ref rate: %lu\n", phy_ref_rate); /* Save the new desired phy config */ memcpy(&dsi->phy_cfg, &new_cfg, sizeof(new_cfg)); drm_mode_copy(&dsi->mode, adjusted_mode); drm_mode_debug_printmodeline(adjusted_mode); if (pm_runtime_resume_and_get(dev) < 0) return; if (clk_prepare_enable(dsi->lcdif_clk) < 0) goto runtime_put; if (clk_prepare_enable(dsi->core_clk) < 0) goto runtime_put; /* Step 1 from DSI reset-out instructions */ ret = reset_control_deassert(dsi->rst_pclk); if (ret < 0) { DRM_DEV_ERROR(dev, "Failed to deassert PCLK: %d\n", ret); goto runtime_put; } /* Step 2 from DSI reset-out instructions */ nwl_dsi_mode_set(dsi); /* Step 3 from DSI reset-out instructions */ ret = reset_control_deassert(dsi->rst_esc); if (ret < 0) { DRM_DEV_ERROR(dev, "Failed to deassert ESC: %d\n", ret); goto runtime_put; } ret = reset_control_deassert(dsi->rst_byte); if (ret < 0) { DRM_DEV_ERROR(dev, "Failed to deassert BYTE: %d\n", ret); goto runtime_put; } return; runtime_put: pm_runtime_put_sync(dev); } static void nwl_dsi_bridge_atomic_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct nwl_dsi *dsi = bridge_to_dsi(bridge); int ret; /* Step 5 from DSI reset-out instructions */ ret = reset_control_deassert(dsi->rst_dpi); if (ret < 0) DRM_DEV_ERROR(dsi->dev, "Failed to deassert DPI: %d\n", ret); } static int nwl_dsi_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct 
nwl_dsi *dsi = bridge_to_dsi(bridge); struct drm_bridge *panel_bridge; panel_bridge = devm_drm_of_get_bridge(dsi->dev, dsi->dev->of_node, 1, 0); if (IS_ERR(panel_bridge)) return PTR_ERR(panel_bridge); return drm_bridge_attach(bridge->encoder, panel_bridge, bridge, flags); } static u32 *nwl_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state, u32 output_fmt, unsigned int *num_input_fmts) { u32 *input_fmts, input_fmt; *num_input_fmts = 0; switch (output_fmt) { /* If MEDIA_BUS_FMT_FIXED is tested, return default bus format */ case MEDIA_BUS_FMT_FIXED: input_fmt = MEDIA_BUS_FMT_RGB888_1X24; break; case MEDIA_BUS_FMT_RGB888_1X24: case MEDIA_BUS_FMT_RGB666_1X18: case MEDIA_BUS_FMT_RGB565_1X16: input_fmt = output_fmt; break; default: return NULL; } input_fmts = kcalloc(1, sizeof(*input_fmts), GFP_KERNEL); if (!input_fmts) return NULL; input_fmts[0] = input_fmt; *num_input_fmts = 1; return input_fmts; } static const struct drm_bridge_funcs nwl_dsi_bridge_funcs = { .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_reset = drm_atomic_helper_bridge_reset, .atomic_check = nwl_dsi_bridge_atomic_check, .atomic_enable = nwl_dsi_bridge_atomic_enable, .atomic_disable = nwl_dsi_bridge_atomic_disable, .atomic_get_input_bus_fmts = nwl_bridge_atomic_get_input_bus_fmts, .mode_set = nwl_dsi_bridge_mode_set, .mode_valid = nwl_dsi_bridge_mode_valid, .attach = nwl_dsi_bridge_attach, }; static int nwl_dsi_parse_dt(struct nwl_dsi *dsi) { struct platform_device *pdev = to_platform_device(dsi->dev); struct clk *clk; void __iomem *base; int ret; dsi->phy = devm_phy_get(dsi->dev, "dphy"); if (IS_ERR(dsi->phy)) { ret = PTR_ERR(dsi->phy); if (ret != -EPROBE_DEFER) DRM_DEV_ERROR(dsi->dev, "Could not get PHY: %d\n", ret); return ret; } clk = devm_clk_get(dsi->dev, "lcdif"); if (IS_ERR(clk)) { 
ret = PTR_ERR(clk); DRM_DEV_ERROR(dsi->dev, "Failed to get lcdif clock: %d\n", ret); return ret; } dsi->lcdif_clk = clk; clk = devm_clk_get(dsi->dev, "core"); if (IS_ERR(clk)) { ret = PTR_ERR(clk); DRM_DEV_ERROR(dsi->dev, "Failed to get core clock: %d\n", ret); return ret; } dsi->core_clk = clk; clk = devm_clk_get(dsi->dev, "phy_ref"); if (IS_ERR(clk)) { ret = PTR_ERR(clk); DRM_DEV_ERROR(dsi->dev, "Failed to get phy_ref clock: %d\n", ret); return ret; } dsi->phy_ref_clk = clk; clk = devm_clk_get(dsi->dev, "rx_esc"); if (IS_ERR(clk)) { ret = PTR_ERR(clk); DRM_DEV_ERROR(dsi->dev, "Failed to get rx_esc clock: %d\n", ret); return ret; } dsi->rx_esc_clk = clk; clk = devm_clk_get(dsi->dev, "tx_esc"); if (IS_ERR(clk)) { ret = PTR_ERR(clk); DRM_DEV_ERROR(dsi->dev, "Failed to get tx_esc clock: %d\n", ret); return ret; } dsi->tx_esc_clk = clk; dsi->mux = devm_mux_control_get(dsi->dev, NULL); if (IS_ERR(dsi->mux)) { ret = PTR_ERR(dsi->mux); if (ret != -EPROBE_DEFER) DRM_DEV_ERROR(dsi->dev, "Failed to get mux: %d\n", ret); return ret; } base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); dsi->regmap = devm_regmap_init_mmio(dsi->dev, base, &nwl_dsi_regmap_config); if (IS_ERR(dsi->regmap)) { ret = PTR_ERR(dsi->regmap); DRM_DEV_ERROR(dsi->dev, "Failed to create NWL DSI regmap: %d\n", ret); return ret; } dsi->irq = platform_get_irq(pdev, 0); if (dsi->irq < 0) { DRM_DEV_ERROR(dsi->dev, "Failed to get device IRQ: %d\n", dsi->irq); return dsi->irq; } dsi->rst_pclk = devm_reset_control_get_exclusive(dsi->dev, "pclk"); if (IS_ERR(dsi->rst_pclk)) { DRM_DEV_ERROR(dsi->dev, "Failed to get pclk reset: %ld\n", PTR_ERR(dsi->rst_pclk)); return PTR_ERR(dsi->rst_pclk); } dsi->rst_byte = devm_reset_control_get_exclusive(dsi->dev, "byte"); if (IS_ERR(dsi->rst_byte)) { DRM_DEV_ERROR(dsi->dev, "Failed to get byte reset: %ld\n", PTR_ERR(dsi->rst_byte)); return PTR_ERR(dsi->rst_byte); } dsi->rst_esc = devm_reset_control_get_exclusive(dsi->dev, "esc"); if 
(IS_ERR(dsi->rst_esc)) { DRM_DEV_ERROR(dsi->dev, "Failed to get esc reset: %ld\n", PTR_ERR(dsi->rst_esc)); return PTR_ERR(dsi->rst_esc); } dsi->rst_dpi = devm_reset_control_get_exclusive(dsi->dev, "dpi"); if (IS_ERR(dsi->rst_dpi)) { DRM_DEV_ERROR(dsi->dev, "Failed to get dpi reset: %ld\n", PTR_ERR(dsi->rst_dpi)); return PTR_ERR(dsi->rst_dpi); } return 0; } static int nwl_dsi_select_input(struct nwl_dsi *dsi) { struct device_node *remote; u32 use_dcss = 1; int ret; remote = of_graph_get_remote_node(dsi->dev->of_node, 0, NWL_DSI_ENDPOINT_LCDIF); if (remote) { use_dcss = 0; } else { remote = of_graph_get_remote_node(dsi->dev->of_node, 0, NWL_DSI_ENDPOINT_DCSS); if (!remote) { DRM_DEV_ERROR(dsi->dev, "No valid input endpoint found\n"); return -EINVAL; } } DRM_DEV_INFO(dsi->dev, "Using %s as input source\n", (use_dcss) ? "DCSS" : "LCDIF"); ret = mux_control_try_select(dsi->mux, use_dcss); if (ret < 0) DRM_DEV_ERROR(dsi->dev, "Failed to select input: %d\n", ret); of_node_put(remote); return ret; } static int nwl_dsi_deselect_input(struct nwl_dsi *dsi) { int ret; ret = mux_control_deselect(dsi->mux); if (ret < 0) DRM_DEV_ERROR(dsi->dev, "Failed to deselect input: %d\n", ret); return ret; } static const struct drm_bridge_timings nwl_dsi_timings = { .input_bus_flags = DRM_BUS_FLAG_DE_LOW, }; static const struct of_device_id nwl_dsi_dt_ids[] = { { .compatible = "fsl,imx8mq-nwl-dsi", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, nwl_dsi_dt_ids); static const struct soc_device_attribute nwl_dsi_quirks_match[] = { { .soc_id = "i.MX8MQ", .revision = "2.0", .data = (void *)E11418_HS_MODE_QUIRK }, { /* sentinel. 
*/ } }; static int nwl_dsi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; const struct soc_device_attribute *attr; struct nwl_dsi *dsi; int ret; dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL); if (!dsi) return -ENOMEM; dsi->dev = dev; ret = nwl_dsi_parse_dt(dsi); if (ret) return ret; ret = devm_request_irq(dev, dsi->irq, nwl_dsi_irq_handler, 0, dev_name(dev), dsi); if (ret < 0) { DRM_DEV_ERROR(dev, "Failed to request IRQ %d: %d\n", dsi->irq, ret); return ret; } dsi->dsi_host.ops = &nwl_dsi_host_ops; dsi->dsi_host.dev = dev; ret = mipi_dsi_host_register(&dsi->dsi_host); if (ret) { DRM_DEV_ERROR(dev, "Failed to register MIPI host: %d\n", ret); return ret; } attr = soc_device_match(nwl_dsi_quirks_match); if (attr) dsi->quirks = (uintptr_t)attr->data; dsi->bridge.driver_private = dsi; dsi->bridge.funcs = &nwl_dsi_bridge_funcs; dsi->bridge.of_node = dev->of_node; dsi->bridge.timings = &nwl_dsi_timings; dev_set_drvdata(dev, dsi); pm_runtime_enable(dev); ret = nwl_dsi_select_input(dsi); if (ret < 0) { pm_runtime_disable(dev); mipi_dsi_host_unregister(&dsi->dsi_host); return ret; } drm_bridge_add(&dsi->bridge); return 0; } static void nwl_dsi_remove(struct platform_device *pdev) { struct nwl_dsi *dsi = platform_get_drvdata(pdev); nwl_dsi_deselect_input(dsi); mipi_dsi_host_unregister(&dsi->dsi_host); drm_bridge_remove(&dsi->bridge); pm_runtime_disable(&pdev->dev); } static struct platform_driver nwl_dsi_driver = { .probe = nwl_dsi_probe, .remove_new = nwl_dsi_remove, .driver = { .of_match_table = nwl_dsi_dt_ids, .name = DRV_NAME, }, }; module_platform_driver(nwl_dsi_driver); MODULE_AUTHOR("NXP Semiconductor"); MODULE_AUTHOR("Purism SPC"); MODULE_DESCRIPTION("Northwest Logic MIPI-DSI driver"); MODULE_LICENSE("GPL"); /* GPLv2 or later */
linux-master
drivers/gpu/drm/bridge/nwl-dsi.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2018 Renesas Electronics * * Copyright (C) 2016 Atmel * Bo Shen <[email protected]> * * Authors: Bo Shen <[email protected]> * Boris Brezillon <[email protected]> * Wu, Songjun <[email protected]> * * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. All Rights Reserved. */ #include <linux/gpio/consumer.h> #include <linux/i2c-mux.h> #include <linux/i2c.h> #include <linux/media-bus-format.h> #include <linux/module.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/clk.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_drv.h> #include <drm/drm_edid.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <sound/hdmi-codec.h> #define SII902X_TPI_VIDEO_DATA 0x0 #define SII902X_TPI_PIXEL_REPETITION 0x8 #define SII902X_TPI_AVI_PIXEL_REP_BUS_24BIT BIT(5) #define SII902X_TPI_AVI_PIXEL_REP_RISING_EDGE BIT(4) #define SII902X_TPI_AVI_PIXEL_REP_4X 3 #define SII902X_TPI_AVI_PIXEL_REP_2X 1 #define SII902X_TPI_AVI_PIXEL_REP_NONE 0 #define SII902X_TPI_CLK_RATIO_HALF (0 << 6) #define SII902X_TPI_CLK_RATIO_1X (1 << 6) #define SII902X_TPI_CLK_RATIO_2X (2 << 6) #define SII902X_TPI_CLK_RATIO_4X (3 << 6) #define SII902X_TPI_AVI_IN_FORMAT 0x9 #define SII902X_TPI_AVI_INPUT_BITMODE_12BIT BIT(7) #define SII902X_TPI_AVI_INPUT_DITHER BIT(6) #define SII902X_TPI_AVI_INPUT_RANGE_LIMITED (2 << 2) #define SII902X_TPI_AVI_INPUT_RANGE_FULL (1 << 2) #define SII902X_TPI_AVI_INPUT_RANGE_AUTO (0 << 2) #define SII902X_TPI_AVI_INPUT_COLORSPACE_BLACK (3 << 0) #define SII902X_TPI_AVI_INPUT_COLORSPACE_YUV422 (2 << 0) #define SII902X_TPI_AVI_INPUT_COLORSPACE_YUV444 (1 << 0) #define SII902X_TPI_AVI_INPUT_COLORSPACE_RGB (0 << 0) #define SII902X_TPI_AVI_INFOFRAME 0x0c #define SII902X_SYS_CTRL_DATA 0x1a #define SII902X_SYS_CTRL_PWR_DWN BIT(4) #define SII902X_SYS_CTRL_AV_MUTE BIT(3) #define SII902X_SYS_CTRL_DDC_BUS_REQ BIT(2) #define SII902X_SYS_CTRL_DDC_BUS_GRTD 
BIT(1) #define SII902X_SYS_CTRL_OUTPUT_MODE BIT(0) #define SII902X_SYS_CTRL_OUTPUT_HDMI 1 #define SII902X_SYS_CTRL_OUTPUT_DVI 0 #define SII902X_REG_CHIPID(n) (0x1b + (n)) #define SII902X_PWR_STATE_CTRL 0x1e #define SII902X_AVI_POWER_STATE_MSK GENMASK(1, 0) #define SII902X_AVI_POWER_STATE_D(l) ((l) & SII902X_AVI_POWER_STATE_MSK) /* Audio */ #define SII902X_TPI_I2S_ENABLE_MAPPING_REG 0x1f #define SII902X_TPI_I2S_CONFIG_FIFO0 (0 << 0) #define SII902X_TPI_I2S_CONFIG_FIFO1 (1 << 0) #define SII902X_TPI_I2S_CONFIG_FIFO2 (2 << 0) #define SII902X_TPI_I2S_CONFIG_FIFO3 (3 << 0) #define SII902X_TPI_I2S_LEFT_RIGHT_SWAP (1 << 2) #define SII902X_TPI_I2S_AUTO_DOWNSAMPLE (1 << 3) #define SII902X_TPI_I2S_SELECT_SD0 (0 << 4) #define SII902X_TPI_I2S_SELECT_SD1 (1 << 4) #define SII902X_TPI_I2S_SELECT_SD2 (2 << 4) #define SII902X_TPI_I2S_SELECT_SD3 (3 << 4) #define SII902X_TPI_I2S_FIFO_ENABLE (1 << 7) #define SII902X_TPI_I2S_INPUT_CONFIG_REG 0x20 #define SII902X_TPI_I2S_FIRST_BIT_SHIFT_YES (0 << 0) #define SII902X_TPI_I2S_FIRST_BIT_SHIFT_NO (1 << 0) #define SII902X_TPI_I2S_SD_DIRECTION_MSB_FIRST (0 << 1) #define SII902X_TPI_I2S_SD_DIRECTION_LSB_FIRST (1 << 1) #define SII902X_TPI_I2S_SD_JUSTIFY_LEFT (0 << 2) #define SII902X_TPI_I2S_SD_JUSTIFY_RIGHT (1 << 2) #define SII902X_TPI_I2S_WS_POLARITY_LOW (0 << 3) #define SII902X_TPI_I2S_WS_POLARITY_HIGH (1 << 3) #define SII902X_TPI_I2S_MCLK_MULTIPLIER_128 (0 << 4) #define SII902X_TPI_I2S_MCLK_MULTIPLIER_256 (1 << 4) #define SII902X_TPI_I2S_MCLK_MULTIPLIER_384 (2 << 4) #define SII902X_TPI_I2S_MCLK_MULTIPLIER_512 (3 << 4) #define SII902X_TPI_I2S_MCLK_MULTIPLIER_768 (4 << 4) #define SII902X_TPI_I2S_MCLK_MULTIPLIER_1024 (5 << 4) #define SII902X_TPI_I2S_MCLK_MULTIPLIER_1152 (6 << 4) #define SII902X_TPI_I2S_MCLK_MULTIPLIER_192 (7 << 4) #define SII902X_TPI_I2S_SCK_EDGE_FALLING (0 << 7) #define SII902X_TPI_I2S_SCK_EDGE_RISING (1 << 7) #define SII902X_TPI_I2S_STRM_HDR_BASE 0x21 #define SII902X_TPI_I2S_STRM_HDR_SIZE 5 #define 
SII902X_TPI_AUDIO_CONFIG_BYTE2_REG 0x26 #define SII902X_TPI_AUDIO_CODING_STREAM_HEADER (0 << 0) #define SII902X_TPI_AUDIO_CODING_PCM (1 << 0) #define SII902X_TPI_AUDIO_CODING_AC3 (2 << 0) #define SII902X_TPI_AUDIO_CODING_MPEG1 (3 << 0) #define SII902X_TPI_AUDIO_CODING_MP3 (4 << 0) #define SII902X_TPI_AUDIO_CODING_MPEG2 (5 << 0) #define SII902X_TPI_AUDIO_CODING_AAC (6 << 0) #define SII902X_TPI_AUDIO_CODING_DTS (7 << 0) #define SII902X_TPI_AUDIO_CODING_ATRAC (8 << 0) #define SII902X_TPI_AUDIO_MUTE_DISABLE (0 << 4) #define SII902X_TPI_AUDIO_MUTE_ENABLE (1 << 4) #define SII902X_TPI_AUDIO_LAYOUT_2_CHANNELS (0 << 5) #define SII902X_TPI_AUDIO_LAYOUT_8_CHANNELS (1 << 5) #define SII902X_TPI_AUDIO_INTERFACE_DISABLE (0 << 6) #define SII902X_TPI_AUDIO_INTERFACE_SPDIF (1 << 6) #define SII902X_TPI_AUDIO_INTERFACE_I2S (2 << 6) #define SII902X_TPI_AUDIO_CONFIG_BYTE3_REG 0x27 #define SII902X_TPI_AUDIO_FREQ_STREAM (0 << 3) #define SII902X_TPI_AUDIO_FREQ_32KHZ (1 << 3) #define SII902X_TPI_AUDIO_FREQ_44KHZ (2 << 3) #define SII902X_TPI_AUDIO_FREQ_48KHZ (3 << 3) #define SII902X_TPI_AUDIO_FREQ_88KHZ (4 << 3) #define SII902X_TPI_AUDIO_FREQ_96KHZ (5 << 3) #define SII902X_TPI_AUDIO_FREQ_176KHZ (6 << 3) #define SII902X_TPI_AUDIO_FREQ_192KHZ (7 << 3) #define SII902X_TPI_AUDIO_SAMPLE_SIZE_STREAM (0 << 6) #define SII902X_TPI_AUDIO_SAMPLE_SIZE_16 (1 << 6) #define SII902X_TPI_AUDIO_SAMPLE_SIZE_20 (2 << 6) #define SII902X_TPI_AUDIO_SAMPLE_SIZE_24 (3 << 6) #define SII902X_TPI_AUDIO_CONFIG_BYTE4_REG 0x28 #define SII902X_INT_ENABLE 0x3c #define SII902X_INT_STATUS 0x3d #define SII902X_HOTPLUG_EVENT BIT(0) #define SII902X_PLUGGED_STATUS BIT(2) #define SII902X_REG_TPI_RQB 0xc7 /* Indirect internal register access */ #define SII902X_IND_SET_PAGE 0xbc #define SII902X_IND_OFFSET 0xbd #define SII902X_IND_VALUE 0xbe #define SII902X_TPI_MISC_INFOFRAME_BASE 0xbf #define SII902X_TPI_MISC_INFOFRAME_END 0xde #define SII902X_TPI_MISC_INFOFRAME_SIZE \ (SII902X_TPI_MISC_INFOFRAME_END - 
SII902X_TPI_MISC_INFOFRAME_BASE) #define SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS 500 #define SII902X_AUDIO_PORT_INDEX 3 struct sii902x { struct i2c_client *i2c; struct regmap *regmap; struct drm_bridge bridge; struct drm_bridge *next_bridge; struct drm_connector connector; struct gpio_desc *reset_gpio; struct i2c_mux_core *i2cmux; bool sink_is_hdmi; /* * Mutex protects audio and video functions from interfering * each other, by keeping their i2c command sequences atomic. */ struct mutex mutex; struct sii902x_audio { struct platform_device *pdev; struct clk *mclk; u32 i2s_fifo_sequence[4]; } audio; }; static int sii902x_read_unlocked(struct i2c_client *i2c, u8 reg, u8 *val) { union i2c_smbus_data data; int ret; ret = __i2c_smbus_xfer(i2c->adapter, i2c->addr, i2c->flags, I2C_SMBUS_READ, reg, I2C_SMBUS_BYTE_DATA, &data); if (ret < 0) return ret; *val = data.byte; return 0; } static int sii902x_write_unlocked(struct i2c_client *i2c, u8 reg, u8 val) { union i2c_smbus_data data; data.byte = val; return __i2c_smbus_xfer(i2c->adapter, i2c->addr, i2c->flags, I2C_SMBUS_WRITE, reg, I2C_SMBUS_BYTE_DATA, &data); } static int sii902x_update_bits_unlocked(struct i2c_client *i2c, u8 reg, u8 mask, u8 val) { int ret; u8 status; ret = sii902x_read_unlocked(i2c, reg, &status); if (ret) return ret; status &= ~mask; status |= val & mask; return sii902x_write_unlocked(i2c, reg, status); } static inline struct sii902x *bridge_to_sii902x(struct drm_bridge *bridge) { return container_of(bridge, struct sii902x, bridge); } static inline struct sii902x *connector_to_sii902x(struct drm_connector *con) { return container_of(con, struct sii902x, connector); } static void sii902x_reset(struct sii902x *sii902x) { if (!sii902x->reset_gpio) return; gpiod_set_value_cansleep(sii902x->reset_gpio, 1); /* The datasheet says treset-min = 100us. Make it 150us to be sure. 
*/ usleep_range(150, 200); gpiod_set_value_cansleep(sii902x->reset_gpio, 0); } static enum drm_connector_status sii902x_detect(struct sii902x *sii902x) { unsigned int status; mutex_lock(&sii902x->mutex); regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status); mutex_unlock(&sii902x->mutex); return (status & SII902X_PLUGGED_STATUS) ? connector_status_connected : connector_status_disconnected; } static enum drm_connector_status sii902x_connector_detect(struct drm_connector *connector, bool force) { struct sii902x *sii902x = connector_to_sii902x(connector); return sii902x_detect(sii902x); } static const struct drm_connector_funcs sii902x_connector_funcs = { .detect = sii902x_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = drm_connector_cleanup, .reset = drm_atomic_helper_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; static struct edid *sii902x_get_edid(struct sii902x *sii902x, struct drm_connector *connector) { struct edid *edid; mutex_lock(&sii902x->mutex); edid = drm_get_edid(connector, sii902x->i2cmux->adapter[0]); if (edid) { if (drm_detect_hdmi_monitor(edid)) sii902x->sink_is_hdmi = true; else sii902x->sink_is_hdmi = false; } mutex_unlock(&sii902x->mutex); return edid; } static int sii902x_get_modes(struct drm_connector *connector) { struct sii902x *sii902x = connector_to_sii902x(connector); struct edid *edid; int num = 0; edid = sii902x_get_edid(sii902x, connector); drm_connector_update_edid_property(connector, edid); if (edid) { num = drm_add_edid_modes(connector, edid); kfree(edid); } return num; } static enum drm_mode_status sii902x_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { /* TODO: check mode */ return MODE_OK; } static const struct drm_connector_helper_funcs sii902x_connector_helper_funcs = { .get_modes = sii902x_get_modes, .mode_valid = sii902x_mode_valid, }; static void 
sii902x_bridge_disable(struct drm_bridge *bridge) { struct sii902x *sii902x = bridge_to_sii902x(bridge); mutex_lock(&sii902x->mutex); regmap_update_bits(sii902x->regmap, SII902X_SYS_CTRL_DATA, SII902X_SYS_CTRL_PWR_DWN, SII902X_SYS_CTRL_PWR_DWN); mutex_unlock(&sii902x->mutex); } static void sii902x_bridge_enable(struct drm_bridge *bridge) { struct sii902x *sii902x = bridge_to_sii902x(bridge); mutex_lock(&sii902x->mutex); regmap_update_bits(sii902x->regmap, SII902X_PWR_STATE_CTRL, SII902X_AVI_POWER_STATE_MSK, SII902X_AVI_POWER_STATE_D(0)); regmap_update_bits(sii902x->regmap, SII902X_SYS_CTRL_DATA, SII902X_SYS_CTRL_PWR_DWN, 0); mutex_unlock(&sii902x->mutex); } static void sii902x_bridge_mode_set(struct drm_bridge *bridge, const struct drm_display_mode *mode, const struct drm_display_mode *adj) { struct sii902x *sii902x = bridge_to_sii902x(bridge); u8 output_mode = SII902X_SYS_CTRL_OUTPUT_DVI; struct regmap *regmap = sii902x->regmap; u8 buf[HDMI_INFOFRAME_SIZE(AVI)]; struct hdmi_avi_infoframe frame; u16 pixel_clock_10kHz = adj->clock / 10; int ret; if (sii902x->sink_is_hdmi) output_mode = SII902X_SYS_CTRL_OUTPUT_HDMI; buf[0] = pixel_clock_10kHz & 0xff; buf[1] = pixel_clock_10kHz >> 8; buf[2] = drm_mode_vrefresh(adj); buf[3] = 0x00; buf[4] = adj->hdisplay; buf[5] = adj->hdisplay >> 8; buf[6] = adj->vdisplay; buf[7] = adj->vdisplay >> 8; buf[8] = SII902X_TPI_CLK_RATIO_1X | SII902X_TPI_AVI_PIXEL_REP_NONE | SII902X_TPI_AVI_PIXEL_REP_BUS_24BIT; buf[9] = SII902X_TPI_AVI_INPUT_RANGE_AUTO | SII902X_TPI_AVI_INPUT_COLORSPACE_RGB; mutex_lock(&sii902x->mutex); ret = regmap_update_bits(sii902x->regmap, SII902X_SYS_CTRL_DATA, SII902X_SYS_CTRL_OUTPUT_MODE, output_mode); if (ret) goto out; ret = regmap_bulk_write(regmap, SII902X_TPI_VIDEO_DATA, buf, 10); if (ret) goto out; ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, &sii902x->connector, adj); if (ret < 0) { DRM_ERROR("couldn't fill AVI infoframe\n"); goto out; } ret = hdmi_avi_infoframe_pack(&frame, buf, sizeof(buf)); if 
(ret < 0) { DRM_ERROR("failed to pack AVI infoframe: %d\n", ret); goto out; } /* Do not send the infoframe header, but keep the CRC field. */ regmap_bulk_write(regmap, SII902X_TPI_AVI_INFOFRAME, buf + HDMI_INFOFRAME_HEADER_SIZE - 1, HDMI_AVI_INFOFRAME_SIZE + 1); out: mutex_unlock(&sii902x->mutex); } static int sii902x_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct sii902x *sii902x = bridge_to_sii902x(bridge); u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24; struct drm_device *drm = bridge->dev; int ret; if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) return drm_bridge_attach(bridge->encoder, sii902x->next_bridge, bridge, flags); drm_connector_helper_add(&sii902x->connector, &sii902x_connector_helper_funcs); if (!drm_core_check_feature(drm, DRIVER_ATOMIC)) { dev_err(&sii902x->i2c->dev, "sii902x driver is only compatible with DRM devices supporting atomic updates\n"); return -ENOTSUPP; } ret = drm_connector_init(drm, &sii902x->connector, &sii902x_connector_funcs, DRM_MODE_CONNECTOR_HDMIA); if (ret) return ret; if (sii902x->i2c->irq > 0) sii902x->connector.polled = DRM_CONNECTOR_POLL_HPD; else sii902x->connector.polled = DRM_CONNECTOR_POLL_CONNECT; ret = drm_display_info_set_bus_formats(&sii902x->connector.display_info, &bus_format, 1); if (ret) return ret; drm_connector_attach_encoder(&sii902x->connector, bridge->encoder); return 0; } static enum drm_connector_status sii902x_bridge_detect(struct drm_bridge *bridge) { struct sii902x *sii902x = bridge_to_sii902x(bridge); return sii902x_detect(sii902x); } static struct edid *sii902x_bridge_get_edid(struct drm_bridge *bridge, struct drm_connector *connector) { struct sii902x *sii902x = bridge_to_sii902x(bridge); return sii902x_get_edid(sii902x, connector); } static u32 *sii902x_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state, u32 output_fmt, unsigned int 
*num_input_fmts) { u32 *input_fmts; *num_input_fmts = 0; input_fmts = kcalloc(1, sizeof(*input_fmts), GFP_KERNEL); if (!input_fmts) return NULL; input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24; *num_input_fmts = 1; return input_fmts; } static int sii902x_bridge_atomic_check(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { /* * There might be flags negotiation supported in future but * set the bus flags in atomic_check statically for now. */ bridge_state->input_bus_cfg.flags = bridge->timings->input_bus_flags; return 0; } static const struct drm_bridge_funcs sii902x_bridge_funcs = { .attach = sii902x_bridge_attach, .mode_set = sii902x_bridge_mode_set, .disable = sii902x_bridge_disable, .enable = sii902x_bridge_enable, .detect = sii902x_bridge_detect, .get_edid = sii902x_bridge_get_edid, .atomic_reset = drm_atomic_helper_bridge_reset, .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_get_input_bus_fmts = sii902x_bridge_atomic_get_input_bus_fmts, .atomic_check = sii902x_bridge_atomic_check, }; static int sii902x_mute(struct sii902x *sii902x, bool mute) { struct device *dev = &sii902x->i2c->dev; unsigned int val = mute ? SII902X_TPI_AUDIO_MUTE_ENABLE : SII902X_TPI_AUDIO_MUTE_DISABLE; dev_dbg(dev, "%s: %s\n", __func__, mute ? 
"Muted" : "Unmuted"); return regmap_update_bits(sii902x->regmap, SII902X_TPI_AUDIO_CONFIG_BYTE2_REG, SII902X_TPI_AUDIO_MUTE_ENABLE, val); } static const int sii902x_mclk_div_table[] = { 128, 256, 384, 512, 768, 1024, 1152, 192 }; static int sii902x_select_mclk_div(u8 *i2s_config_reg, unsigned int rate, unsigned int mclk) { int div = mclk / rate; int distance = 100000; u8 i, nearest = 0; for (i = 0; i < ARRAY_SIZE(sii902x_mclk_div_table); i++) { unsigned int d = abs(div - sii902x_mclk_div_table[i]); if (d >= distance) continue; nearest = i; distance = d; if (d == 0) break; } *i2s_config_reg |= nearest << 4; return sii902x_mclk_div_table[nearest]; } static const struct sii902x_sample_freq { u32 freq; u8 val; } sii902x_sample_freq[] = { { .freq = 32000, .val = SII902X_TPI_AUDIO_FREQ_32KHZ }, { .freq = 44000, .val = SII902X_TPI_AUDIO_FREQ_44KHZ }, { .freq = 48000, .val = SII902X_TPI_AUDIO_FREQ_48KHZ }, { .freq = 88000, .val = SII902X_TPI_AUDIO_FREQ_88KHZ }, { .freq = 96000, .val = SII902X_TPI_AUDIO_FREQ_96KHZ }, { .freq = 176000, .val = SII902X_TPI_AUDIO_FREQ_176KHZ }, { .freq = 192000, .val = SII902X_TPI_AUDIO_FREQ_192KHZ }, }; static int sii902x_audio_hw_params(struct device *dev, void *data, struct hdmi_codec_daifmt *daifmt, struct hdmi_codec_params *params) { struct sii902x *sii902x = dev_get_drvdata(dev); u8 i2s_config_reg = SII902X_TPI_I2S_SD_DIRECTION_MSB_FIRST; u8 config_byte2_reg = (SII902X_TPI_AUDIO_INTERFACE_I2S | SII902X_TPI_AUDIO_MUTE_ENABLE | SII902X_TPI_AUDIO_CODING_PCM); u8 config_byte3_reg = 0; u8 infoframe_buf[HDMI_INFOFRAME_SIZE(AUDIO)]; unsigned long mclk_rate; int i, ret; if (daifmt->bit_clk_provider || daifmt->frame_clk_provider) { dev_dbg(dev, "%s: I2S clock provider mode not supported\n", __func__); return -EINVAL; } switch (daifmt->fmt) { case HDMI_I2S: i2s_config_reg |= SII902X_TPI_I2S_FIRST_BIT_SHIFT_YES | SII902X_TPI_I2S_SD_JUSTIFY_LEFT; break; case HDMI_RIGHT_J: i2s_config_reg |= SII902X_TPI_I2S_SD_JUSTIFY_RIGHT; break; case HDMI_LEFT_J: 
i2s_config_reg |= SII902X_TPI_I2S_SD_JUSTIFY_LEFT; break; default: dev_dbg(dev, "%s: Unsupported i2s format %u\n", __func__, daifmt->fmt); return -EINVAL; } if (daifmt->bit_clk_inv) i2s_config_reg |= SII902X_TPI_I2S_SCK_EDGE_FALLING; else i2s_config_reg |= SII902X_TPI_I2S_SCK_EDGE_RISING; if (daifmt->frame_clk_inv) i2s_config_reg |= SII902X_TPI_I2S_WS_POLARITY_LOW; else i2s_config_reg |= SII902X_TPI_I2S_WS_POLARITY_HIGH; if (params->channels > 2) config_byte2_reg |= SII902X_TPI_AUDIO_LAYOUT_8_CHANNELS; else config_byte2_reg |= SII902X_TPI_AUDIO_LAYOUT_2_CHANNELS; switch (params->sample_width) { case 16: config_byte3_reg |= SII902X_TPI_AUDIO_SAMPLE_SIZE_16; break; case 20: config_byte3_reg |= SII902X_TPI_AUDIO_SAMPLE_SIZE_20; break; case 24: case 32: config_byte3_reg |= SII902X_TPI_AUDIO_SAMPLE_SIZE_24; break; default: dev_err(dev, "%s: Unsupported sample width %u\n", __func__, params->sample_width); return -EINVAL; } for (i = 0; i < ARRAY_SIZE(sii902x_sample_freq); i++) { if (params->sample_rate == sii902x_sample_freq[i].freq) { config_byte3_reg |= sii902x_sample_freq[i].val; break; } } ret = clk_prepare_enable(sii902x->audio.mclk); if (ret) { dev_err(dev, "Enabling mclk failed: %d\n", ret); return ret; } if (sii902x->audio.mclk) { mclk_rate = clk_get_rate(sii902x->audio.mclk); ret = sii902x_select_mclk_div(&i2s_config_reg, params->sample_rate, mclk_rate); if (mclk_rate != ret * params->sample_rate) dev_dbg(dev, "Inaccurate reference clock (%ld/%d != %u)\n", mclk_rate, ret, params->sample_rate); } mutex_lock(&sii902x->mutex); ret = regmap_write(sii902x->regmap, SII902X_TPI_AUDIO_CONFIG_BYTE2_REG, config_byte2_reg); if (ret < 0) goto out; ret = regmap_write(sii902x->regmap, SII902X_TPI_I2S_INPUT_CONFIG_REG, i2s_config_reg); if (ret) goto out; for (i = 0; i < ARRAY_SIZE(sii902x->audio.i2s_fifo_sequence) && sii902x->audio.i2s_fifo_sequence[i]; i++) regmap_write(sii902x->regmap, SII902X_TPI_I2S_ENABLE_MAPPING_REG, sii902x->audio.i2s_fifo_sequence[i]); ret = 
regmap_write(sii902x->regmap, SII902X_TPI_AUDIO_CONFIG_BYTE3_REG, config_byte3_reg); if (ret) goto out; ret = regmap_bulk_write(sii902x->regmap, SII902X_TPI_I2S_STRM_HDR_BASE, params->iec.status, min((size_t) SII902X_TPI_I2S_STRM_HDR_SIZE, sizeof(params->iec.status))); if (ret) goto out; ret = hdmi_audio_infoframe_pack(&params->cea, infoframe_buf, sizeof(infoframe_buf)); if (ret < 0) { dev_err(dev, "%s: Failed to pack audio infoframe: %d\n", __func__, ret); goto out; } ret = regmap_bulk_write(sii902x->regmap, SII902X_TPI_MISC_INFOFRAME_BASE, infoframe_buf, min(ret, SII902X_TPI_MISC_INFOFRAME_SIZE)); if (ret) goto out; /* Decode Level 0 Packets */ ret = regmap_write(sii902x->regmap, SII902X_IND_SET_PAGE, 0x02); if (ret) goto out; ret = regmap_write(sii902x->regmap, SII902X_IND_OFFSET, 0x24); if (ret) goto out; ret = regmap_write(sii902x->regmap, SII902X_IND_VALUE, 0x02); if (ret) goto out; dev_dbg(dev, "%s: hdmi audio enabled\n", __func__); out: mutex_unlock(&sii902x->mutex); if (ret) { clk_disable_unprepare(sii902x->audio.mclk); dev_err(dev, "%s: hdmi audio enable failed: %d\n", __func__, ret); } return ret; } static void sii902x_audio_shutdown(struct device *dev, void *data) { struct sii902x *sii902x = dev_get_drvdata(dev); mutex_lock(&sii902x->mutex); regmap_write(sii902x->regmap, SII902X_TPI_AUDIO_CONFIG_BYTE2_REG, SII902X_TPI_AUDIO_INTERFACE_DISABLE); mutex_unlock(&sii902x->mutex); clk_disable_unprepare(sii902x->audio.mclk); } static int sii902x_audio_mute(struct device *dev, void *data, bool enable, int direction) { struct sii902x *sii902x = dev_get_drvdata(dev); mutex_lock(&sii902x->mutex); sii902x_mute(sii902x, enable); mutex_unlock(&sii902x->mutex); return 0; } static int sii902x_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size_t len) { struct sii902x *sii902x = dev_get_drvdata(dev); mutex_lock(&sii902x->mutex); memcpy(buf, sii902x->connector.eld, min(sizeof(sii902x->connector.eld), len)); mutex_unlock(&sii902x->mutex); return 0; } static 
int sii902x_audio_get_dai_id(struct snd_soc_component *component, struct device_node *endpoint) { struct of_endpoint of_ep; int ret; ret = of_graph_parse_endpoint(endpoint, &of_ep); if (ret < 0) return ret; /* * HDMI sound should be located at reg = <3> * Return expected DAI index 0. */ if (of_ep.port == SII902X_AUDIO_PORT_INDEX) return 0; return -EINVAL; } static const struct hdmi_codec_ops sii902x_audio_codec_ops = { .hw_params = sii902x_audio_hw_params, .audio_shutdown = sii902x_audio_shutdown, .mute_stream = sii902x_audio_mute, .get_eld = sii902x_audio_get_eld, .get_dai_id = sii902x_audio_get_dai_id, .no_capture_mute = 1, }; static int sii902x_audio_codec_init(struct sii902x *sii902x, struct device *dev) { static const u8 audio_fifo_id[] = { SII902X_TPI_I2S_CONFIG_FIFO0, SII902X_TPI_I2S_CONFIG_FIFO1, SII902X_TPI_I2S_CONFIG_FIFO2, SII902X_TPI_I2S_CONFIG_FIFO3, }; static const u8 i2s_lane_id[] = { SII902X_TPI_I2S_SELECT_SD0, SII902X_TPI_I2S_SELECT_SD1, SII902X_TPI_I2S_SELECT_SD2, SII902X_TPI_I2S_SELECT_SD3, }; struct hdmi_codec_pdata codec_data = { .ops = &sii902x_audio_codec_ops, .i2s = 1, /* Only i2s support for now. 
*/ .spdif = 0, .max_i2s_channels = 0, }; u8 lanes[4]; int num_lanes, i; if (!of_property_read_bool(dev->of_node, "#sound-dai-cells")) { dev_dbg(dev, "%s: No \"#sound-dai-cells\", no audio\n", __func__); return 0; } num_lanes = of_property_read_variable_u8_array(dev->of_node, "sil,i2s-data-lanes", lanes, 1, ARRAY_SIZE(lanes)); if (num_lanes == -EINVAL) { dev_dbg(dev, "%s: No \"sil,i2s-data-lanes\", use default <0>\n", __func__); num_lanes = 1; lanes[0] = 0; } else if (num_lanes < 0) { dev_err(dev, "%s: Error gettin \"sil,i2s-data-lanes\": %d\n", __func__, num_lanes); return num_lanes; } codec_data.max_i2s_channels = 2 * num_lanes; for (i = 0; i < num_lanes; i++) sii902x->audio.i2s_fifo_sequence[i] |= audio_fifo_id[i] | i2s_lane_id[lanes[i]] | SII902X_TPI_I2S_FIFO_ENABLE; sii902x->audio.mclk = devm_clk_get_optional(dev, "mclk"); if (IS_ERR(sii902x->audio.mclk)) { dev_err(dev, "%s: No clock (audio mclk) found: %ld\n", __func__, PTR_ERR(sii902x->audio.mclk)); return PTR_ERR(sii902x->audio.mclk); } sii902x->audio.pdev = platform_device_register_data( dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO, &codec_data, sizeof(codec_data)); return PTR_ERR_OR_ZERO(sii902x->audio.pdev); } static const struct regmap_range sii902x_volatile_ranges[] = { { .range_min = 0, .range_max = 0xff }, }; static const struct regmap_access_table sii902x_volatile_table = { .yes_ranges = sii902x_volatile_ranges, .n_yes_ranges = ARRAY_SIZE(sii902x_volatile_ranges), }; static const struct regmap_config sii902x_regmap_config = { .reg_bits = 8, .val_bits = 8, .disable_locking = true, /* struct sii902x mutex should be enough */ .max_register = SII902X_TPI_MISC_INFOFRAME_END, .volatile_table = &sii902x_volatile_table, .cache_type = REGCACHE_NONE, }; static irqreturn_t sii902x_interrupt(int irq, void *data) { struct sii902x *sii902x = data; unsigned int status = 0; mutex_lock(&sii902x->mutex); regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status); regmap_write(sii902x->regmap, SII902X_INT_STATUS, 
status); mutex_unlock(&sii902x->mutex); if ((status & SII902X_HOTPLUG_EVENT) && sii902x->bridge.dev) { drm_helper_hpd_irq_event(sii902x->bridge.dev); drm_bridge_hpd_notify(&sii902x->bridge, (status & SII902X_PLUGGED_STATUS) ? connector_status_connected : connector_status_disconnected); } return IRQ_HANDLED; } /* * The purpose of sii902x_i2c_bypass_select is to enable the pass through * mode of the HDMI transmitter. Do not use regmap from within this function, * only use sii902x_*_unlocked functions to read/modify/write registers. * We are holding the parent adapter lock here, keep this in mind before * adding more i2c transactions. * * Also, since SII902X_SYS_CTRL_DATA is used with regmap_update_bits elsewhere * in this driver, we need to make sure that we only touch 0x1A[2:1] from * within sii902x_i2c_bypass_select and sii902x_i2c_bypass_deselect, and that * we leave the remaining bits as we have found them. */ static int sii902x_i2c_bypass_select(struct i2c_mux_core *mux, u32 chan_id) { struct sii902x *sii902x = i2c_mux_priv(mux); struct device *dev = &sii902x->i2c->dev; unsigned long timeout; u8 status; int ret; ret = sii902x_update_bits_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA, SII902X_SYS_CTRL_DDC_BUS_REQ, SII902X_SYS_CTRL_DDC_BUS_REQ); if (ret) return ret; timeout = jiffies + msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS); do { ret = sii902x_read_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA, &status); if (ret) return ret; } while (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD) && time_before(jiffies, timeout)); if (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD)) { dev_err(dev, "Failed to acquire the i2c bus\n"); return -ETIMEDOUT; } return sii902x_write_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA, status); } /* * The purpose of sii902x_i2c_bypass_deselect is to disable the pass through * mode of the HDMI transmitter. Do not use regmap from within this function, * only use sii902x_*_unlocked functions to read/modify/write registers. 
* We are holding the parent adapter lock here, keep this in mind before * adding more i2c transactions. * * Also, since SII902X_SYS_CTRL_DATA is used with regmap_update_bits elsewhere * in this driver, we need to make sure that we only touch 0x1A[2:1] from * within sii902x_i2c_bypass_select and sii902x_i2c_bypass_deselect, and that * we leave the remaining bits as we have found them. */ static int sii902x_i2c_bypass_deselect(struct i2c_mux_core *mux, u32 chan_id) { struct sii902x *sii902x = i2c_mux_priv(mux); struct device *dev = &sii902x->i2c->dev; unsigned long timeout; unsigned int retries; u8 status; int ret; /* * When the HDMI transmitter is in pass through mode, we need an * (undocumented) additional delay between STOP and START conditions * to guarantee the bus won't get stuck. */ udelay(30); /* * Sometimes the I2C bus can stall after failure to use the * EDID channel. Retry a few times to see if things clear * up, else continue anyway. */ retries = 5; do { ret = sii902x_read_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA, &status); retries--; } while (ret && retries); if (ret) { dev_err(dev, "failed to read status (%d)\n", ret); return ret; } ret = sii902x_update_bits_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA, SII902X_SYS_CTRL_DDC_BUS_REQ | SII902X_SYS_CTRL_DDC_BUS_GRTD, 0); if (ret) return ret; timeout = jiffies + msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS); do { ret = sii902x_read_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA, &status); if (ret) return ret; } while (status & (SII902X_SYS_CTRL_DDC_BUS_REQ | SII902X_SYS_CTRL_DDC_BUS_GRTD) && time_before(jiffies, timeout)); if (status & (SII902X_SYS_CTRL_DDC_BUS_REQ | SII902X_SYS_CTRL_DDC_BUS_GRTD)) { dev_err(dev, "failed to release the i2c bus\n"); return -ETIMEDOUT; } return 0; } static const struct drm_bridge_timings default_sii902x_timings = { .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE | DRM_BUS_FLAG_DE_HIGH, }; static int sii902x_init(struct 
sii902x *sii902x) { struct device *dev = &sii902x->i2c->dev; unsigned int status = 0; u8 chipid[4]; int ret; sii902x_reset(sii902x); ret = regmap_write(sii902x->regmap, SII902X_REG_TPI_RQB, 0x0); if (ret) return ret; ret = regmap_bulk_read(sii902x->regmap, SII902X_REG_CHIPID(0), &chipid, 4); if (ret) { dev_err(dev, "regmap_read failed %d\n", ret); return ret; } if (chipid[0] != 0xb0) { dev_err(dev, "Invalid chipid: %02x (expecting 0xb0)\n", chipid[0]); return -EINVAL; } /* Clear all pending interrupts */ regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status); regmap_write(sii902x->regmap, SII902X_INT_STATUS, status); if (sii902x->i2c->irq > 0) { regmap_write(sii902x->regmap, SII902X_INT_ENABLE, SII902X_HOTPLUG_EVENT); ret = devm_request_threaded_irq(dev, sii902x->i2c->irq, NULL, sii902x_interrupt, IRQF_ONESHOT, dev_name(dev), sii902x); if (ret) return ret; } sii902x->bridge.funcs = &sii902x_bridge_funcs; sii902x->bridge.of_node = dev->of_node; sii902x->bridge.timings = &default_sii902x_timings; sii902x->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID; if (sii902x->i2c->irq > 0) sii902x->bridge.ops |= DRM_BRIDGE_OP_HPD; drm_bridge_add(&sii902x->bridge); sii902x_audio_codec_init(sii902x, dev); i2c_set_clientdata(sii902x->i2c, sii902x); sii902x->i2cmux = i2c_mux_alloc(sii902x->i2c->adapter, dev, 1, 0, I2C_MUX_GATE, sii902x_i2c_bypass_select, sii902x_i2c_bypass_deselect); if (!sii902x->i2cmux) return -ENOMEM; sii902x->i2cmux->priv = sii902x; return i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0); } static int sii902x_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct device_node *endpoint; struct sii902x *sii902x; static const char * const supplies[] = {"iovcc", "cvcc12"}; int ret; ret = i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA); if (!ret) { dev_err(dev, "I2C adapter not suitable\n"); return -EIO; } sii902x = devm_kzalloc(dev, sizeof(*sii902x), GFP_KERNEL); if (!sii902x) return -ENOMEM; sii902x->i2c = client; 
sii902x->regmap = devm_regmap_init_i2c(client, &sii902x_regmap_config); if (IS_ERR(sii902x->regmap)) return PTR_ERR(sii902x->regmap); sii902x->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(sii902x->reset_gpio)) { dev_err(dev, "Failed to retrieve/request reset gpio: %ld\n", PTR_ERR(sii902x->reset_gpio)); return PTR_ERR(sii902x->reset_gpio); } endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 1, -1); if (endpoint) { struct device_node *remote = of_graph_get_remote_port_parent(endpoint); of_node_put(endpoint); if (!remote) { dev_err(dev, "Endpoint in port@1 unconnected\n"); return -ENODEV; } if (!of_device_is_available(remote)) { dev_err(dev, "port@1 remote device is disabled\n"); of_node_put(remote); return -ENODEV; } sii902x->next_bridge = of_drm_find_bridge(remote); of_node_put(remote); if (!sii902x->next_bridge) return dev_err_probe(dev, -EPROBE_DEFER, "Failed to find remote bridge\n"); } mutex_init(&sii902x->mutex); ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(supplies), supplies); if (ret < 0) return dev_err_probe(dev, ret, "Failed to enable supplies"); return sii902x_init(sii902x); } static void sii902x_remove(struct i2c_client *client) { struct sii902x *sii902x = i2c_get_clientdata(client); i2c_mux_del_adapters(sii902x->i2cmux); drm_bridge_remove(&sii902x->bridge); } static const struct of_device_id sii902x_dt_ids[] = { { .compatible = "sil,sii9022", }, { } }; MODULE_DEVICE_TABLE(of, sii902x_dt_ids); static const struct i2c_device_id sii902x_i2c_ids[] = { { "sii9022", 0 }, { }, }; MODULE_DEVICE_TABLE(i2c, sii902x_i2c_ids); static struct i2c_driver sii902x_driver = { .probe = sii902x_probe, .remove = sii902x_remove, .driver = { .name = "sii902x", .of_match_table = sii902x_dt_ids, }, .id_table = sii902x_i2c_ids, }; module_i2c_driver(sii902x_driver); MODULE_AUTHOR("Boris Brezillon <[email protected]>"); MODULE_DESCRIPTION("SII902x RGB -> HDMI bridges"); MODULE_LICENSE("GPL");
linux-master
drivers/gpu/drm/bridge/sii902x.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2018, The Linux Foundation. All rights reserved. * Copyright (c) 2019-2020. Linaro Limited. */ #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/media-bus-format.h> #include <linux/module.h> #include <linux/of_graph.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <sound/hdmi-codec.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_of.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #define EDID_SEG_SIZE 256 #define EDID_LEN 32 #define EDID_LOOP 8 #define KEY_DDC_ACCS_DONE 0x02 #define DDC_NO_ACK 0x50 #define LT9611_4LANES 0 struct lt9611 { struct device *dev; struct drm_bridge bridge; struct drm_bridge *next_bridge; struct regmap *regmap; struct device_node *dsi0_node; struct device_node *dsi1_node; struct mipi_dsi_device *dsi0; struct mipi_dsi_device *dsi1; struct platform_device *audio_pdev; bool ac_mode; struct gpio_desc *reset_gpio; struct gpio_desc *enable_gpio; bool power_on; bool sleep; struct regulator_bulk_data supplies[2]; struct i2c_client *client; enum drm_connector_status status; u8 edid_buf[EDID_SEG_SIZE]; }; #define LT9611_PAGE_CONTROL 0xff static const struct regmap_range_cfg lt9611_ranges[] = { { .name = "register_range", .range_min = 0, .range_max = 0x85ff, .selector_reg = LT9611_PAGE_CONTROL, .selector_mask = 0xff, .selector_shift = 0, .window_start = 0, .window_len = 0x100, }, }; static const struct regmap_config lt9611_regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = 0xffff, .ranges = lt9611_ranges, .num_ranges = ARRAY_SIZE(lt9611_ranges), }; static struct lt9611 *bridge_to_lt9611(struct drm_bridge *bridge) { return container_of(bridge, struct lt9611, bridge); } static int lt9611_mipi_input_analog(struct lt9611 *lt9611) { const struct reg_sequence reg_cfg[] = { { 0x8106, 0x40 }, /* port 
A rx current */ { 0x810a, 0xfe }, /* port A ldo voltage set */ { 0x810b, 0xbf }, /* enable port A lprx */ { 0x8111, 0x40 }, /* port B rx current */ { 0x8115, 0xfe }, /* port B ldo voltage set */ { 0x8116, 0xbf }, /* enable port B lprx */ { 0x811c, 0x03 }, /* PortA clk lane no-LP mode */ { 0x8120, 0x03 }, /* PortB clk lane with-LP mode */ }; return regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg)); } static int lt9611_mipi_input_digital(struct lt9611 *lt9611, const struct drm_display_mode *mode) { struct reg_sequence reg_cfg[] = { { 0x8300, LT9611_4LANES }, { 0x830a, 0x00 }, { 0x824f, 0x80 }, { 0x8250, 0x10 }, { 0x8302, 0x0a }, { 0x8306, 0x0a }, }; if (lt9611->dsi1_node) reg_cfg[1].def = 0x03; return regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg)); } static void lt9611_mipi_video_setup(struct lt9611 *lt9611, const struct drm_display_mode *mode) { u32 h_total, hactive, hsync_len, hfront_porch, hsync_porch; u32 v_total, vactive, vsync_len, vfront_porch, vsync_porch; h_total = mode->htotal; v_total = mode->vtotal; hactive = mode->hdisplay; hsync_len = mode->hsync_end - mode->hsync_start; hfront_porch = mode->hsync_start - mode->hdisplay; hsync_porch = mode->htotal - mode->hsync_start; vactive = mode->vdisplay; vsync_len = mode->vsync_end - mode->vsync_start; vfront_porch = mode->vsync_start - mode->vdisplay; vsync_porch = mode->vtotal - mode->vsync_start; regmap_write(lt9611->regmap, 0x830d, (u8)(v_total / 256)); regmap_write(lt9611->regmap, 0x830e, (u8)(v_total % 256)); regmap_write(lt9611->regmap, 0x830f, (u8)(vactive / 256)); regmap_write(lt9611->regmap, 0x8310, (u8)(vactive % 256)); regmap_write(lt9611->regmap, 0x8311, (u8)(h_total / 256)); regmap_write(lt9611->regmap, 0x8312, (u8)(h_total % 256)); regmap_write(lt9611->regmap, 0x8313, (u8)(hactive / 256)); regmap_write(lt9611->regmap, 0x8314, (u8)(hactive % 256)); regmap_write(lt9611->regmap, 0x8315, (u8)(vsync_len % 256)); regmap_write(lt9611->regmap, 0x8316, (u8)(hsync_len 
% 256)); regmap_write(lt9611->regmap, 0x8317, (u8)(vfront_porch % 256)); regmap_write(lt9611->regmap, 0x8318, (u8)(vsync_porch % 256)); regmap_write(lt9611->regmap, 0x8319, (u8)(hfront_porch % 256)); regmap_write(lt9611->regmap, 0x831a, (u8)(hsync_porch / 256) | ((hfront_porch / 256) << 4)); regmap_write(lt9611->regmap, 0x831b, (u8)(hsync_porch % 256)); } static void lt9611_pcr_setup(struct lt9611 *lt9611, const struct drm_display_mode *mode, unsigned int postdiv) { unsigned int pcr_m = mode->clock * 5 * postdiv / 27000; const struct reg_sequence reg_cfg[] = { { 0x830b, 0x01 }, { 0x830c, 0x10 }, { 0x8348, 0x00 }, { 0x8349, 0x81 }, /* stage 1 */ { 0x8321, 0x4a }, { 0x8324, 0x71 }, { 0x8325, 0x30 }, { 0x832a, 0x01 }, /* stage 2 */ { 0x834a, 0x40 }, /* MK limit */ { 0x832d, 0x38 }, { 0x8331, 0x08 }, }; u8 pol = 0x10; if (mode->flags & DRM_MODE_FLAG_NHSYNC) pol |= 0x2; if (mode->flags & DRM_MODE_FLAG_NVSYNC) pol |= 0x1; regmap_write(lt9611->regmap, 0x831d, pol); regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg)); if (lt9611->dsi1_node) { unsigned int hact = mode->hdisplay; hact >>= 2; hact += 0x50; hact = min(hact, 0x3e0U); regmap_write(lt9611->regmap, 0x830b, hact / 256); regmap_write(lt9611->regmap, 0x830c, hact % 256); regmap_write(lt9611->regmap, 0x8348, hact / 256); regmap_write(lt9611->regmap, 0x8349, hact % 256); } regmap_write(lt9611->regmap, 0x8326, pcr_m); /* pcr rst */ regmap_write(lt9611->regmap, 0x8011, 0x5a); regmap_write(lt9611->regmap, 0x8011, 0xfa); } static int lt9611_pll_setup(struct lt9611 *lt9611, const struct drm_display_mode *mode, unsigned int *postdiv) { unsigned int pclk = mode->clock; const struct reg_sequence reg_cfg[] = { /* txpll init */ { 0x8123, 0x40 }, { 0x8124, 0x64 }, { 0x8125, 0x80 }, { 0x8126, 0x55 }, { 0x812c, 0x37 }, { 0x812f, 0x01 }, { 0x8126, 0x55 }, { 0x8127, 0x66 }, { 0x8128, 0x88 }, { 0x812a, 0x20 }, }; regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg)); if (pclk > 150000) { 
regmap_write(lt9611->regmap, 0x812d, 0x88); *postdiv = 1; } else if (pclk > 70000) { regmap_write(lt9611->regmap, 0x812d, 0x99); *postdiv = 2; } else { regmap_write(lt9611->regmap, 0x812d, 0xaa); *postdiv = 4; } /* * first divide pclk by 2 first * - write divide by 64k to 19:16 bits which means shift by 17 * - write divide by 256 to 15:8 bits which means shift by 9 * - write remainder to 7:0 bits, which means shift by 1 */ regmap_write(lt9611->regmap, 0x82e3, pclk >> 17); /* pclk[19:16] */ regmap_write(lt9611->regmap, 0x82e4, pclk >> 9); /* pclk[15:8] */ regmap_write(lt9611->regmap, 0x82e5, pclk >> 1); /* pclk[7:0] */ regmap_write(lt9611->regmap, 0x82de, 0x20); regmap_write(lt9611->regmap, 0x82de, 0xe0); regmap_write(lt9611->regmap, 0x8016, 0xf1); regmap_write(lt9611->regmap, 0x8016, 0xf3); return 0; } static int lt9611_read_video_check(struct lt9611 *lt9611, unsigned int reg) { unsigned int temp, temp2; int ret; ret = regmap_read(lt9611->regmap, reg, &temp); if (ret) return ret; temp <<= 8; ret = regmap_read(lt9611->regmap, reg + 1, &temp2); if (ret) return ret; return (temp + temp2); } static int lt9611_video_check(struct lt9611 *lt9611) { u32 v_total, vactive, hactive_a, hactive_b, h_total_sysclk; int temp; /* top module video check */ /* vactive */ temp = lt9611_read_video_check(lt9611, 0x8282); if (temp < 0) goto end; vactive = temp; /* v_total */ temp = lt9611_read_video_check(lt9611, 0x826c); if (temp < 0) goto end; v_total = temp; /* h_total_sysclk */ temp = lt9611_read_video_check(lt9611, 0x8286); if (temp < 0) goto end; h_total_sysclk = temp; /* hactive_a */ temp = lt9611_read_video_check(lt9611, 0x8382); if (temp < 0) goto end; hactive_a = temp / 3; /* hactive_b */ temp = lt9611_read_video_check(lt9611, 0x8386); if (temp < 0) goto end; hactive_b = temp / 3; dev_info(lt9611->dev, "video check: hactive_a=%d, hactive_b=%d, vactive=%d, v_total=%d, h_total_sysclk=%d\n", hactive_a, hactive_b, vactive, v_total, h_total_sysclk); return 0; end: 
dev_err(lt9611->dev, "read video check error\n"); return temp; } static void lt9611_hdmi_set_infoframes(struct lt9611 *lt9611, struct drm_connector *connector, struct drm_display_mode *mode) { union hdmi_infoframe infoframe; ssize_t len; u8 iframes = 0x0a; /* UD1 infoframe */ u8 buf[32]; int ret; int i; ret = drm_hdmi_avi_infoframe_from_display_mode(&infoframe.avi, connector, mode); if (ret < 0) goto out; len = hdmi_infoframe_pack(&infoframe, buf, sizeof(buf)); if (len < 0) goto out; for (i = 0; i < len; i++) regmap_write(lt9611->regmap, 0x8440 + i, buf[i]); ret = drm_hdmi_vendor_infoframe_from_display_mode(&infoframe.vendor.hdmi, connector, mode); if (ret < 0) goto out; len = hdmi_infoframe_pack(&infoframe, buf, sizeof(buf)); if (len < 0) goto out; for (i = 0; i < len; i++) regmap_write(lt9611->regmap, 0x8474 + i, buf[i]); iframes |= 0x20; out: regmap_write(lt9611->regmap, 0x843d, iframes); /* UD1 infoframe */ } static void lt9611_hdmi_tx_digital(struct lt9611 *lt9611, bool is_hdmi) { if (is_hdmi) regmap_write(lt9611->regmap, 0x82d6, 0x8c); else regmap_write(lt9611->regmap, 0x82d6, 0x0c); regmap_write(lt9611->regmap, 0x82d7, 0x04); } static void lt9611_hdmi_tx_phy(struct lt9611 *lt9611) { struct reg_sequence reg_cfg[] = { { 0x8130, 0x6a }, { 0x8131, 0x44 }, /* HDMI DC mode */ { 0x8132, 0x4a }, { 0x8133, 0x0b }, { 0x8134, 0x00 }, { 0x8135, 0x00 }, { 0x8136, 0x00 }, { 0x8137, 0x44 }, { 0x813f, 0x0f }, { 0x8140, 0xa0 }, { 0x8141, 0xa0 }, { 0x8142, 0xa0 }, { 0x8143, 0xa0 }, { 0x8144, 0x0a }, }; /* HDMI AC mode */ if (lt9611->ac_mode) reg_cfg[2].def = 0x73; regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg)); } static irqreturn_t lt9611_irq_thread_handler(int irq, void *dev_id) { struct lt9611 *lt9611 = dev_id; unsigned int irq_flag0 = 0; unsigned int irq_flag3 = 0; regmap_read(lt9611->regmap, 0x820f, &irq_flag3); regmap_read(lt9611->regmap, 0x820c, &irq_flag0); /* hpd changed low */ if (irq_flag3 & 0x80) { dev_info(lt9611->dev, "hdmi cable 
disconnected\n"); regmap_write(lt9611->regmap, 0x8207, 0xbf); regmap_write(lt9611->regmap, 0x8207, 0x3f); } /* hpd changed high */ if (irq_flag3 & 0x40) { dev_info(lt9611->dev, "hdmi cable connected\n"); regmap_write(lt9611->regmap, 0x8207, 0x7f); regmap_write(lt9611->regmap, 0x8207, 0x3f); } if (irq_flag3 & 0xc0 && lt9611->bridge.dev) drm_kms_helper_hotplug_event(lt9611->bridge.dev); /* video input changed */ if (irq_flag0 & 0x01) { dev_info(lt9611->dev, "video input changed\n"); regmap_write(lt9611->regmap, 0x829e, 0xff); regmap_write(lt9611->regmap, 0x829e, 0xf7); regmap_write(lt9611->regmap, 0x8204, 0xff); regmap_write(lt9611->regmap, 0x8204, 0xfe); } return IRQ_HANDLED; } static void lt9611_enable_hpd_interrupts(struct lt9611 *lt9611) { unsigned int val; regmap_read(lt9611->regmap, 0x8203, &val); val &= ~0xc0; regmap_write(lt9611->regmap, 0x8203, val); regmap_write(lt9611->regmap, 0x8207, 0xff); /* clear */ regmap_write(lt9611->regmap, 0x8207, 0x3f); } static void lt9611_sleep_setup(struct lt9611 *lt9611) { const struct reg_sequence sleep_setup[] = { { 0x8024, 0x76 }, { 0x8023, 0x01 }, { 0x8157, 0x03 }, /* set addr pin as output */ { 0x8149, 0x0b }, { 0x8102, 0x48 }, /* MIPI Rx power down */ { 0x8123, 0x80 }, { 0x8130, 0x00 }, { 0x8011, 0x0a }, }; regmap_multi_reg_write(lt9611->regmap, sleep_setup, ARRAY_SIZE(sleep_setup)); lt9611->sleep = true; } static int lt9611_power_on(struct lt9611 *lt9611) { int ret; const struct reg_sequence seq[] = { /* LT9611_System_Init */ { 0x8101, 0x18 }, /* sel xtal clock */ /* timer for frequency meter */ { 0x821b, 0x69 }, /* timer 2 */ { 0x821c, 0x78 }, { 0x82cb, 0x69 }, /* timer 1 */ { 0x82cc, 0x78 }, /* irq init */ { 0x8251, 0x01 }, { 0x8258, 0x0a }, /* hpd irq */ { 0x8259, 0x80 }, /* hpd debounce width */ { 0x829e, 0xf7 }, /* video check irq */ /* power consumption for work */ { 0x8004, 0xf0 }, { 0x8006, 0xf0 }, { 0x800a, 0x80 }, { 0x800b, 0x40 }, { 0x800d, 0xef }, { 0x8011, 0xfa }, }; if (lt9611->power_on) return 0; ret = 
regmap_multi_reg_write(lt9611->regmap, seq, ARRAY_SIZE(seq)); if (!ret) lt9611->power_on = true; return ret; } static int lt9611_power_off(struct lt9611 *lt9611) { int ret; ret = regmap_write(lt9611->regmap, 0x8130, 0x6a); if (!ret) lt9611->power_on = false; return ret; } static void lt9611_reset(struct lt9611 *lt9611) { gpiod_set_value_cansleep(lt9611->reset_gpio, 1); msleep(20); gpiod_set_value_cansleep(lt9611->reset_gpio, 0); msleep(20); gpiod_set_value_cansleep(lt9611->reset_gpio, 1); msleep(100); } static void lt9611_assert_5v(struct lt9611 *lt9611) { if (!lt9611->enable_gpio) return; gpiod_set_value_cansleep(lt9611->enable_gpio, 1); msleep(20); } static int lt9611_regulator_init(struct lt9611 *lt9611) { int ret; lt9611->supplies[0].supply = "vdd"; lt9611->supplies[1].supply = "vcc"; ret = devm_regulator_bulk_get(lt9611->dev, 2, lt9611->supplies); if (ret < 0) return ret; return regulator_set_load(lt9611->supplies[0].consumer, 300000); } static int lt9611_regulator_enable(struct lt9611 *lt9611) { int ret; ret = regulator_enable(lt9611->supplies[0].consumer); if (ret < 0) return ret; usleep_range(1000, 10000); ret = regulator_enable(lt9611->supplies[1].consumer); if (ret < 0) { regulator_disable(lt9611->supplies[0].consumer); return ret; } return 0; } static enum drm_connector_status lt9611_bridge_detect(struct drm_bridge *bridge) { struct lt9611 *lt9611 = bridge_to_lt9611(bridge); unsigned int reg_val = 0; int connected = 0; regmap_read(lt9611->regmap, 0x825e, &reg_val); connected = (reg_val & (BIT(2) | BIT(0))); lt9611->status = connected ? 
connector_status_connected : connector_status_disconnected; return lt9611->status; } static int lt9611_read_edid(struct lt9611 *lt9611) { unsigned int temp; int ret = 0; int i, j; /* memset to clear old buffer, if any */ memset(lt9611->edid_buf, 0, sizeof(lt9611->edid_buf)); regmap_write(lt9611->regmap, 0x8503, 0xc9); /* 0xA0 is EDID device address */ regmap_write(lt9611->regmap, 0x8504, 0xa0); /* 0x00 is EDID offset address */ regmap_write(lt9611->regmap, 0x8505, 0x00); /* length for read */ regmap_write(lt9611->regmap, 0x8506, EDID_LEN); regmap_write(lt9611->regmap, 0x8514, 0x7f); for (i = 0; i < EDID_LOOP; i++) { /* offset address */ regmap_write(lt9611->regmap, 0x8505, i * EDID_LEN); regmap_write(lt9611->regmap, 0x8507, 0x36); regmap_write(lt9611->regmap, 0x8507, 0x31); regmap_write(lt9611->regmap, 0x8507, 0x37); usleep_range(5000, 10000); regmap_read(lt9611->regmap, 0x8540, &temp); if (temp & KEY_DDC_ACCS_DONE) { for (j = 0; j < EDID_LEN; j++) { regmap_read(lt9611->regmap, 0x8583, &temp); lt9611->edid_buf[i * EDID_LEN + j] = temp; } } else if (temp & DDC_NO_ACK) { /* DDC No Ack or Abitration lost */ dev_err(lt9611->dev, "read edid failed: no ack\n"); ret = -EIO; goto end; } else { dev_err(lt9611->dev, "read edid failed: access not done\n"); ret = -EIO; goto end; } } end: regmap_write(lt9611->regmap, 0x8507, 0x1f); return ret; } static int lt9611_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len) { struct lt9611 *lt9611 = data; int ret; if (len > 128) return -EINVAL; /* supports up to 1 extension block */ /* TODO: add support for more extension blocks */ if (block > 1) return -EINVAL; if (block == 0) { ret = lt9611_read_edid(lt9611); if (ret) { dev_err(lt9611->dev, "edid read failed\n"); return ret; } } block %= 2; memcpy(buf, lt9611->edid_buf + (block * 128), len); return 0; } /* bridge funcs */ static void lt9611_bridge_atomic_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct lt9611 *lt9611 = 
bridge_to_lt9611(bridge); struct drm_atomic_state *state = old_bridge_state->base.state; struct drm_connector *connector; struct drm_connector_state *conn_state; struct drm_crtc_state *crtc_state; struct drm_display_mode *mode; unsigned int postdiv; connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); if (WARN_ON(!connector)) return; conn_state = drm_atomic_get_new_connector_state(state, connector); if (WARN_ON(!conn_state)) return; crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc); if (WARN_ON(!crtc_state)) return; mode = &crtc_state->adjusted_mode; lt9611_mipi_input_digital(lt9611, mode); lt9611_pll_setup(lt9611, mode, &postdiv); lt9611_mipi_video_setup(lt9611, mode); lt9611_pcr_setup(lt9611, mode, postdiv); if (lt9611_power_on(lt9611)) { dev_err(lt9611->dev, "power on failed\n"); return; } lt9611_mipi_input_analog(lt9611); lt9611_hdmi_set_infoframes(lt9611, connector, mode); lt9611_hdmi_tx_digital(lt9611, connector->display_info.is_hdmi); lt9611_hdmi_tx_phy(lt9611); msleep(500); lt9611_video_check(lt9611); /* Enable HDMI output */ regmap_write(lt9611->regmap, 0x8130, 0xea); } static void lt9611_bridge_atomic_disable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct lt9611 *lt9611 = bridge_to_lt9611(bridge); int ret; /* Disable HDMI output */ ret = regmap_write(lt9611->regmap, 0x8130, 0x6a); if (ret) { dev_err(lt9611->dev, "video on failed\n"); return; } if (lt9611_power_off(lt9611)) { dev_err(lt9611->dev, "power on failed\n"); return; } } static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611, struct device_node *dsi_node) { const struct mipi_dsi_device_info info = { "lt9611", 0, lt9611->dev->of_node}; struct mipi_dsi_device *dsi; struct mipi_dsi_host *host; struct device *dev = lt9611->dev; int ret; host = of_find_mipi_dsi_host_by_node(dsi_node); if (!host) { dev_err(lt9611->dev, "failed to find dsi host\n"); return ERR_PTR(-EPROBE_DEFER); } dsi = 
devm_mipi_dsi_device_register_full(dev, host, &info); if (IS_ERR(dsi)) { dev_err(lt9611->dev, "failed to create dsi device\n"); return dsi; } dsi->lanes = 4; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_VIDEO_HSE; ret = devm_mipi_dsi_attach(dev, dsi); if (ret < 0) { dev_err(dev, "failed to attach dsi to host\n"); return ERR_PTR(ret); } return dsi; } static int lt9611_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct lt9611 *lt9611 = bridge_to_lt9611(bridge); return drm_bridge_attach(bridge->encoder, lt9611->next_bridge, bridge, flags); } static enum drm_mode_status lt9611_bridge_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode) { struct lt9611 *lt9611 = bridge_to_lt9611(bridge); if (mode->hdisplay > 3840) return MODE_BAD_HVALUE; if (mode->vdisplay > 2160) return MODE_BAD_VVALUE; if (mode->hdisplay == 3840 && mode->vdisplay == 2160 && drm_mode_vrefresh(mode) > 30) return MODE_CLOCK_HIGH; if (mode->hdisplay > 2000 && !lt9611->dsi1_node) return MODE_PANEL; else return MODE_OK; } static void lt9611_bridge_atomic_pre_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct lt9611 *lt9611 = bridge_to_lt9611(bridge); static const struct reg_sequence reg_cfg[] = { { 0x8102, 0x12 }, { 0x8123, 0x40 }, { 0x8130, 0xea }, { 0x8011, 0xfa }, }; if (!lt9611->sleep) return; regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg)); lt9611->sleep = false; } static void lt9611_bridge_atomic_post_disable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct lt9611 *lt9611 = bridge_to_lt9611(bridge); lt9611_sleep_setup(lt9611); } static struct edid *lt9611_bridge_get_edid(struct drm_bridge *bridge, struct drm_connector *connector) { struct lt9611 *lt9611 = bridge_to_lt9611(bridge); lt9611_power_on(lt9611); return drm_do_get_edid(connector, 
/*
 * NOTE(review): this chunk begins mid-function. The fragment below is the
 * tail of a get_edid callback whose beginning lies before this excerpt;
 * preserved verbatim.
 */
lt9611_get_edid_block, lt9611); }

/* Bridge .hpd_enable hook: (re)arm the chip's hot-plug detect interrupts. */
static void lt9611_bridge_hpd_enable(struct drm_bridge *bridge)
{
	struct lt9611 *lt9611 = bridge_to_lt9611(bridge);

	lt9611_enable_hpd_interrupts(lt9611);
}

#define MAX_INPUT_SEL_FORMATS 1

/*
 * Report the single input bus format this bridge accepts on its DSI side.
 * Returns a kcalloc'd array (freed by the DRM core) or NULL on OOM, in
 * which case *num_input_fmts stays 0.
 */
static u32 *
lt9611_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
				 struct drm_bridge_state *bridge_state,
				 struct drm_crtc_state *crtc_state,
				 struct drm_connector_state *conn_state,
				 u32 output_fmt,
				 unsigned int *num_input_fmts)
{
	u32 *input_fmts;

	*num_input_fmts = 0;

	input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts),
			     GFP_KERNEL);
	if (!input_fmts)
		return NULL;

	/* This is the DSI-end bus format */
	input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
	*num_input_fmts = 1;

	return input_fmts;
}

static const struct drm_bridge_funcs lt9611_bridge_funcs = {
	.attach = lt9611_bridge_attach,
	.mode_valid = lt9611_bridge_mode_valid,
	.detect = lt9611_bridge_detect,
	.get_edid = lt9611_bridge_get_edid,
	.hpd_enable = lt9611_bridge_hpd_enable,

	.atomic_pre_enable = lt9611_bridge_atomic_pre_enable,
	.atomic_enable = lt9611_bridge_atomic_enable,
	.atomic_disable = lt9611_bridge_atomic_disable,
	.atomic_post_disable = lt9611_bridge_atomic_post_disable,
	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
	.atomic_reset = drm_atomic_helper_bridge_reset,
	.atomic_get_input_bus_fmts = lt9611_atomic_get_input_bus_fmts,
};

/*
 * Parse the OF graph: port 0 = primary DSI (mandatory), port 1 = secondary
 * DSI (optional), port 2 = downstream panel/bridge. Also reads the
 * "lt,ac-mode" flag. Node references taken here are released in probe's
 * error path and in remove.
 */
static int lt9611_parse_dt(struct device *dev,
			   struct lt9611 *lt9611)
{
	lt9611->dsi0_node = of_graph_get_remote_node(dev->of_node, 0, -1);
	if (!lt9611->dsi0_node) {
		dev_err(lt9611->dev, "failed to get remote node for primary dsi\n");
		return -ENODEV;
	}

	lt9611->dsi1_node = of_graph_get_remote_node(dev->of_node, 1, -1);

	lt9611->ac_mode = of_property_read_bool(dev->of_node, "lt,ac-mode");

	return drm_of_find_panel_or_bridge(dev->of_node, 2, -1, NULL, &lt9611->next_bridge);
}

/*
 * Acquire the mandatory "reset" GPIO (driven high = asserted at request
 * time) and the optional "enable" GPIO. Both are devm-managed.
 */
static int lt9611_gpio_init(struct lt9611 *lt9611)
{
	struct device *dev = lt9611->dev;

	lt9611->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(lt9611->reset_gpio)) {
		dev_err(dev, "failed to acquire reset gpio\n");
		return PTR_ERR(lt9611->reset_gpio);
	}

	lt9611->enable_gpio = devm_gpiod_get_optional(dev, "enable",
						      GPIOD_OUT_LOW);
	if (IS_ERR(lt9611->enable_gpio)) {
		dev_err(dev, "failed to acquire enable gpio\n");
		return PTR_ERR(lt9611->enable_gpio);
	}

	return 0;
}

/*
 * Read the chip revision register (0x8002) after unlocking register access
 * via 0x80ee. Used in probe as a basic "is the chip alive" check.
 */
static int lt9611_read_device_rev(struct lt9611 *lt9611)
{
	unsigned int rev;
	int ret;

	regmap_write(lt9611->regmap, 0x80ee, 0x01);
	ret = regmap_read(lt9611->regmap, 0x8002, &rev);
	if (ret)
		dev_err(lt9611->dev, "failed to read revision: %d\n", ret);
	else
		dev_info(lt9611->dev, "LT9611 revision: 0x%x\n", rev);

	return ret;
}

/*
 * hdmi-codec .hw_params: only 48 kHz and 96 kHz sample rates are
 * supported; anything else is rejected with -EINVAL. Register values are
 * vendor-provided magic (no public datasheet semantics available).
 */
static int lt9611_hdmi_hw_params(struct device *dev, void *data,
				 struct hdmi_codec_daifmt *fmt,
				 struct hdmi_codec_params *hparms)
{
	struct lt9611 *lt9611 = data;

	if (hparms->sample_rate == 48000)
		regmap_write(lt9611->regmap, 0x840f, 0x2b);
	else if (hparms->sample_rate == 96000)
		regmap_write(lt9611->regmap, 0x840f, 0xab);
	else
		return -EINVAL;

	regmap_write(lt9611->regmap, 0x8435, 0x00);
	regmap_write(lt9611->regmap, 0x8436, 0x18);
	regmap_write(lt9611->regmap, 0x8437, 0x00);

	return 0;
}

/* hdmi-codec .audio_startup: enable the audio datapath (vendor magic). */
static int lt9611_audio_startup(struct device *dev, void *data)
{
	struct lt9611 *lt9611 = data;

	regmap_write(lt9611->regmap, 0x82d6, 0x8c);
	regmap_write(lt9611->regmap, 0x82d7, 0x04);

	regmap_write(lt9611->regmap, 0x8406, 0x08);
	regmap_write(lt9611->regmap, 0x8407, 0x10);

	regmap_write(lt9611->regmap, 0x8434, 0xd5);

	return 0;
}

/* hdmi-codec .audio_shutdown: clear the registers set in startup. */
static void lt9611_audio_shutdown(struct device *dev, void *data)
{
	struct lt9611 *lt9611 = data;

	regmap_write(lt9611->regmap, 0x8406, 0x00);
	regmap_write(lt9611->regmap, 0x8407, 0x00);
}

/*
 * Map an OF graph endpoint to an I2S DAI id: only port 2 (the HDMI sound
 * endpoint) is valid and maps to DAI 0.
 */
static int lt9611_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
				      struct device_node *endpoint)
{
	struct of_endpoint of_ep;
	int ret;

	ret = of_graph_parse_endpoint(endpoint, &of_ep);
	if (ret < 0)
		return ret;

	/*
	 * HDMI sound should be located as reg = <2>
	 * Then, it is sound port 0
	 */
	if (of_ep.port == 2)
		return 0;

	return -EINVAL;
}

static const struct hdmi_codec_ops lt9611_codec_ops = {
	.hw_params	= lt9611_hdmi_hw_params,
	.audio_shutdown = lt9611_audio_shutdown,
	.audio_startup	= lt9611_audio_startup,
	.get_dai_id	= lt9611_hdmi_i2s_get_dai_id,
};

/* Mutable because .data is filled in per-device in lt9611_audio_init(). */
static struct hdmi_codec_pdata codec_data = {
	.ops = &lt9611_codec_ops,
	.max_i2s_channels = 8,
	.i2s = 1,
};

/* Register the hdmi-codec platform device that exposes the audio path. */
static int lt9611_audio_init(struct device *dev, struct lt9611 *lt9611)
{
	codec_data.data = lt9611;
	lt9611->audio_pdev =
		platform_device_register_data(dev, HDMI_CODEC_DRV_NAME,
					      PLATFORM_DEVID_AUTO,
					      &codec_data, sizeof(codec_data));

	return PTR_ERR_OR_ZERO(lt9611->audio_pdev);
}

/* Tear down the audio codec device, if it was created. */
static void lt9611_audio_exit(struct lt9611 *lt9611)
{
	if (lt9611->audio_pdev) {
		platform_device_unregister(lt9611->audio_pdev);
		lt9611->audio_pdev = NULL;
	}
}

/*
 * i2c probe: allocate state, parse DT, bring up GPIOs/regulators, reset
 * and identify the chip, request the threaded IRQ, register the DRM
 * bridge, attach the DSI host(s), enable HPD and register audio.
 * Error unwinding is strictly in reverse order of acquisition.
 */
static int lt9611_probe(struct i2c_client *client)
{
	struct lt9611 *lt9611;
	struct device *dev = &client->dev;
	int ret;

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
		dev_err(dev, "device doesn't support I2C\n");
		return -ENODEV;
	}

	lt9611 = devm_kzalloc(dev, sizeof(*lt9611), GFP_KERNEL);
	if (!lt9611)
		return -ENOMEM;

	lt9611->dev = dev;
	lt9611->client = client;
	lt9611->sleep = false;

	lt9611->regmap = devm_regmap_init_i2c(client, &lt9611_regmap_config);
	if (IS_ERR(lt9611->regmap)) {
		dev_err(lt9611->dev, "regmap i2c init failed\n");
		return PTR_ERR(lt9611->regmap);
	}

	ret = lt9611_parse_dt(dev, lt9611);
	if (ret) {
		dev_err(dev, "failed to parse device tree\n");
		return ret;
	}

	ret = lt9611_gpio_init(lt9611);
	if (ret < 0)
		goto err_of_put;

	ret = lt9611_regulator_init(lt9611);
	if (ret < 0)
		goto err_of_put;

	lt9611_assert_5v(lt9611);

	ret = lt9611_regulator_enable(lt9611);
	if (ret)
		goto err_of_put;

	lt9611_reset(lt9611);

	ret = lt9611_read_device_rev(lt9611);
	if (ret) {
		dev_err(dev, "failed to read chip rev\n");
		goto err_disable_regulators;
	}

	ret = devm_request_threaded_irq(dev, client->irq, NULL,
					lt9611_irq_thread_handler,
					IRQF_ONESHOT, "lt9611", lt9611);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		goto err_disable_regulators;
	}

	i2c_set_clientdata(client, lt9611);

	lt9611->bridge.funcs = &lt9611_bridge_funcs;
	lt9611->bridge.of_node = client->dev.of_node;
	lt9611->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
			     DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_MODES;
	lt9611->bridge.type = DRM_MODE_CONNECTOR_HDMIA;

	drm_bridge_add(&lt9611->bridge);

	/* Attach primary DSI */
	lt9611->dsi0 = lt9611_attach_dsi(lt9611, lt9611->dsi0_node);
	if (IS_ERR(lt9611->dsi0)) {
		ret = PTR_ERR(lt9611->dsi0);
		goto err_remove_bridge;
	}

	/* Attach secondary DSI, if specified */
	if (lt9611->dsi1_node) {
		lt9611->dsi1 = lt9611_attach_dsi(lt9611, lt9611->dsi1_node);
		if (IS_ERR(lt9611->dsi1)) {
			ret = PTR_ERR(lt9611->dsi1);
			goto err_remove_bridge;
		}
	}

	lt9611_enable_hpd_interrupts(lt9611);

	ret = lt9611_audio_init(dev, lt9611);
	if (ret)
		goto err_remove_bridge;

	return 0;

err_remove_bridge:
	drm_bridge_remove(&lt9611->bridge);

err_disable_regulators:
	regulator_bulk_disable(ARRAY_SIZE(lt9611->supplies), lt9611->supplies);

err_of_put:
	of_node_put(lt9611->dsi1_node);
	of_node_put(lt9611->dsi0_node);

	return ret;
}

/*
 * i2c remove: quiesce the IRQ first so no handler runs during teardown,
 * then unwind audio, bridge, regulators and OF node references.
 */
static void lt9611_remove(struct i2c_client *client)
{
	struct lt9611 *lt9611 = i2c_get_clientdata(client);

	disable_irq(client->irq);
	lt9611_audio_exit(lt9611);
	drm_bridge_remove(&lt9611->bridge);

	regulator_bulk_disable(ARRAY_SIZE(lt9611->supplies), lt9611->supplies);

	of_node_put(lt9611->dsi1_node);
	of_node_put(lt9611->dsi0_node);
}

/* NOTE(review): could be const; kept as-is to match the original. */
static struct i2c_device_id lt9611_id[] = {
	{ "lontium,lt9611", 0 },
	{}
};
MODULE_DEVICE_TABLE(i2c, lt9611_id);

static const struct of_device_id lt9611_match_table[] = {
	{ .compatible = "lontium,lt9611" },
	{ }
};
MODULE_DEVICE_TABLE(of, lt9611_match_table);

static struct i2c_driver lt9611_driver = {
	.driver = {
		.name = "lt9611",
		.of_match_table = lt9611_match_table,
	},
	.probe = lt9611_probe,
	.remove = lt9611_remove,
	.id_table = lt9611_id,
};
module_i2c_driver(lt9611_driver);

MODULE_LICENSE("GPL v2");
/* Origin: linux-master, drivers/gpu/drm/bridge/lontium-lt9611.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * NXP PTN3460 DP/LVDS bridge driver
 *
 * Copyright (C) 2013 Google, Inc.
 */

#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

/* I2C register map: EDID SRAM window and emulation control. */
#define PTN3460_EDID_ADDR			0x0
#define PTN3460_EDID_EMULATION_ADDR		0x84
#define PTN3460_EDID_ENABLE_EMULATION		0
#define PTN3460_EDID_EMULATION_SELECTION	1
#define PTN3460_EDID_SRAM_LOAD_ADDR		0x85

/* Per-device state; embeds both the drm_bridge and its drm_connector. */
struct ptn3460_bridge {
	struct drm_connector connector;
	struct i2c_client *client;
	struct drm_bridge bridge;
	struct drm_bridge *panel_bridge;
	struct gpio_desc *gpio_pd_n;	/* powerdown, active low */
	struct gpio_desc *gpio_rst_n;	/* reset, active low */
	u32 edid_emulation;		/* which built-in EDID to emulate */
	bool enabled;
};

static inline struct ptn3460_bridge *
		bridge_to_ptn3460(struct drm_bridge *bridge)
{
	return container_of(bridge, struct ptn3460_bridge, bridge);
}

static inline struct ptn3460_bridge *
		connector_to_ptn3460(struct drm_connector *connector)
{
	return container_of(connector, struct ptn3460_bridge, connector);
}

/*
 * Read @len bytes starting at register @addr via a write-address /
 * read-data i2c pair. Returns 0 on success or the (<= 0) i2c error.
 */
static int ptn3460_read_bytes(struct ptn3460_bridge *ptn_bridge, char addr,
		u8 *buf, int len)
{
	int ret;

	ret = i2c_master_send(ptn_bridge->client, &addr, 1);
	if (ret <= 0) {
		DRM_ERROR("Failed to send i2c command, ret=%d\n", ret);
		return ret;
	}

	ret = i2c_master_recv(ptn_bridge->client, buf, len);
	if (ret <= 0) {
		DRM_ERROR("Failed to recv i2c data, ret=%d\n", ret);
		return ret;
	}

	return 0;
}

/* Write a single register byte: [addr, val] in one i2c transfer. */
static int ptn3460_write_byte(struct ptn3460_bridge *ptn_bridge, char addr,
		char val)
{
	int ret;
	char buf[2];

	buf[0] = addr;
	buf[1] = val;

	ret = i2c_master_send(ptn_bridge->client, buf, ARRAY_SIZE(buf));
	if (ret <= 0) {
		DRM_ERROR("Failed to send i2c command, ret=%d\n", ret);
		return ret;
	}

	return 0;
}

/*
 * Load the DT-selected built-in EDID into the chip's SRAM and turn on
 * EDID emulation so downstream EDID reads come from that SRAM.
 */
static int ptn3460_select_edid(struct ptn3460_bridge *ptn_bridge)
{
	int ret;
	char val;

	/* Load the selected edid into SRAM (accessed at PTN3460_EDID_ADDR) */
	ret = ptn3460_write_byte(ptn_bridge, PTN3460_EDID_SRAM_LOAD_ADDR,
			ptn_bridge->edid_emulation);
	if (ret) {
		DRM_ERROR("Failed to transfer EDID to sram, ret=%d\n", ret);
		return ret;
	}

	/* Enable EDID emulation and select the desired EDID */
	val = 1 << PTN3460_EDID_ENABLE_EMULATION |
		ptn_bridge->edid_emulation << PTN3460_EDID_EMULATION_SELECTION;

	ret = ptn3460_write_byte(ptn_bridge, PTN3460_EDID_EMULATION_ADDR, val);
	if (ret) {
		DRM_ERROR("Failed to write EDID value, ret=%d\n", ret);
		return ret;
	}

	return 0;
}

/*
 * Power-up sequence: release powerdown, pulse reset, wait out the chip's
 * datasheet start-up time, then program EDID emulation. Idempotent via
 * the ->enabled flag.
 */
static void ptn3460_pre_enable(struct drm_bridge *bridge)
{
	struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
	int ret;

	if (ptn_bridge->enabled)
		return;

	gpiod_set_value(ptn_bridge->gpio_pd_n, 1);

	gpiod_set_value(ptn_bridge->gpio_rst_n, 0);
	usleep_range(10, 20);
	gpiod_set_value(ptn_bridge->gpio_rst_n, 1);

	/*
	 * There's a bug in the PTN chip where it falsely asserts hotplug before
	 * it is fully functional. We're forced to wait for the maximum start up
	 * time specified in the chip's datasheet to make sure we're really up.
	 */
	msleep(90);

	ret = ptn3460_select_edid(ptn_bridge);
	if (ret)
		DRM_ERROR("Select EDID failed ret=%d\n", ret);

	ptn_bridge->enabled = true;
}

/* Power-down: assert reset, then powerdown (reverse of pre_enable). */
static void ptn3460_disable(struct drm_bridge *bridge)
{
	struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);

	if (!ptn_bridge->enabled)
		return;

	ptn_bridge->enabled = false;

	gpiod_set_value(ptn_bridge->gpio_rst_n, 1);
	gpiod_set_value(ptn_bridge->gpio_pd_n, 0);
}

/*
 * Read one EDID block out of the chip's SRAM. Temporarily powers the
 * chip up if it was off and restores the previous power state before
 * returning. Returns a kmalloc'd EDID (caller frees) or NULL.
 */
static struct edid *ptn3460_get_edid(struct drm_bridge *bridge,
				     struct drm_connector *connector)
{
	struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
	bool power_off;
	u8 *edid;
	int ret;

	power_off = !ptn_bridge->enabled;
	ptn3460_pre_enable(&ptn_bridge->bridge);

	edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
	if (!edid) {
		DRM_ERROR("Failed to allocate EDID\n");
		goto out;
	}

	ret = ptn3460_read_bytes(ptn_bridge, PTN3460_EDID_ADDR, edid,
			EDID_LENGTH);
	if (ret) {
		kfree(edid);
		edid = NULL;
		goto out;
	}

out:
	if (power_off)
		ptn3460_disable(&ptn_bridge->bridge);

	return (struct edid *)edid;
}

/* Connector .get_modes: fetch the emulated EDID and add its modes. */
static int ptn3460_connector_get_modes(struct drm_connector *connector)
{
	struct ptn3460_bridge *ptn_bridge = connector_to_ptn3460(connector);
	struct edid *edid;
	int num_modes;

	edid = ptn3460_get_edid(&ptn_bridge->bridge, connector);
	drm_connector_update_edid_property(connector, edid);
	num_modes = drm_add_edid_modes(connector, edid);
	kfree(edid);

	return num_modes;
}

static const struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = {
	.get_modes = ptn3460_connector_get_modes,
};

static const struct drm_connector_funcs ptn3460_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

/*
 * Bridge .attach: chain to the panel bridge (always without its own
 * connector), then create this driver's LVDS connector unless the caller
 * asked for NO_CONNECTOR.
 */
static int ptn3460_bridge_attach(struct drm_bridge *bridge,
				 enum drm_bridge_attach_flags flags)
{
	struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
	int ret;

	/* Let this driver create connector if requested */
	ret = drm_bridge_attach(bridge->encoder, ptn_bridge->panel_bridge,
				bridge, flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret < 0)
		return ret;

	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
		return 0;

	if (!bridge->encoder) {
		DRM_ERROR("Parent encoder object not found");
		return -ENODEV;
	}

	ptn_bridge->connector.polled = DRM_CONNECTOR_POLL_HPD;
	ret = drm_connector_init(bridge->dev, &ptn_bridge->connector,
			&ptn3460_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
	if (ret) {
		DRM_ERROR("Failed to initialize connector with drm\n");
		return ret;
	}
	drm_connector_helper_add(&ptn_bridge->connector,
					&ptn3460_connector_helper_funcs);
	drm_connector_register(&ptn_bridge->connector);
	drm_connector_attach_encoder(&ptn_bridge->connector,
							bridge->encoder);

	drm_helper_hpd_irq_event(ptn_bridge->connector.dev);

	return ret;
}

static const struct drm_bridge_funcs ptn3460_bridge_funcs = {
	.pre_enable = ptn3460_pre_enable,
	.disable = ptn3460_disable,
	.attach = ptn3460_bridge_attach,
	.get_edid = ptn3460_get_edid,
};

/*
 * i2c probe: find the downstream panel bridge, acquire the powerdown and
 * reset GPIOs (reset held low so the chip stays quiescent), read the
 * EDID-emulation index from DT and register the bridge.
 */
static int ptn3460_probe(struct i2c_client *client)
{
	struct device *dev = &client->dev;
	struct ptn3460_bridge *ptn_bridge;
	struct drm_bridge *panel_bridge;
	int ret;

	ptn_bridge = devm_kzalloc(dev, sizeof(*ptn_bridge), GFP_KERNEL);
	if (!ptn_bridge) {
		return -ENOMEM;
	}

	panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
	if (IS_ERR(panel_bridge))
		return PTR_ERR(panel_bridge);

	ptn_bridge->panel_bridge = panel_bridge;
	ptn_bridge->client = client;

	ptn_bridge->gpio_pd_n = devm_gpiod_get(&client->dev, "powerdown",
					       GPIOD_OUT_HIGH);
	if (IS_ERR(ptn_bridge->gpio_pd_n)) {
		ret = PTR_ERR(ptn_bridge->gpio_pd_n);
		dev_err(dev, "cannot get gpio_pd_n %d\n", ret);
		return ret;
	}

	/*
	 * Request the reset pin low to avoid the bridge being
	 * initialized prematurely
	 */
	ptn_bridge->gpio_rst_n = devm_gpiod_get(&client->dev, "reset",
						GPIOD_OUT_LOW);
	if (IS_ERR(ptn_bridge->gpio_rst_n)) {
		ret = PTR_ERR(ptn_bridge->gpio_rst_n);
		DRM_ERROR("cannot get gpio_rst_n %d\n", ret);
		return ret;
	}

	ret = of_property_read_u32(dev->of_node, "edid-emulation",
			&ptn_bridge->edid_emulation);
	if (ret) {
		dev_err(dev, "Can't read EDID emulation value\n");
		return ret;
	}

	ptn_bridge->bridge.funcs = &ptn3460_bridge_funcs;
	ptn_bridge->bridge.ops = DRM_BRIDGE_OP_EDID;
	ptn_bridge->bridge.type = DRM_MODE_CONNECTOR_LVDS;
	ptn_bridge->bridge.of_node = dev->of_node;
	drm_bridge_add(&ptn_bridge->bridge);

	i2c_set_clientdata(client, ptn_bridge);

	return 0;
}

/* i2c remove: just unregister the bridge; everything else is devm. */
static void ptn3460_remove(struct i2c_client *client)
{
	struct ptn3460_bridge *ptn_bridge = i2c_get_clientdata(client);

	drm_bridge_remove(&ptn_bridge->bridge);
}

static const struct i2c_device_id ptn3460_i2c_table[] = {
	{"ptn3460", 0},
	{},
};
MODULE_DEVICE_TABLE(i2c, ptn3460_i2c_table);

static const struct of_device_id ptn3460_match[] = {
	{ .compatible = "nxp,ptn3460" },
	{},
};
MODULE_DEVICE_TABLE(of, ptn3460_match);

static struct i2c_driver ptn3460_driver = {
	.id_table	= ptn3460_i2c_table,
	.probe		= ptn3460_probe,
	.remove		= ptn3460_remove,
	.driver		= {
		.name	= "nxp,ptn3460",
		.of_match_table = ptn3460_match,
	},
};
module_i2c_driver(ptn3460_driver);

MODULE_AUTHOR("Sean Paul <[email protected]>");
MODULE_DESCRIPTION("NXP ptn3460 eDP-LVDS converter driver");
MODULE_LICENSE("GPL v2");
/* Origin: linux-master, drivers/gpu/drm/bridge/nxp-ptn3460.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2022 Marek Vasut <[email protected]>
 */

#include <linux/clk.h>
#include <linux/media-bus-format.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>

/* LDB_CTRL register bits (per-channel enable, format and polarity). */
#define LDB_CTRL_CH0_ENABLE			BIT(0)
#define LDB_CTRL_CH0_DI_SELECT			BIT(1)
#define LDB_CTRL_CH1_ENABLE			BIT(2)
#define LDB_CTRL_CH1_DI_SELECT			BIT(3)
#define LDB_CTRL_SPLIT_MODE			BIT(4)
#define LDB_CTRL_CH0_DATA_WIDTH			BIT(5)
#define LDB_CTRL_CH0_BIT_MAPPING		BIT(6)
#define LDB_CTRL_CH1_DATA_WIDTH			BIT(7)
#define LDB_CTRL_CH1_BIT_MAPPING		BIT(8)
#define LDB_CTRL_DI0_VSYNC_POLARITY		BIT(9)
#define LDB_CTRL_DI1_VSYNC_POLARITY		BIT(10)
#define LDB_CTRL_REG_CH0_FIFO_RESET		BIT(11)
#define LDB_CTRL_REG_CH1_FIFO_RESET		BIT(12)
#define LDB_CTRL_ASYNC_FIFO_ENABLE		BIT(24)
#define LDB_CTRL_ASYNC_FIFO_THRESHOLD_MASK	GENMASK(27, 25)

/* LVDS_CTRL register bits (PHY enables and analog trims). */
#define LVDS_CTRL_CH0_EN			BIT(0)
#define LVDS_CTRL_CH1_EN			BIT(1)
/*
 * LVDS_CTRL_LVDS_EN bit is poorly named in i.MX93 reference manual.
 * Clear it to enable LVDS and set it to disable LVDS.
 */
#define LVDS_CTRL_LVDS_EN			BIT(1)
#define LVDS_CTRL_VBG_EN			BIT(2)
#define LVDS_CTRL_HS_EN				BIT(3)
#define LVDS_CTRL_PRE_EMPH_EN			BIT(4)
#define LVDS_CTRL_PRE_EMPH_ADJ(n)		(((n) & 0x7) << 5)
#define LVDS_CTRL_PRE_EMPH_ADJ_MASK		GENMASK(7, 5)
#define LVDS_CTRL_CM_ADJ(n)			(((n) & 0x7) << 8)
#define LVDS_CTRL_CM_ADJ_MASK			GENMASK(10, 8)
#define LVDS_CTRL_CC_ADJ(n)			(((n) & 0x7) << 11)
#define LVDS_CTRL_CC_ADJ_MASK			GENMASK(13, 11)
#define LVDS_CTRL_SLEW_ADJ(n)			(((n) & 0x7) << 14)
#define LVDS_CTRL_SLEW_ADJ_MASK			GENMASK(16, 14)
#define LVDS_CTRL_VBG_ADJ(n)			(((n) & 0x7) << 17)
#define LVDS_CTRL_VBG_ADJ_MASK			GENMASK(19, 17)

enum fsl_ldb_devtype {
	IMX6SX_LDB,
	IMX8MP_LDB,
	IMX93_LDB,
};

/* Per-SoC register layout and quirks. */
struct fsl_ldb_devdata {
	u32 ldb_ctrl;		/* LDB_CTRL offset in the syscon */
	u32 lvds_ctrl;		/* LVDS_CTRL offset (if present) */
	bool lvds_en_bit;	/* i.MX93: disable via LVDS_EN bit */
	bool single_ctrl_reg;	/* i.MX6SX: no separate LVDS_CTRL */
};

static const struct fsl_ldb_devdata fsl_ldb_devdata[] = {
	[IMX6SX_LDB] = {
		.ldb_ctrl = 0x18,
		.single_ctrl_reg = true,
	},
	[IMX8MP_LDB] = {
		.ldb_ctrl = 0x5c,
		.lvds_ctrl = 0x128,
	},
	[IMX93_LDB] = {
		.ldb_ctrl = 0x20,
		.lvds_ctrl = 0x24,
		.lvds_en_bit = true,
	},
};

struct fsl_ldb {
	struct device *dev;
	struct drm_bridge bridge;
	struct drm_bridge *panel_bridge;
	struct clk *clk;
	struct regmap *regmap;
	const struct fsl_ldb_devdata *devdata;
	bool ch0_enabled;
	bool ch1_enabled;
};

/* Dual-link mode: both LVDS channels drive one panel. */
static bool fsl_ldb_is_dual(const struct fsl_ldb *fsl_ldb)
{
	return (fsl_ldb->ch0_enabled && fsl_ldb->ch1_enabled);
}

static inline struct fsl_ldb *to_fsl_ldb(struct drm_bridge *bridge)
{
	return container_of(bridge, struct fsl_ldb, bridge);
}

/*
 * LDB clock = 7x pixel clock (single link) or 3.5x (dual link, where
 * each channel carries half the pixels). @clock is in kHz, result in Hz.
 */
static unsigned long fsl_ldb_link_frequency(struct fsl_ldb *fsl_ldb, int clock)
{
	if (fsl_ldb_is_dual(fsl_ldb))
		return clock * 3500;
	else
		return clock * 7000;
}

static int fsl_ldb_attach(struct drm_bridge *bridge,
			  enum drm_bridge_attach_flags flags)
{
	struct fsl_ldb *fsl_ldb = to_fsl_ldb(bridge);

	return drm_bridge_attach(bridge->encoder, fsl_ldb->panel_bridge,
				 bridge, flags);
}

/*
 * Enable path: resolve the LVDS bus format, set the LDB clock to the
 * link frequency for the CRTC mode, program LDB_CTRL, then (where the
 * SoC has one) program LVDS_CTRL in two steps with a VBG settle delay
 * before enabling the channel drivers.
 */
static void fsl_ldb_atomic_enable(struct drm_bridge *bridge,
				  struct drm_bridge_state *old_bridge_state)
{
	struct fsl_ldb *fsl_ldb = to_fsl_ldb(bridge);
	struct drm_atomic_state *state = old_bridge_state->base.state;
	const struct drm_bridge_state *bridge_state;
	const struct drm_crtc_state *crtc_state;
	const struct drm_display_mode *mode;
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	unsigned long configured_link_freq;
	unsigned long requested_link_freq;
	bool lvds_format_24bpp;
	bool lvds_format_jeida;
	u32 reg;

	/* Get the LVDS format from the bridge state. */
	bridge_state = drm_atomic_get_new_bridge_state(state, bridge);

	switch (bridge_state->output_bus_cfg.format) {
	case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
		lvds_format_24bpp = false;
		lvds_format_jeida = true;
		break;
	case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
		lvds_format_24bpp = true;
		lvds_format_jeida = true;
		break;
	case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
		lvds_format_24bpp = true;
		lvds_format_jeida = false;
		break;
	default:
		/*
		 * Some bridges still don't set the correct LVDS bus pixel
		 * format, use SPWG24 default format until those are fixed.
		 */
		lvds_format_24bpp = true;
		lvds_format_jeida = false;
		dev_warn(fsl_ldb->dev,
			 "Unsupported LVDS bus format 0x%04x, please check output bridge driver. Falling back to SPWG24.\n",
			 bridge_state->output_bus_cfg.format);
		break;
	}

	/*
	 * Retrieve the CRTC adjusted mode. This requires a little dance to go
	 * from the bridge to the encoder, to the connector and to the CRTC.
	 */
	connector = drm_atomic_get_new_connector_for_encoder(state,
							     bridge->encoder);
	crtc = drm_atomic_get_new_connector_state(state, connector)->crtc;
	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	mode = &crtc_state->adjusted_mode;

	requested_link_freq = fsl_ldb_link_frequency(fsl_ldb, mode->clock);
	clk_set_rate(fsl_ldb->clk, requested_link_freq);

	configured_link_freq = clk_get_rate(fsl_ldb->clk);
	if (configured_link_freq != requested_link_freq)
		dev_warn(fsl_ldb->dev, "Configured LDB clock (%lu Hz) does not match requested LVDS clock: %lu Hz\n",
			 configured_link_freq,
			 requested_link_freq);

	clk_prepare_enable(fsl_ldb->clk);

	/* Program LDB_CTRL */
	reg = (fsl_ldb->ch0_enabled ? LDB_CTRL_CH0_ENABLE : 0) |
	      (fsl_ldb->ch1_enabled ? LDB_CTRL_CH1_ENABLE : 0) |
	      (fsl_ldb_is_dual(fsl_ldb) ? LDB_CTRL_SPLIT_MODE : 0);

	if (lvds_format_24bpp)
		reg |= (fsl_ldb->ch0_enabled ? LDB_CTRL_CH0_DATA_WIDTH : 0) |
		       (fsl_ldb->ch1_enabled ? LDB_CTRL_CH1_DATA_WIDTH : 0);

	if (lvds_format_jeida)
		reg |= (fsl_ldb->ch0_enabled ? LDB_CTRL_CH0_BIT_MAPPING : 0) |
		       (fsl_ldb->ch1_enabled ? LDB_CTRL_CH1_BIT_MAPPING : 0);

	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
		reg |= (fsl_ldb->ch0_enabled ? LDB_CTRL_DI0_VSYNC_POLARITY : 0) |
		       (fsl_ldb->ch1_enabled ? LDB_CTRL_DI1_VSYNC_POLARITY : 0);

	regmap_write(fsl_ldb->regmap, fsl_ldb->devdata->ldb_ctrl, reg);

	if (fsl_ldb->devdata->single_ctrl_reg)
		return;

	/* Program LVDS_CTRL */
	reg = LVDS_CTRL_CC_ADJ(2) | LVDS_CTRL_PRE_EMPH_EN |
	      LVDS_CTRL_PRE_EMPH_ADJ(3) | LVDS_CTRL_VBG_EN;
	regmap_write(fsl_ldb->regmap, fsl_ldb->devdata->lvds_ctrl, reg);

	/* Wait for VBG to stabilize. */
	usleep_range(15, 20);

	reg |= (fsl_ldb->ch0_enabled ? LVDS_CTRL_CH0_EN : 0) |
	       (fsl_ldb->ch1_enabled ? LVDS_CTRL_CH1_EN : 0);

	regmap_write(fsl_ldb->regmap, fsl_ldb->devdata->lvds_ctrl, reg);
}

/*
 * Disable path: stop the LVDS channels (mechanism differs per SoC),
 * clear LDB_CTRL, then gate the LDB clock.
 */
static void fsl_ldb_atomic_disable(struct drm_bridge *bridge,
				   struct drm_bridge_state *old_bridge_state)
{
	struct fsl_ldb *fsl_ldb = to_fsl_ldb(bridge);

	/* Stop channel(s). */
	if (fsl_ldb->devdata->lvds_en_bit)
		/* Set LVDS_CTRL_LVDS_EN bit to disable. */
		regmap_write(fsl_ldb->regmap, fsl_ldb->devdata->lvds_ctrl,
			     LVDS_CTRL_LVDS_EN);
	else if (!fsl_ldb->devdata->single_ctrl_reg)
		regmap_write(fsl_ldb->regmap, fsl_ldb->devdata->lvds_ctrl, 0);
	regmap_write(fsl_ldb->regmap, fsl_ldb->devdata->ldb_ctrl, 0);

	clk_disable_unprepare(fsl_ldb->clk);
}

#define MAX_INPUT_SEL_FORMATS 1

/* Single supported input bus format: parallel RGB888. */
static u32 *
fsl_ldb_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
				  struct drm_bridge_state *bridge_state,
				  struct drm_crtc_state *crtc_state,
				  struct drm_connector_state *conn_state,
				  u32 output_fmt,
				  unsigned int *num_input_fmts)
{
	u32 *input_fmts;

	*num_input_fmts = 0;

	input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts),
			     GFP_KERNEL);
	if (!input_fmts)
		return NULL;

	input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
	*num_input_fmts = MAX_INPUT_SEL_FORMATS;

	return input_fmts;
}

/* Pixel clock cap: 160 MHz dual-link, 80 MHz single-link (kHz units). */
static enum drm_mode_status
fsl_ldb_mode_valid(struct drm_bridge *bridge,
		   const struct drm_display_info *info,
		   const struct drm_display_mode *mode)
{
	struct fsl_ldb *fsl_ldb = to_fsl_ldb(bridge);

	if (mode->clock > (fsl_ldb_is_dual(fsl_ldb) ? 160000 : 80000))
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

static const struct drm_bridge_funcs funcs = {
	.attach = fsl_ldb_attach,
	.atomic_enable = fsl_ldb_atomic_enable,
	.atomic_disable = fsl_ldb_atomic_disable,
	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
	.atomic_get_input_bus_fmts = fsl_ldb_atomic_get_input_bus_fmts,
	.atomic_reset = drm_atomic_helper_bridge_reset,
	.mode_valid = fsl_ldb_mode_valid,
};

/*
 * Platform probe: look up per-SoC data, grab the "ldb" clock and the
 * parent syscon regmap, discover which channel ports (1 and/or 2) have
 * remote panels, validate dual-link pixel ordering, and register the
 * bridge.
 */
static int fsl_ldb_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *panel_node;
	struct device_node *remote1, *remote2;
	struct drm_panel *panel;
	struct fsl_ldb *fsl_ldb;
	int dual_link;

	fsl_ldb = devm_kzalloc(dev, sizeof(*fsl_ldb), GFP_KERNEL);
	if (!fsl_ldb)
		return -ENOMEM;

	fsl_ldb->devdata = of_device_get_match_data(dev);
	if (!fsl_ldb->devdata)
		return -EINVAL;

	fsl_ldb->dev = &pdev->dev;
	fsl_ldb->bridge.funcs = &funcs;
	fsl_ldb->bridge.of_node = dev->of_node;

	fsl_ldb->clk = devm_clk_get(dev, "ldb");
	if (IS_ERR(fsl_ldb->clk))
		return PTR_ERR(fsl_ldb->clk);

	fsl_ldb->regmap = syscon_node_to_regmap(dev->of_node->parent);
	if (IS_ERR(fsl_ldb->regmap))
		return PTR_ERR(fsl_ldb->regmap);

	/* Locate the remote ports and the panel node */
	remote1 = of_graph_get_remote_node(dev->of_node, 1, 0);
	remote2 = of_graph_get_remote_node(dev->of_node, 2, 0);
	fsl_ldb->ch0_enabled = (remote1 != NULL);
	fsl_ldb->ch1_enabled = (remote2 != NULL);
	panel_node = of_node_get(remote1 ? remote1 : remote2);
	of_node_put(remote1);
	of_node_put(remote2);

	if (!fsl_ldb->ch0_enabled && !fsl_ldb->ch1_enabled) {
		of_node_put(panel_node);
		return dev_err_probe(dev, -ENXIO, "No panel node found");
	}

	dev_dbg(dev, "Using %s\n",
		fsl_ldb_is_dual(fsl_ldb) ? "dual-link mode" :
		fsl_ldb->ch0_enabled ? "channel 0" : "channel 1");

	panel = of_drm_find_panel(panel_node);
	of_node_put(panel_node);
	if (IS_ERR(panel))
		return PTR_ERR(panel);

	fsl_ldb->panel_bridge = devm_drm_panel_bridge_add(dev, panel);
	if (IS_ERR(fsl_ldb->panel_bridge))
		return PTR_ERR(fsl_ldb->panel_bridge);

	if (fsl_ldb_is_dual(fsl_ldb)) {
		struct device_node *port1, *port2;

		port1 = of_graph_get_port_by_id(dev->of_node, 1);
		port2 = of_graph_get_port_by_id(dev->of_node, 2);
		dual_link = drm_of_lvds_get_dual_link_pixel_order(port1, port2);
		of_node_put(port1);
		of_node_put(port2);

		if (dual_link < 0)
			return dev_err_probe(dev, dual_link,
					     "Error getting dual link configuration\n");

		/* Only DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS is supported */
		if (dual_link == DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS) {
			dev_err(dev, "LVDS channel pixel swap not supported.\n");
			return -EINVAL;
		}
	}

	platform_set_drvdata(pdev, fsl_ldb);

	drm_bridge_add(&fsl_ldb->bridge);

	return 0;
}

static void fsl_ldb_remove(struct platform_device *pdev)
{
	struct fsl_ldb *fsl_ldb = platform_get_drvdata(pdev);

	drm_bridge_remove(&fsl_ldb->bridge);
}

static const struct of_device_id fsl_ldb_match[] = {
	{ .compatible = "fsl,imx6sx-ldb",
	  .data = &fsl_ldb_devdata[IMX6SX_LDB], },
	{ .compatible = "fsl,imx8mp-ldb",
	  .data = &fsl_ldb_devdata[IMX8MP_LDB], },
	{ .compatible = "fsl,imx93-ldb",
	  .data = &fsl_ldb_devdata[IMX93_LDB], },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, fsl_ldb_match);

static struct platform_driver fsl_ldb_driver = {
	.probe = fsl_ldb_probe,
	.remove_new = fsl_ldb_remove,
	.driver = {
		.name = "fsl-ldb",
		.of_match_table = fsl_ldb_match,
	},
};
module_platform_driver(fsl_ldb_driver);

MODULE_AUTHOR("Marek Vasut <[email protected]>");
MODULE_DESCRIPTION("Freescale i.MX8MP LDB");
MODULE_LICENSE("GPL");
/* Origin: linux-master, drivers/gpu/drm/bridge/fsl-ldb.c */
// SPDX-License-Identifier: GPL-2.0-or-later /* * TC358767/TC358867/TC9595 DSI/DPI-to-DPI/(e)DP bridge driver * * The TC358767/TC358867/TC9595 can operate in multiple modes. * All modes are supported -- DPI->(e)DP / DSI->DPI / DSI->(e)DP . * * Copyright (C) 2016 CogentEmbedded Inc * Author: Andrey Gusakov <[email protected]> * * Copyright (C) 2016 Pengutronix, Philipp Zabel <[email protected]> * * Copyright (C) 2016 Zodiac Inflight Innovations * * Initially based on: drivers/gpu/drm/i2c/tda998x_drv.c * * Copyright (C) 2012 Texas Instruments * Author: Rob Clark <[email protected]> */ #include <linux/bitfield.h> #include <linux/clk.h> #include <linux/device.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/kernel.h> #include <linux/media-bus-format.h> #include <linux/module.h> #include <linux/regmap.h> #include <linux/slab.h> #include <drm/display/drm_dp_helper.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_edid.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> /* Registers */ /* PPI layer registers */ #define PPI_STARTPPI 0x0104 /* START control bit */ #define PPI_LPTXTIMECNT 0x0114 /* LPTX timing signal */ #define LPX_PERIOD 3 #define PPI_LANEENABLE 0x0134 #define PPI_TX_RX_TA 0x013c #define TTA_GET 0x40000 #define TTA_SURE 6 #define PPI_D0S_ATMR 0x0144 #define PPI_D1S_ATMR 0x0148 #define PPI_D0S_CLRSIPOCOUNT 0x0164 /* Assertion timer for Lane 0 */ #define PPI_D1S_CLRSIPOCOUNT 0x0168 /* Assertion timer for Lane 1 */ #define PPI_D2S_CLRSIPOCOUNT 0x016c /* Assertion timer for Lane 2 */ #define PPI_D3S_CLRSIPOCOUNT 0x0170 /* Assertion timer for Lane 3 */ #define PPI_START_FUNCTION BIT(0) /* DSI layer registers */ #define DSI_STARTDSI 0x0204 /* START control bit of DSI-TX */ #define DSI_LANEENABLE 0x0210 /* Enables each lane */ #define DSI_RX_START BIT(0) /* Lane enable PPI and DSI register bits */ #define 
LANEENABLE_CLEN BIT(0) #define LANEENABLE_L0EN BIT(1) #define LANEENABLE_L1EN BIT(2) #define LANEENABLE_L2EN BIT(1) #define LANEENABLE_L3EN BIT(2) /* Display Parallel Input Interface */ #define DPIPXLFMT 0x0440 #define VS_POL_ACTIVE_LOW (1 << 10) #define HS_POL_ACTIVE_LOW (1 << 9) #define DE_POL_ACTIVE_HIGH (0 << 8) #define SUB_CFG_TYPE_CONFIG1 (0 << 2) /* LSB aligned */ #define SUB_CFG_TYPE_CONFIG2 (1 << 2) /* Loosely Packed */ #define SUB_CFG_TYPE_CONFIG3 (2 << 2) /* LSB aligned 8-bit */ #define DPI_BPP_RGB888 (0 << 0) #define DPI_BPP_RGB666 (1 << 0) #define DPI_BPP_RGB565 (2 << 0) /* Display Parallel Output Interface */ #define POCTRL 0x0448 #define POCTRL_S2P BIT(7) #define POCTRL_PCLK_POL BIT(3) #define POCTRL_VS_POL BIT(2) #define POCTRL_HS_POL BIT(1) #define POCTRL_DE_POL BIT(0) /* Video Path */ #define VPCTRL0 0x0450 #define VSDELAY GENMASK(31, 20) #define OPXLFMT_RGB666 (0 << 8) #define OPXLFMT_RGB888 (1 << 8) #define FRMSYNC_DISABLED (0 << 4) /* Video Timing Gen Disabled */ #define FRMSYNC_ENABLED (1 << 4) /* Video Timing Gen Enabled */ #define MSF_DISABLED (0 << 0) /* Magic Square FRC disabled */ #define MSF_ENABLED (1 << 0) /* Magic Square FRC enabled */ #define HTIM01 0x0454 #define HPW GENMASK(8, 0) #define HBPR GENMASK(24, 16) #define HTIM02 0x0458 #define HDISPR GENMASK(10, 0) #define HFPR GENMASK(24, 16) #define VTIM01 0x045c #define VSPR GENMASK(7, 0) #define VBPR GENMASK(23, 16) #define VTIM02 0x0460 #define VFPR GENMASK(23, 16) #define VDISPR GENMASK(10, 0) #define VFUEN0 0x0464 #define VFUEN BIT(0) /* Video Frame Timing Upload */ /* System */ #define TC_IDREG 0x0500 #define SYSSTAT 0x0508 #define SYSCTRL 0x0510 #define DP0_AUDSRC_NO_INPUT (0 << 3) #define DP0_AUDSRC_I2S_RX (1 << 3) #define DP0_VIDSRC_NO_INPUT (0 << 0) #define DP0_VIDSRC_DSI_RX (1 << 0) #define DP0_VIDSRC_DPI_RX (2 << 0) #define DP0_VIDSRC_COLOR_BAR (3 << 0) #define SYSRSTENB 0x050c #define ENBI2C (1 << 0) #define ENBLCD0 (1 << 2) #define ENBBM (1 << 3) #define ENBDSIRX (1 << 4) 
#define ENBREG (1 << 5) #define ENBHDCP (1 << 8) #define GPIOM 0x0540 #define GPIOC 0x0544 #define GPIOO 0x0548 #define GPIOI 0x054c #define INTCTL_G 0x0560 #define INTSTS_G 0x0564 #define INT_SYSERR BIT(16) #define INT_GPIO_H(x) (1 << (x == 0 ? 2 : 10)) #define INT_GPIO_LC(x) (1 << (x == 0 ? 3 : 11)) #define INT_GP0_LCNT 0x0584 #define INT_GP1_LCNT 0x0588 /* Control */ #define DP0CTL 0x0600 #define VID_MN_GEN BIT(6) /* Auto-generate M/N values */ #define EF_EN BIT(5) /* Enable Enhanced Framing */ #define VID_EN BIT(1) /* Video transmission enable */ #define DP_EN BIT(0) /* Enable DPTX function */ /* Clocks */ #define DP0_VIDMNGEN0 0x0610 #define DP0_VIDMNGEN1 0x0614 #define DP0_VMNGENSTATUS 0x0618 /* Main Channel */ #define DP0_SECSAMPLE 0x0640 #define DP0_VIDSYNCDELAY 0x0644 #define VID_SYNC_DLY GENMASK(15, 0) #define THRESH_DLY GENMASK(31, 16) #define DP0_TOTALVAL 0x0648 #define H_TOTAL GENMASK(15, 0) #define V_TOTAL GENMASK(31, 16) #define DP0_STARTVAL 0x064c #define H_START GENMASK(15, 0) #define V_START GENMASK(31, 16) #define DP0_ACTIVEVAL 0x0650 #define H_ACT GENMASK(15, 0) #define V_ACT GENMASK(31, 16) #define DP0_SYNCVAL 0x0654 #define VS_WIDTH GENMASK(30, 16) #define HS_WIDTH GENMASK(14, 0) #define SYNCVAL_HS_POL_ACTIVE_LOW (1 << 15) #define SYNCVAL_VS_POL_ACTIVE_LOW (1 << 31) #define DP0_MISC 0x0658 #define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */ #define MAX_TU_SYMBOL GENMASK(28, 23) #define TU_SIZE GENMASK(21, 16) #define BPC_6 (0 << 5) #define BPC_8 (1 << 5) /* AUX channel */ #define DP0_AUXCFG0 0x0660 #define DP0_AUXCFG0_BSIZE GENMASK(11, 8) #define DP0_AUXCFG0_ADDR_ONLY BIT(4) #define DP0_AUXCFG1 0x0664 #define AUX_RX_FILTER_EN BIT(16) #define DP0_AUXADDR 0x0668 #define DP0_AUXWDATA(i) (0x066c + (i) * 4) #define DP0_AUXRDATA(i) (0x067c + (i) * 4) #define DP0_AUXSTATUS 0x068c #define AUX_BYTES GENMASK(15, 8) #define AUX_STATUS GENMASK(7, 4) #define AUX_TIMEOUT BIT(1) #define AUX_BUSY BIT(0) #define DP0_AUXI2CADR 0x0698 /* Link Training */ 
#define DP0_SRCCTRL		0x06a0
#define DP0_SRCCTRL_SCRMBLDIS	BIT(13)
#define DP0_SRCCTRL_EN810B	BIT(12)
#define DP0_SRCCTRL_NOTP	(0 << 8)
#define DP0_SRCCTRL_TP1		(1 << 8)
#define DP0_SRCCTRL_TP2		(2 << 8)
#define DP0_SRCCTRL_LANESKEW	BIT(7)
#define DP0_SRCCTRL_SSCG	BIT(3)
#define DP0_SRCCTRL_LANES_1	(0 << 2)
#define DP0_SRCCTRL_LANES_2	(1 << 2)
#define DP0_SRCCTRL_BW27	(1 << 1)
#define DP0_SRCCTRL_BW162	(0 << 1)
#define DP0_SRCCTRL_AUTOCORRECT	BIT(0)
#define DP0_LTSTAT		0x06d0
#define LT_LOOPDONE		BIT(13)
#define LT_STATUS_MASK		(0x1f << 8)
#define LT_CHANNEL1_EQ_BITS	(DP_CHANNEL_EQ_BITS << 4)
#define LT_INTERLANE_ALIGN_DONE	BIT(3)
#define LT_CHANNEL0_EQ_BITS	(DP_CHANNEL_EQ_BITS)
#define DP0_SNKLTCHGREQ		0x06d4
#define DP0_LTLOOPCTRL		0x06d8
#define DP0_SNKLTCTRL		0x06e4

#define DP1_SRCCTRL		0x07a0

/* PHY */
#define DP_PHY_CTRL		0x0800
#define DP_PHY_RST		BIT(28) /* DP PHY Global Soft Reset */
#define BGREN			BIT(25) /* AUX PHY BGR Enable */
#define PWR_SW_EN		BIT(24) /* PHY Power Switch Enable */
#define PHY_M1_RST		BIT(12) /* Reset PHY1 Main Channel */
#define PHY_RDY			BIT(16) /* PHY Main Channels Ready */
#define PHY_M0_RST		BIT(8)	/* Reset PHY0 Main Channel */
#define PHY_2LANE		BIT(2)	/* PHY Enable 2 lanes */
#define PHY_A0_EN		BIT(1)	/* PHY Aux Channel0 Enable */
#define PHY_M0_EN		BIT(0)	/* PHY Main Channel0 Enable */

/* PLL */
#define DP0_PLLCTRL		0x0900
#define DP1_PLLCTRL		0x0904	/* not defined in DS */
#define PXL_PLLCTRL		0x0908
#define PLLUPDATE		BIT(2)
#define PLLBYP			BIT(1)
#define PLLEN			BIT(0)
#define PXL_PLLPARAM		0x0914
#define IN_SEL_REFCLK		(0 << 14)
#define SYS_PLLPARAM		0x0918
#define REF_FREQ_38M4		(0 << 8) /* 38.4 MHz */
#define REF_FREQ_19M2		(1 << 8) /* 19.2 MHz */
#define REF_FREQ_26M		(2 << 8) /* 26 MHz */
#define REF_FREQ_13M		(3 << 8) /* 13 MHz */
#define SYSCLK_SEL_LSCLK	(0 << 4)
#define LSCLK_DIV_1		(0 << 0)
#define LSCLK_DIV_2		(1 << 0)

/* Test & Debug */
#define TSTCTL			0x0a00
#define COLOR_R			GENMASK(31, 24)
#define COLOR_G			GENMASK(23, 16)
#define COLOR_B			GENMASK(15, 8)
#define ENI2CFILTER		BIT(4)
#define COLOR_BAR_MODE		GENMASK(1, 0)
#define COLOR_BAR_MODE_BARS	2
#define PLL_DBG			0x0a04

/* Module parameter: route the built-in color-bar test pattern to the output */
static bool tc_test_pattern;
module_param_named(test, tc_test_pattern, bool, 0644);

/* Cached (e)DP link configuration, read from the sink's DPCD */
struct tc_edp_link {
	u8			dpcd[DP_RECEIVER_CAP_SIZE];
	unsigned int		rate;		/* link rate in kHz units (162000 or 270000) */
	u8			num_lanes;	/* 1 or 2 lanes (chip maximum is 2) */
	u8			assr;		/* sink's alternate scrambler seed reset state */
	bool			scrambler_dis;	/* run link with scrambler disabled */
	bool			spread;		/* sink supports 0.5%% down-spread */
};

/* Per-device driver state */
struct tc_data {
	struct device		*dev;
	struct regmap		*regmap;
	struct drm_dp_aux	aux;

	struct drm_bridge	bridge;
	struct drm_bridge	*panel_bridge;
	struct drm_connector	connector;

	struct mipi_dsi_device	*dsi;

	/* link settings */
	struct tc_edp_link	link;

	/* current mode */
	struct drm_display_mode	mode;

	u32			rev;
	u8			assr;

	struct gpio_desc	*sd_gpio;
	struct gpio_desc	*reset_gpio;
	struct clk		*refclk;

	/* do we have IRQ */
	bool			have_irq;

	/* Input connector type, DSI and not DPI. */
	bool			input_connector_dsi;

	/* HPD pin number (0 or 1) or -ENODEV */
	int			hpd_pin;
};

static inline struct tc_data *aux_to_tc(struct drm_dp_aux *a)
{
	return container_of(a, struct tc_data, aux);
}

static inline struct tc_data *bridge_to_tc(struct drm_bridge *b)
{
	return container_of(b, struct tc_data, bridge);
}

static inline struct tc_data *connector_to_tc(struct drm_connector *c)
{
	return container_of(c, struct tc_data, connector);
}

/*
 * Poll register @addr until (value & cond_mask) == cond_value, sleeping
 * sleep_us between reads, for at most timeout_us. Returns 0 or -ETIMEDOUT
 * (or an I/O error from the regmap read).
 */
static inline int tc_poll_timeout(struct tc_data *tc, unsigned int addr,
				  unsigned int cond_mask,
				  unsigned int cond_value,
				  unsigned long sleep_us, u64 timeout_us)
{
	unsigned int val;

	return regmap_read_poll_timeout(tc->regmap, addr, val,
					(val & cond_mask) == cond_value,
					sleep_us, timeout_us);
}

/* Wait for a previous AUX transaction to finish (AUX_BUSY deasserted) */
static int tc_aux_wait_busy(struct tc_data *tc)
{
	return tc_poll_timeout(tc, DP0_AUXSTATUS, AUX_BUSY, 0, 100, 100000);
}

/*
 * Load up to one AUX payload into the DP0_AUXWDATA window.
 * The write is padded up to a whole number of 32-bit words
 * (the tail of the last word is zero). Returns @size or a
 * negative regmap error.
 */
static int tc_aux_write_data(struct tc_data *tc, const void *data,
			     size_t size)
{
	u32 auxwdata[DP_AUX_MAX_PAYLOAD_BYTES / sizeof(u32)] = { 0 };
	int ret, count = ALIGN(size, sizeof(u32));

	memcpy(auxwdata, data, size);
	ret = regmap_raw_write(tc->regmap, DP0_AUXWDATA(0), auxwdata, count);
	if (ret)
		return ret;

	return size;
}

static int
tc_aux_read_data(struct tc_data *tc, void *data, size_t size)
{
	u32 auxrdata[DP_AUX_MAX_PAYLOAD_BYTES / sizeof(u32)];
	int ret, count = ALIGN(size, sizeof(u32));

	/* Read whole 32-bit words from the AUX read-data window, copy @size */
	ret = regmap_raw_read(tc->regmap, DP0_AUXRDATA(0), auxrdata, count);
	if (ret)
		return ret;
	memcpy(data, auxrdata, size);

	return size;
}

/*
 * Build the DP0_AUXCFG0 value that starts a transfer: request code plus
 * either the payload size (BSIZE holds size - 1) or the address-only flag.
 */
static u32 tc_auxcfg0(struct drm_dp_aux_msg *msg, size_t size)
{
	u32 auxcfg0 = msg->request;

	if (size)
		auxcfg0 |= FIELD_PREP(DP0_AUXCFG0_BSIZE, size - 1);
	else
		auxcfg0 |= DP0_AUXCFG0_ADDR_ONLY;

	return auxcfg0;
}

/*
 * drm_dp_aux.transfer callback: run one native/I2C-over-AUX transaction
 * through the DP0_AUX* register window and report the sink's reply.
 */
static ssize_t tc_aux_transfer(struct drm_dp_aux *aux,
			       struct drm_dp_aux_msg *msg)
{
	struct tc_data *tc = aux_to_tc(aux);
	size_t size = min_t(size_t, DP_AUX_MAX_PAYLOAD_BYTES - 1, msg->size);
	u8 request = msg->request & ~DP_AUX_I2C_MOT;
	u32 auxstatus;
	int ret;

	ret = tc_aux_wait_busy(tc);
	if (ret)
		return ret;

	switch (request) {
	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		break;
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		/* Stage the payload before the transfer is kicked off */
		if (size) {
			ret = tc_aux_write_data(tc, msg->buffer, size);
			if (ret < 0)
				return ret;
		}
		break;
	default:
		return -EINVAL;
	}

	/* Store address */
	ret = regmap_write(tc->regmap, DP0_AUXADDR, msg->address);
	if (ret)
		return ret;
	/* Start transfer */
	ret = regmap_write(tc->regmap, DP0_AUXCFG0, tc_auxcfg0(msg, size));
	if (ret)
		return ret;

	ret = tc_aux_wait_busy(tc);
	if (ret)
		return ret;

	ret = regmap_read(tc->regmap, DP0_AUXSTATUS, &auxstatus);
	if (ret)
		return ret;

	if (auxstatus & AUX_TIMEOUT)
		return -ETIMEDOUT;
	/*
	 * For some reason address-only DP_AUX_I2C_WRITE (MOT) still
	 * reports 1 byte transferred in its status. To deal with that
	 * we ignore the aux_bytes field if we know that this was an
	 * address-only transfer.
	 */
	if (size)
		size = FIELD_GET(AUX_BYTES, auxstatus);
	msg->reply = FIELD_GET(AUX_STATUS, auxstatus);

	switch (request) {
	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		if (size)
			return tc_aux_read_data(tc, msg->buffer, size);
		break;
	}
	return size;
}

/* Human-readable decoding of the DP0_LTSTAT error field after TP1 */
static const char * const training_pattern1_errors[] = {
	"No errors",
	"Aux write error",
	"Aux read error",
	"Max voltage reached error",
	"Loop counter expired error",
	"res", "res", "res"
};

/* Human-readable decoding of the DP0_LTSTAT error field after TP2 */
static const char * const training_pattern2_errors[] = {
	"No errors",
	"Aux write error",
	"Aux read error",
	"Clock recovery failed error",
	"Loop counter expired error",
	"res", "res", "res"
};

/* Compose the steady-state DP0_SRCCTRL value from the cached link config */
static u32 tc_srcctrl(struct tc_data *tc)
{
	/*
	 * No training pattern, skew lane 1 data by two LSCLK cycles with
	 * respect to lane 0 data, AutoCorrect Mode = 0
	 */
	u32 reg = DP0_SRCCTRL_NOTP | DP0_SRCCTRL_LANESKEW | DP0_SRCCTRL_EN810B;

	if (tc->link.scrambler_dis)
		reg |= DP0_SRCCTRL_SCRMBLDIS;	/* Scrambler Disabled */
	if (tc->link.spread)
		reg |= DP0_SRCCTRL_SSCG;	/* Spread Spectrum Enable */
	if (tc->link.num_lanes == 2)
		reg |= DP0_SRCCTRL_LANES_2;	/* Two Main Channel Lanes */
	if (tc->link.rate != 162000)
		reg |= DP0_SRCCTRL_BW27;	/* 2.7 Gbps link */
	return reg;
}

/* Latch new parameters into the given PLL and wait for it to lock */
static int tc_pllupdate(struct tc_data *tc, unsigned int pllctrl)
{
	int ret;

	ret = regmap_write(tc->regmap, pllctrl, PLLUPDATE | PLLEN);
	if (ret)
		return ret;

	/* Wait for PLL to lock: up to 7.5 ms, depending on refclk */
	usleep_range(15000, 20000);

	return 0;
}

/*
 * Program and enable the pixel PLL: exhaustively search the divider/
 * multiplier space for the setting closest to @pixelclock, given @refclk.
 */
static int tc_pxl_pll_en(struct tc_data *tc, u32 refclk, u32 pixelclock)
{
	int ret;
	int i_pre, best_pre = 1;
	int i_post, best_post = 1;
	int div, best_div = 1;
	int mul, best_mul = 1;
	int delta, best_delta;
	int ext_div[] = {1, 2, 3, 5, 7};
	int clk_min, clk_max;
	int best_pixelclock = 0;
	int vco_hi = 0;
	u32 pxl_pllparam;

	/*
	 * refclk * mul / (ext_pre_div * pre_div) should be in range:
	 * - DPI ..... 0 to 100 MHz
	 * - (e)DP ...
	 *             150 to 650 MHz
	 */
	if (tc->bridge.type == DRM_MODE_CONNECTOR_DPI) {
		clk_min = 0;
		clk_max = 100000000;
	} else {
		clk_min = 150000000;
		clk_max = 650000000;
	}

	dev_dbg(tc->dev, "PLL: requested %d pixelclock, ref %d\n", pixelclock,
		refclk);
	best_delta = pixelclock;
	/* Loop over all possible ext_divs, skipping invalid configurations */
	for (i_pre = 0; i_pre < ARRAY_SIZE(ext_div); i_pre++) {
		/*
		 * refclk / ext_pre_div should be in the 1 to 200 MHz range.
		 * We don't allow any refclk > 200 MHz, only check lower bounds.
		 */
		if (refclk / ext_div[i_pre] < 1000000)
			continue;
		for (i_post = 0; i_post < ARRAY_SIZE(ext_div); i_post++) {
			for (div = 1; div <= 16; div++) {
				u32 clk;
				u64 tmp;

				/* Multiplier that gets closest to the target */
				tmp = pixelclock * ext_div[i_pre] *
					ext_div[i_post] * div;
				do_div(tmp, refclk);
				mul = tmp;

				/* Check limits */
				if ((mul < 1) || (mul > 128))
					continue;

				/* VCO frequency must stay within clk_min..clk_max */
				clk = (refclk / ext_div[i_pre] / div) * mul;
				if ((clk > clk_max) || (clk < clk_min))
					continue;

				clk = clk / ext_div[i_post];
				delta = clk - pixelclock;

				if (abs(delta) < abs(best_delta)) {
					best_pre = i_pre;
					best_post = i_post;
					best_div = div;
					best_mul = mul;
					best_delta = delta;
					best_pixelclock = clk;
				}
			}
		}
	}
	if (best_pixelclock == 0) {
		dev_err(tc->dev, "Failed to calc clock for %d pixelclock\n",
			pixelclock);
		return -EINVAL;
	}

	dev_dbg(tc->dev, "PLL: got %d, delta %d\n", best_pixelclock,
		best_delta);
	dev_dbg(tc->dev, "PLL: %d / %d / %d * %d / %d\n", refclk,
		ext_div[best_pre], best_div, best_mul, ext_div[best_post]);

	/* if VCO >= 300 MHz */
	if (refclk / ext_div[best_pre] / best_div * best_mul >= 300000000)
		vco_hi = 1;
	/* see DS: divider/multiplier value 16/128 is encoded as 0 */
	if (best_div == 16)
		best_div = 0;
	if (best_mul == 128)
		best_mul = 0;

	/* Power up PLL and switch to bypass */
	ret = regmap_write(tc->regmap, PXL_PLLCTRL, PLLBYP | PLLEN);
	if (ret)
		return ret;

	pxl_pllparam  = vco_hi << 24; /* For PLL VCO >= 300 MHz = 1 */
	pxl_pllparam |= ext_div[best_pre] << 20; /* External Pre-divider */
	pxl_pllparam |= ext_div[best_post] << 16; /* External Post-divider */
	pxl_pllparam |= IN_SEL_REFCLK; /* Use RefClk as PLL input */
	pxl_pllparam |= best_div << 8; /* Divider for PLL RefClk */
	pxl_pllparam |= best_mul; /* Multiplier for PLL */

	ret = regmap_write(tc->regmap, PXL_PLLPARAM, pxl_pllparam);
	if (ret)
		return ret;

	/* Force PLL parameter update and disable bypass */
	return tc_pllupdate(tc, PXL_PLLCTRL);
}

/* Park the pixel PLL: enable bypass and drop the enable bit */
static int tc_pxl_pll_dis(struct tc_data *tc)
{
	/* Enable PLL bypass, power down PLL */
	return regmap_write(tc->regmap, PXL_PLLCTRL, PLLBYP);
}

/* Set the fixed N value used by the automatic M/N stream clock generator */
static int tc_stream_clock_calc(struct tc_data *tc)
{
	/*
	 * If the Stream clock and Link Symbol clock are
	 * asynchronous with each other, the value of M changes over
	 * time. This way of generating link clock and stream
	 * clock is called Asynchronous Clock mode. The value M
	 * must change while the value N stays constant. The
	 * value of N in this Asynchronous Clock mode must be set
	 * to 2^15 or 32,768.
	 *
	 * LSCLK = 1/10 of high speed link clock
	 *
	 * f_STRMCLK = M/N * f_LSCLK
	 * M/N = f_STRMCLK / f_LSCLK
	 */
	return regmap_write(tc->regmap, DP0_VIDMNGEN1, 32768);
}

/*
 * Program SYS_PLLPARAM from the board's reference clock rate.
 * Only the four rates the chip supports are accepted.
 */
static int tc_set_syspllparam(struct tc_data *tc)
{
	unsigned long rate;
	u32 pllparam = SYSCLK_SEL_LSCLK | LSCLK_DIV_2;

	rate = clk_get_rate(tc->refclk);
	switch (rate) {
	case 38400000:
		pllparam |= REF_FREQ_38M4;
		break;
	case 26000000:
		pllparam |= REF_FREQ_26M;
		break;
	case 19200000:
		pllparam |= REF_FREQ_19M2;
		break;
	case 13000000:
		pllparam |= REF_FREQ_13M;
		break;
	default:
		dev_err(tc->dev, "Invalid refclk rate: %lu Hz\n", rate);
		return -EINVAL;
	}

	return regmap_write(tc->regmap, SYS_PLLPARAM, pllparam);
}

/* Bring up the AUX-channel PHY/PLLs and register the DP AUX adapter */
static int tc_aux_link_setup(struct tc_data *tc)
{
	int ret;
	u32 dp0_auxcfg1;

	/* Setup DP-PHY / PLL */
	ret = tc_set_syspllparam(tc);
	if (ret)
		goto err;

	ret = regmap_write(tc->regmap, DP_PHY_CTRL,
			   BGREN | PWR_SW_EN | PHY_A0_EN);
	if (ret)
		goto err;
	/*
	 * Initially PLLs are in bypass.
	 * Force PLL parameter update,
	 * disable PLL bypass, enable PLL
	 */
	ret = tc_pllupdate(tc, DP0_PLLCTRL);
	if (ret)
		goto err;

	ret = tc_pllupdate(tc, DP1_PLLCTRL);
	if (ret)
		goto err;

	ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 100, 100000);
	if (ret == -ETIMEDOUT) {
		dev_err(tc->dev, "Timeout waiting for PHY to become ready");
		return ret;
	} else if (ret) {
		goto err;
	}

	/* Setup AUX link */
	dp0_auxcfg1  = AUX_RX_FILTER_EN;
	dp0_auxcfg1 |= 0x06 << 8; /* Aux Bit Period Calculator Threshold */
	dp0_auxcfg1 |= 0x3f << 0; /* Aux Response Timeout Timer */

	ret = regmap_write(tc->regmap, DP0_AUXCFG1, dp0_auxcfg1);
	if (ret)
		goto err;

	/* Register DP AUX channel */
	tc->aux.name = "TC358767 AUX i2c adapter";
	tc->aux.dev = tc->dev;
	tc->aux.transfer = tc_aux_transfer;
	drm_dp_aux_init(&tc->aux);

	return 0;
err:
	dev_err(tc->dev, "tc_aux_link_setup failed: %d\n", ret);
	return ret;
}

/*
 * Read the sink's DPCD receiver capabilities and fill tc->link,
 * clamping rate and lane count to what the TC358767 can drive
 * (max 2 lanes, 1.62 or 2.7 Gbps).
 */
static int tc_get_display_props(struct tc_data *tc)
{
	u8 revision, num_lanes;
	unsigned int rate;
	int ret;
	u8 reg;

	/* Read DP Rx Link Capability */
	ret = drm_dp_dpcd_read(&tc->aux, DP_DPCD_REV, tc->link.dpcd,
			       DP_RECEIVER_CAP_SIZE);
	if (ret < 0)
		goto err_dpcd_read;

	revision = tc->link.dpcd[DP_DPCD_REV];
	rate = drm_dp_max_link_rate(tc->link.dpcd);
	num_lanes = drm_dp_max_lane_count(tc->link.dpcd);

	if (rate != 162000 && rate != 270000) {
		dev_dbg(tc->dev, "Falling to 2.7 Gbps rate\n");
		rate = 270000;
	}
	tc->link.rate = rate;

	if (num_lanes > 2) {
		dev_dbg(tc->dev, "Falling to 2 lanes\n");
		num_lanes = 2;
	}
	tc->link.num_lanes = num_lanes;

	ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, &reg);
	if (ret < 0)
		goto err_dpcd_read;
	tc->link.spread = reg & DP_MAX_DOWNSPREAD_0_5;

	ret = drm_dp_dpcd_readb(&tc->aux, DP_MAIN_LINK_CHANNEL_CODING, &reg);
	if (ret < 0)
		goto err_dpcd_read;

	tc->link.scrambler_dis = false;
	/* read assr */
	ret = drm_dp_dpcd_readb(&tc->aux, DP_EDP_CONFIGURATION_SET, &reg);
	if (ret < 0)
		goto err_dpcd_read;
	tc->link.assr = reg & DP_ALTERNATE_SCRAMBLER_RESET_ENABLE;

	dev_dbg(tc->dev, "DPCD rev: %d.%d, rate: %s, lanes: %d, framing: %s\n",
		revision >> 4, revision & 0x0f,
		(tc->link.rate == 162000) ? "1.62Gbps" : "2.7Gbps",
		tc->link.num_lanes,
		drm_dp_enhanced_frame_cap(tc->link.dpcd) ?
		"enhanced" : "default");
	dev_dbg(tc->dev, "Downspread: %s, scrambler: %s\n",
		tc->link.spread ? "0.5%" : "0.0%",
		tc->link.scrambler_dis ? "disabled" : "enabled");
	dev_dbg(tc->dev, "Display ASSR: %d, TC358767 ASSR: %d\n",
		tc->link.assr, tc->assr);

	return 0;

err_dpcd_read:
	dev_err(tc->dev, "failed to read DPCD: %d\n", ret);
	return ret;
}

/*
 * Program the video-timing registers (VPCTRL0/HTIM/VTIM/TSTCTL) shared
 * between the DPI and (e)DP output paths from @mode.
 */
static int tc_set_common_video_mode(struct tc_data *tc,
				    const struct drm_display_mode *mode)
{
	int left_margin = mode->htotal - mode->hsync_end;
	int right_margin = mode->hsync_start - mode->hdisplay;
	int hsync_len = mode->hsync_end - mode->hsync_start;
	int upper_margin = mode->vtotal - mode->vsync_end;
	int lower_margin = mode->vsync_start - mode->vdisplay;
	int vsync_len = mode->vsync_end - mode->vsync_start;
	int ret;

	dev_dbg(tc->dev, "set mode %dx%d\n",
		mode->hdisplay, mode->vdisplay);
	dev_dbg(tc->dev, "H margin %d,%d sync %d\n",
		left_margin, right_margin, hsync_len);
	dev_dbg(tc->dev, "V margin %d,%d sync %d\n",
		upper_margin, lower_margin, vsync_len);
	dev_dbg(tc->dev, "total: %dx%d\n", mode->htotal, mode->vtotal);

	/*
	 * LCD Ctl Frame Size
	 * datasheet is not clear of vsdelay in case of DPI
	 * assume we do not need any delay when DPI is a source of
	 * sync signals
	 */
	ret = regmap_write(tc->regmap, VPCTRL0,
			   FIELD_PREP(VSDELAY, right_margin + 10) |
			   OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED);
	if (ret)
		return ret;

	ret = regmap_write(tc->regmap, HTIM01,
			   FIELD_PREP(HBPR, ALIGN(left_margin, 2)) |
			   FIELD_PREP(HPW, ALIGN(hsync_len, 2)));
	if (ret)
		return ret;

	ret = regmap_write(tc->regmap, HTIM02,
			   FIELD_PREP(HDISPR, ALIGN(mode->hdisplay, 2)) |
			   FIELD_PREP(HFPR, ALIGN(right_margin, 2)));
	if (ret)
		return ret;

	ret = regmap_write(tc->regmap, VTIM01,
			   FIELD_PREP(VBPR, upper_margin) |
			   FIELD_PREP(VSPR, vsync_len));
	if (ret)
		return ret;

	ret = regmap_write(tc->regmap,
VTIM02, FIELD_PREP(VFPR, lower_margin) | FIELD_PREP(VDISPR, mode->vdisplay)); if (ret) return ret; ret = regmap_write(tc->regmap, VFUEN0, VFUEN); /* update settings */ if (ret) return ret; /* Test pattern settings */ ret = regmap_write(tc->regmap, TSTCTL, FIELD_PREP(COLOR_R, 120) | FIELD_PREP(COLOR_G, 20) | FIELD_PREP(COLOR_B, 99) | ENI2CFILTER | FIELD_PREP(COLOR_BAR_MODE, COLOR_BAR_MODE_BARS)); return ret; } static int tc_set_dpi_video_mode(struct tc_data *tc, const struct drm_display_mode *mode) { u32 value = POCTRL_S2P; if (tc->mode.flags & DRM_MODE_FLAG_NHSYNC) value |= POCTRL_HS_POL; if (tc->mode.flags & DRM_MODE_FLAG_NVSYNC) value |= POCTRL_VS_POL; return regmap_write(tc->regmap, POCTRL, value); } static int tc_set_edp_video_mode(struct tc_data *tc, const struct drm_display_mode *mode) { int ret; int vid_sync_dly; int max_tu_symbol; int left_margin = mode->htotal - mode->hsync_end; int hsync_len = mode->hsync_end - mode->hsync_start; int upper_margin = mode->vtotal - mode->vsync_end; int vsync_len = mode->vsync_end - mode->vsync_start; u32 dp0_syncval; u32 bits_per_pixel = 24; u32 in_bw, out_bw; u32 dpipxlfmt; /* * Recommended maximum number of symbols transferred in a transfer unit: * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size, * (output active video bandwidth in bytes)) * Must be less than tu_size. 
*/ in_bw = mode->clock * bits_per_pixel / 8; out_bw = tc->link.num_lanes * tc->link.rate; max_tu_symbol = DIV_ROUND_UP(in_bw * TU_SIZE_RECOMMENDED, out_bw); /* DP Main Stream Attributes */ vid_sync_dly = hsync_len + left_margin + mode->hdisplay; ret = regmap_write(tc->regmap, DP0_VIDSYNCDELAY, FIELD_PREP(THRESH_DLY, max_tu_symbol) | FIELD_PREP(VID_SYNC_DLY, vid_sync_dly)); ret = regmap_write(tc->regmap, DP0_TOTALVAL, FIELD_PREP(H_TOTAL, mode->htotal) | FIELD_PREP(V_TOTAL, mode->vtotal)); if (ret) return ret; ret = regmap_write(tc->regmap, DP0_STARTVAL, FIELD_PREP(H_START, left_margin + hsync_len) | FIELD_PREP(V_START, upper_margin + vsync_len)); if (ret) return ret; ret = regmap_write(tc->regmap, DP0_ACTIVEVAL, FIELD_PREP(V_ACT, mode->vdisplay) | FIELD_PREP(H_ACT, mode->hdisplay)); if (ret) return ret; dp0_syncval = FIELD_PREP(VS_WIDTH, vsync_len) | FIELD_PREP(HS_WIDTH, hsync_len); if (mode->flags & DRM_MODE_FLAG_NVSYNC) dp0_syncval |= SYNCVAL_VS_POL_ACTIVE_LOW; if (mode->flags & DRM_MODE_FLAG_NHSYNC) dp0_syncval |= SYNCVAL_HS_POL_ACTIVE_LOW; ret = regmap_write(tc->regmap, DP0_SYNCVAL, dp0_syncval); if (ret) return ret; dpipxlfmt = DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888; if (mode->flags & DRM_MODE_FLAG_NVSYNC) dpipxlfmt |= VS_POL_ACTIVE_LOW; if (mode->flags & DRM_MODE_FLAG_NHSYNC) dpipxlfmt |= HS_POL_ACTIVE_LOW; ret = regmap_write(tc->regmap, DPIPXLFMT, dpipxlfmt); if (ret) return ret; ret = regmap_write(tc->regmap, DP0_MISC, FIELD_PREP(MAX_TU_SYMBOL, max_tu_symbol) | FIELD_PREP(TU_SIZE, TU_SIZE_RECOMMENDED) | BPC_8); return ret; } static int tc_wait_link_training(struct tc_data *tc) { u32 value; int ret; ret = tc_poll_timeout(tc, DP0_LTSTAT, LT_LOOPDONE, LT_LOOPDONE, 500, 100000); if (ret) { dev_err(tc->dev, "Link training timeout waiting for LT_LOOPDONE!\n"); return ret; } ret = regmap_read(tc->regmap, DP0_LTSTAT, &value); if (ret) return ret; return (value >> 8) & 0x7; } static int tc_main_link_enable(struct tc_data *tc) { struct drm_dp_aux 
*aux = &tc->aux; struct device *dev = tc->dev; u32 dp_phy_ctrl; u32 value; int ret; u8 tmp[DP_LINK_STATUS_SIZE]; dev_dbg(tc->dev, "link enable\n"); ret = regmap_read(tc->regmap, DP0CTL, &value); if (ret) return ret; if (WARN_ON(value & DP_EN)) { ret = regmap_write(tc->regmap, DP0CTL, 0); if (ret) return ret; } ret = regmap_write(tc->regmap, DP0_SRCCTRL, tc_srcctrl(tc)); if (ret) return ret; /* SSCG and BW27 on DP1 must be set to the same as on DP0 */ ret = regmap_write(tc->regmap, DP1_SRCCTRL, (tc->link.spread ? DP0_SRCCTRL_SSCG : 0) | ((tc->link.rate != 162000) ? DP0_SRCCTRL_BW27 : 0)); if (ret) return ret; ret = tc_set_syspllparam(tc); if (ret) return ret; /* Setup Main Link */ dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN | PHY_M0_EN; if (tc->link.num_lanes == 2) dp_phy_ctrl |= PHY_2LANE; ret = regmap_write(tc->regmap, DP_PHY_CTRL, dp_phy_ctrl); if (ret) return ret; /* PLL setup */ ret = tc_pllupdate(tc, DP0_PLLCTRL); if (ret) return ret; ret = tc_pllupdate(tc, DP1_PLLCTRL); if (ret) return ret; /* Reset/Enable Main Links */ dp_phy_ctrl |= DP_PHY_RST | PHY_M1_RST | PHY_M0_RST; ret = regmap_write(tc->regmap, DP_PHY_CTRL, dp_phy_ctrl); usleep_range(100, 200); dp_phy_ctrl &= ~(DP_PHY_RST | PHY_M1_RST | PHY_M0_RST); ret = regmap_write(tc->regmap, DP_PHY_CTRL, dp_phy_ctrl); ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 500, 100000); if (ret) { dev_err(dev, "timeout waiting for phy become ready"); return ret; } /* Set misc: 8 bits per color */ ret = regmap_update_bits(tc->regmap, DP0_MISC, BPC_8, BPC_8); if (ret) return ret; /* * ASSR mode * on TC358767 side ASSR configured through strap pin * seems there is no way to change this setting from SW * * check is tc configured for same mode */ if (tc->assr != tc->link.assr) { dev_dbg(dev, "Trying to set display to ASSR: %d\n", tc->assr); /* try to set ASSR on display side */ tmp[0] = tc->assr; ret = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET, tmp[0]); if (ret < 0) goto err_dpcd_read; /* read back */ ret = 
drm_dp_dpcd_readb(aux, DP_EDP_CONFIGURATION_SET, tmp); if (ret < 0) goto err_dpcd_read; if (tmp[0] != tc->assr) { dev_dbg(dev, "Failed to switch display ASSR to %d, falling back to unscrambled mode\n", tc->assr); /* trying with disabled scrambler */ tc->link.scrambler_dis = true; } } /* Setup Link & DPRx Config for Training */ tmp[0] = drm_dp_link_rate_to_bw_code(tc->link.rate); tmp[1] = tc->link.num_lanes; if (drm_dp_enhanced_frame_cap(tc->link.dpcd)) tmp[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; ret = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, tmp, 2); if (ret < 0) goto err_dpcd_write; /* DOWNSPREAD_CTRL */ tmp[0] = tc->link.spread ? DP_SPREAD_AMP_0_5 : 0x00; /* MAIN_LINK_CHANNEL_CODING_SET */ tmp[1] = DP_SET_ANSI_8B10B; ret = drm_dp_dpcd_write(aux, DP_DOWNSPREAD_CTRL, tmp, 2); if (ret < 0) goto err_dpcd_write; /* Reset voltage-swing & pre-emphasis */ tmp[0] = tmp[1] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0; ret = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_SET, tmp, 2); if (ret < 0) goto err_dpcd_write; /* Clock-Recovery */ /* Set DPCD 0x102 for Training Pattern 1 */ ret = regmap_write(tc->regmap, DP0_SNKLTCTRL, DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_1); if (ret) return ret; ret = regmap_write(tc->regmap, DP0_LTLOOPCTRL, (15 << 28) | /* Defer Iteration Count */ (15 << 24) | /* Loop Iteration Count */ (0xd << 0)); /* Loop Timer Delay */ if (ret) return ret; ret = regmap_write(tc->regmap, DP0_SRCCTRL, tc_srcctrl(tc) | DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_AUTOCORRECT | DP0_SRCCTRL_TP1); if (ret) return ret; /* Enable DP0 to start Link Training */ ret = regmap_write(tc->regmap, DP0CTL, (drm_dp_enhanced_frame_cap(tc->link.dpcd) ? 
EF_EN : 0) | DP_EN); if (ret) return ret; /* wait */ ret = tc_wait_link_training(tc); if (ret < 0) return ret; if (ret) { dev_err(tc->dev, "Link training phase 1 failed: %s\n", training_pattern1_errors[ret]); return -ENODEV; } /* Channel Equalization */ /* Set DPCD 0x102 for Training Pattern 2 */ ret = regmap_write(tc->regmap, DP0_SNKLTCTRL, DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_2); if (ret) return ret; ret = regmap_write(tc->regmap, DP0_SRCCTRL, tc_srcctrl(tc) | DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_AUTOCORRECT | DP0_SRCCTRL_TP2); if (ret) return ret; /* wait */ ret = tc_wait_link_training(tc); if (ret < 0) return ret; if (ret) { dev_err(tc->dev, "Link training phase 2 failed: %s\n", training_pattern2_errors[ret]); return -ENODEV; } /* * Toshiba's documentation suggests to first clear DPCD 0x102, then * clear the training pattern bit in DP0_SRCCTRL. Testing shows * that the link sometimes drops if those steps are done in that order, * but if the steps are done in reverse order, the link stays up. * * So we do the steps differently than documented here. */ /* Clear Training Pattern, set AutoCorrect Mode = 1 */ ret = regmap_write(tc->regmap, DP0_SRCCTRL, tc_srcctrl(tc) | DP0_SRCCTRL_AUTOCORRECT); if (ret) return ret; /* Clear DPCD 0x102 */ /* Note: Can Not use DP0_SNKLTCTRL (0x06E4) short cut */ tmp[0] = tc->link.scrambler_dis ? 
DP_LINK_SCRAMBLING_DISABLE : 0x00; ret = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, tmp[0]); if (ret < 0) goto err_dpcd_write; /* Check link status */ ret = drm_dp_dpcd_read_link_status(aux, tmp); if (ret < 0) goto err_dpcd_read; ret = 0; value = tmp[0] & DP_CHANNEL_EQ_BITS; if (value != DP_CHANNEL_EQ_BITS) { dev_err(tc->dev, "Lane 0 failed: %x\n", value); ret = -ENODEV; } if (tc->link.num_lanes == 2) { value = (tmp[0] >> 4) & DP_CHANNEL_EQ_BITS; if (value != DP_CHANNEL_EQ_BITS) { dev_err(tc->dev, "Lane 1 failed: %x\n", value); ret = -ENODEV; } if (!(tmp[2] & DP_INTERLANE_ALIGN_DONE)) { dev_err(tc->dev, "Interlane align failed\n"); ret = -ENODEV; } } if (ret) { dev_err(dev, "0x0202 LANE0_1_STATUS: 0x%02x\n", tmp[0]); dev_err(dev, "0x0203 LANE2_3_STATUS 0x%02x\n", tmp[1]); dev_err(dev, "0x0204 LANE_ALIGN_STATUS_UPDATED: 0x%02x\n", tmp[2]); dev_err(dev, "0x0205 SINK_STATUS: 0x%02x\n", tmp[3]); dev_err(dev, "0x0206 ADJUST_REQUEST_LANE0_1: 0x%02x\n", tmp[4]); dev_err(dev, "0x0207 ADJUST_REQUEST_LANE2_3: 0x%02x\n", tmp[5]); return ret; } return 0; err_dpcd_read: dev_err(tc->dev, "Failed to read DPCD: %d\n", ret); return ret; err_dpcd_write: dev_err(tc->dev, "Failed to write DPCD: %d\n", ret); return ret; } static int tc_main_link_disable(struct tc_data *tc) { int ret; dev_dbg(tc->dev, "link disable\n"); ret = regmap_write(tc->regmap, DP0_SRCCTRL, 0); if (ret) return ret; ret = regmap_write(tc->regmap, DP0CTL, 0); if (ret) return ret; return regmap_update_bits(tc->regmap, DP_PHY_CTRL, PHY_M0_RST | PHY_M1_RST | PHY_M0_EN, PHY_M0_RST | PHY_M1_RST); } static int tc_dsi_rx_enable(struct tc_data *tc) { u32 value; int ret; regmap_write(tc->regmap, PPI_D0S_CLRSIPOCOUNT, 25); regmap_write(tc->regmap, PPI_D1S_CLRSIPOCOUNT, 25); regmap_write(tc->regmap, PPI_D2S_CLRSIPOCOUNT, 25); regmap_write(tc->regmap, PPI_D3S_CLRSIPOCOUNT, 25); regmap_write(tc->regmap, PPI_D0S_ATMR, 0); regmap_write(tc->regmap, PPI_D1S_ATMR, 0); regmap_write(tc->regmap, PPI_TX_RX_TA, TTA_GET | TTA_SURE); 
	regmap_write(tc->regmap, PPI_LPTXTIMECNT, LPX_PERIOD);

	/* Enable the clock lane plus tc->dsi->lanes data lanes */
	value = ((LANEENABLE_L0EN << tc->dsi->lanes) - LANEENABLE_L0EN) |
		LANEENABLE_CLEN;
	regmap_write(tc->regmap, PPI_LANEENABLE, value);
	regmap_write(tc->regmap, DSI_LANEENABLE, value);

	/* Set input interface */
	value = DP0_AUDSRC_NO_INPUT;
	if (tc_test_pattern)
		value |= DP0_VIDSRC_COLOR_BAR;
	else
		value |= DP0_VIDSRC_DSI_RX;
	ret = regmap_write(tc->regmap, SYSCTRL, value);
	if (ret)
		return ret;

	usleep_range(120, 150);

	regmap_write(tc->regmap, PPI_STARTPPI, PPI_START_FUNCTION);
	regmap_write(tc->regmap, DSI_STARTDSI, DSI_RX_START);

	return 0;
}

/* Select the DPI receiver (or color-bar test pattern) as video source */
static int tc_dpi_rx_enable(struct tc_data *tc)
{
	u32 value;

	/* Set input interface */
	value = DP0_AUDSRC_NO_INPUT;
	if (tc_test_pattern)
		value |= DP0_VIDSRC_COLOR_BAR;
	else
		value |= DP0_VIDSRC_DPI_RX;
	return regmap_write(tc->regmap, SYSCTRL, value);
}

/*
 * Start the DSI-to-DPI video stream: PLLs, common + DPI timing, then the
 * DSI receiver.
 */
static int tc_dpi_stream_enable(struct tc_data *tc)
{
	int ret;

	dev_dbg(tc->dev, "enable video stream\n");

	/* Setup PLL */
	ret = tc_set_syspllparam(tc);
	if (ret)
		return ret;

	/*
	 * Initially PLLs are in bypass. Force PLL parameter update,
	 * disable PLL bypass, enable PLL
	 */
	ret = tc_pllupdate(tc, DP0_PLLCTRL);
	if (ret)
		return ret;

	ret = tc_pllupdate(tc, DP1_PLLCTRL);
	if (ret)
		return ret;

	/* Pixel PLL must always be enabled for DPI mode */
	ret = tc_pxl_pll_en(tc, clk_get_rate(tc->refclk),
			    1000 * tc->mode.clock);
	if (ret)
		return ret;

	ret = tc_set_common_video_mode(tc, &tc->mode);
	if (ret)
		return ret;

	ret = tc_set_dpi_video_mode(tc, &tc->mode);
	if (ret)
		return ret;

	return tc_dsi_rx_enable(tc);
}

/* Stop the DPI-output video stream (only the pixel PLL needs parking) */
static int tc_dpi_stream_disable(struct tc_data *tc)
{
	dev_dbg(tc->dev, "disable video stream\n");

	tc_pxl_pll_dis(tc);

	return 0;
}

/*
 * Start the (e)DP video stream: pixel PLL (when needed), timing, M/N
 * generator, then DP_EN followed — after a settling delay — by VID_EN.
 */
static int tc_edp_stream_enable(struct tc_data *tc)
{
	int ret;
	u32 value;

	dev_dbg(tc->dev, "enable video stream\n");

	/*
	 * Pixel PLL must be enabled for DSI input mode and test pattern.
	 *
	 * Per TC9595XBG datasheet Revision 0.1 2018-12-27 Figure 4.18
	 * "Clock Mode Selection and Clock Sources", either Pixel PLL
	 * or DPI_PCLK supplies StrmClk. DPI_PCLK is only available in
	 * case valid Pixel Clock are supplied to the chip DPI input.
	 * In case built-in test pattern is desired OR DSI input mode
	 * is used, DPI_PCLK is not available and thus Pixel PLL must
	 * be used instead.
	 */
	if (tc->input_connector_dsi || tc_test_pattern) {
		ret = tc_pxl_pll_en(tc, clk_get_rate(tc->refclk),
				    1000 * tc->mode.clock);
		if (ret)
			return ret;
	}

	ret = tc_set_common_video_mode(tc, &tc->mode);
	if (ret)
		return ret;

	ret = tc_set_edp_video_mode(tc, &tc->mode);
	if (ret)
		return ret;

	/* Set M/N */
	ret = tc_stream_clock_calc(tc);
	if (ret)
		return ret;

	value = VID_MN_GEN | DP_EN;
	if (drm_dp_enhanced_frame_cap(tc->link.dpcd))
		value |= EF_EN;
	ret = regmap_write(tc->regmap, DP0CTL, value);
	if (ret)
		return ret;

	/*
	 * VID_EN assertion should be delayed by at least N * LSCLK
	 * cycles from the time VID_MN_GEN is enabled in order to
	 * generate stable values for VID_M. LSCLK is 270 MHz or
	 * 162 MHz, VID_N is set to 32768 in tc_stream_clock_calc(),
	 * so a delay of at least 203 us should suffice.
	 */
	usleep_range(500, 1000);
	value |= VID_EN;
	ret = regmap_write(tc->regmap, DP0CTL, value);
	if (ret)
		return ret;

	/* Set input interface */
	if (tc->input_connector_dsi)
		return tc_dsi_rx_enable(tc);
	else
		return tc_dpi_rx_enable(tc);
}

/* Stop the (e)DP video stream: deassert VID_EN, then park the pixel PLL */
static int tc_edp_stream_disable(struct tc_data *tc)
{
	int ret;

	dev_dbg(tc->dev, "disable video stream\n");

	ret = regmap_update_bits(tc->regmap, DP0CTL, VID_EN, 0);
	if (ret)
		return ret;

	tc_pxl_pll_dis(tc);

	return 0;
}

/* drm_bridge atomic_enable for the DPI-output personality */
static void
tc_dpi_bridge_atomic_enable(struct drm_bridge *bridge,
			    struct drm_bridge_state *old_bridge_state)
{
	struct tc_data *tc = bridge_to_tc(bridge);
	int ret;

	ret = tc_dpi_stream_enable(tc);
	if (ret < 0) {
		dev_err(tc->dev, "main link stream start error: %d\n", ret);
		tc_main_link_disable(tc);
		return;
	}
}

/* drm_bridge atomic_disable for the DPI-output personality */
static void
tc_dpi_bridge_atomic_disable(struct drm_bridge *bridge,
			     struct drm_bridge_state *old_bridge_state)
{
	struct tc_data *tc = bridge_to_tc(bridge);
	int ret;

	ret = tc_dpi_stream_disable(tc);
	if (ret < 0)
		dev_err(tc->dev, "main link stream stop error: %d\n", ret);
}

/* drm_bridge atomic_enable for the (e)DP personality: props, link, stream */
static void
tc_edp_bridge_atomic_enable(struct drm_bridge *bridge,
			    struct drm_bridge_state *old_bridge_state)
{
	struct tc_data *tc = bridge_to_tc(bridge);
	int ret;

	ret = tc_get_display_props(tc);
	if (ret < 0) {
		dev_err(tc->dev, "failed to read display props: %d\n", ret);
		return;
	}

	ret = tc_main_link_enable(tc);
	if (ret < 0) {
		dev_err(tc->dev, "main link enable error: %d\n", ret);
		return;
	}

	ret = tc_edp_stream_enable(tc);
	if (ret < 0) {
		dev_err(tc->dev, "main link stream start error: %d\n", ret);
		tc_main_link_disable(tc);
		return;
	}
}

/* drm_bridge atomic_disable for the (e)DP personality */
static void
tc_edp_bridge_atomic_disable(struct drm_bridge *bridge,
			     struct drm_bridge_state *old_bridge_state)
{
	struct tc_data *tc = bridge_to_tc(bridge);
	int ret;

	ret = tc_edp_stream_disable(tc);
	if (ret < 0)
		dev_err(tc->dev, "main link stream stop error: %d\n", ret);

	ret = tc_main_link_disable(tc);
	if (ret < 0)
		dev_err(tc->dev, "main link disable error: %d\n", ret);
}

static int tc_dpi_atomic_check(struct drm_bridge
*bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { /* DSI->DPI interface clock limitation: upto 100 MHz */ if (crtc_state->adjusted_mode.clock > 100000) return -EINVAL; return 0; } static int tc_edp_atomic_check(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { /* DPI->(e)DP interface clock limitation: upto 154 MHz */ if (crtc_state->adjusted_mode.clock > 154000) return -EINVAL; return 0; } static enum drm_mode_status tc_dpi_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode) { /* DPI interface clock limitation: upto 100 MHz */ if (mode->clock > 100000) return MODE_CLOCK_HIGH; return MODE_OK; } static enum drm_mode_status tc_edp_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode) { struct tc_data *tc = bridge_to_tc(bridge); u32 req, avail; u32 bits_per_pixel = 24; /* DPI interface clock limitation: upto 154 MHz */ if (mode->clock > 154000) return MODE_CLOCK_HIGH; req = mode->clock * bits_per_pixel / 8; avail = tc->link.num_lanes * tc->link.rate; if (req > avail) return MODE_BAD; return MODE_OK; } static void tc_bridge_mode_set(struct drm_bridge *bridge, const struct drm_display_mode *mode, const struct drm_display_mode *adj) { struct tc_data *tc = bridge_to_tc(bridge); drm_mode_copy(&tc->mode, mode); } static struct edid *tc_get_edid(struct drm_bridge *bridge, struct drm_connector *connector) { struct tc_data *tc = bridge_to_tc(bridge); return drm_get_edid(connector, &tc->aux.ddc); } static int tc_connector_get_modes(struct drm_connector *connector) { struct tc_data *tc = connector_to_tc(connector); int num_modes; struct edid *edid; int ret; ret = tc_get_display_props(tc); if (ret < 0) { dev_err(tc->dev, "failed to read display props: %d\n", ret); return 0; } if (tc->panel_bridge) { 
num_modes = drm_bridge_get_modes(tc->panel_bridge, connector); if (num_modes > 0) return num_modes; } edid = tc_get_edid(&tc->bridge, connector); num_modes = drm_add_edid_modes(connector, edid); kfree(edid); return num_modes; } static const struct drm_connector_helper_funcs tc_connector_helper_funcs = { .get_modes = tc_connector_get_modes, }; static enum drm_connector_status tc_bridge_detect(struct drm_bridge *bridge) { struct tc_data *tc = bridge_to_tc(bridge); bool conn; u32 val; int ret; ret = regmap_read(tc->regmap, GPIOI, &val); if (ret) return connector_status_unknown; conn = val & BIT(tc->hpd_pin); if (conn) return connector_status_connected; else return connector_status_disconnected; } static enum drm_connector_status tc_connector_detect(struct drm_connector *connector, bool force) { struct tc_data *tc = connector_to_tc(connector); if (tc->hpd_pin >= 0) return tc_bridge_detect(&tc->bridge); if (tc->panel_bridge) return connector_status_connected; else return connector_status_unknown; } static const struct drm_connector_funcs tc_connector_funcs = { .detect = tc_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = drm_connector_cleanup, .reset = drm_atomic_helper_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; static int tc_dpi_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct tc_data *tc = bridge_to_tc(bridge); if (!tc->panel_bridge) return 0; return drm_bridge_attach(tc->bridge.encoder, tc->panel_bridge, &tc->bridge, flags); } static int tc_edp_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24; struct tc_data *tc = bridge_to_tc(bridge); struct drm_device *drm = bridge->dev; int ret; if (tc->panel_bridge) { /* If a connector is required then this driver shall create it */ ret = 
drm_bridge_attach(tc->bridge.encoder, tc->panel_bridge, &tc->bridge, flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR); if (ret) return ret; } if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) return 0; tc->aux.drm_dev = drm; ret = drm_dp_aux_register(&tc->aux); if (ret < 0) return ret; /* Create DP/eDP connector */ drm_connector_helper_add(&tc->connector, &tc_connector_helper_funcs); ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs, tc->bridge.type); if (ret) goto aux_unregister; /* Don't poll if don't have HPD connected */ if (tc->hpd_pin >= 0) { if (tc->have_irq) tc->connector.polled = DRM_CONNECTOR_POLL_HPD; else tc->connector.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; } drm_display_info_set_bus_formats(&tc->connector.display_info, &bus_format, 1); tc->connector.display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE; drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder); return 0; aux_unregister: drm_dp_aux_unregister(&tc->aux); return ret; } static void tc_edp_bridge_detach(struct drm_bridge *bridge) { drm_dp_aux_unregister(&bridge_to_tc(bridge)->aux); } #define MAX_INPUT_SEL_FORMATS 1 static u32 * tc_dpi_atomic_get_input_bus_fmts(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state, u32 output_fmt, unsigned int *num_input_fmts) { u32 *input_fmts; *num_input_fmts = 0; input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts), GFP_KERNEL); if (!input_fmts) return NULL; /* This is the DSI-end bus format */ input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24; *num_input_fmts = 1; return input_fmts; } static const struct drm_bridge_funcs tc_dpi_bridge_funcs = { .attach = tc_dpi_bridge_attach, .mode_valid = tc_dpi_mode_valid, .mode_set = tc_bridge_mode_set, .atomic_check = tc_dpi_atomic_check, .atomic_enable = tc_dpi_bridge_atomic_enable, .atomic_disable = tc_dpi_bridge_atomic_disable, 
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_reset = drm_atomic_helper_bridge_reset, .atomic_get_input_bus_fmts = tc_dpi_atomic_get_input_bus_fmts, }; static const struct drm_bridge_funcs tc_edp_bridge_funcs = { .attach = tc_edp_bridge_attach, .detach = tc_edp_bridge_detach, .mode_valid = tc_edp_mode_valid, .mode_set = tc_bridge_mode_set, .atomic_check = tc_edp_atomic_check, .atomic_enable = tc_edp_bridge_atomic_enable, .atomic_disable = tc_edp_bridge_atomic_disable, .detect = tc_bridge_detect, .get_edid = tc_get_edid, .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_reset = drm_atomic_helper_bridge_reset, }; static bool tc_readable_reg(struct device *dev, unsigned int reg) { switch (reg) { /* DSI D-PHY Layer */ case 0x004: case 0x020: case 0x024: case 0x028: case 0x02c: case 0x030: case 0x038: case 0x040: case 0x044: case 0x048: case 0x04c: case 0x050: case 0x054: /* DSI PPI Layer */ case PPI_STARTPPI: case 0x108: case 0x110: case PPI_LPTXTIMECNT: case PPI_LANEENABLE: case PPI_TX_RX_TA: case 0x140: case PPI_D0S_ATMR: case PPI_D1S_ATMR: case 0x14c: case 0x150: case PPI_D0S_CLRSIPOCOUNT: case PPI_D1S_CLRSIPOCOUNT: case PPI_D2S_CLRSIPOCOUNT: case PPI_D3S_CLRSIPOCOUNT: case 0x180: case 0x184: case 0x188: case 0x18c: case 0x190: case 0x1a0: case 0x1a4: case 0x1a8: case 0x1ac: case 0x1b0: case 0x1c0: case 0x1c4: case 0x1c8: case 0x1cc: case 0x1d0: case 0x1e0: case 0x1e4: case 0x1f0: case 0x1f4: /* DSI Protocol Layer */ case DSI_STARTDSI: case 0x208: case DSI_LANEENABLE: case 0x214: case 0x218: case 0x220: case 0x224: case 0x228: case 0x230: /* DSI General */ case 0x300: /* DSI Application Layer */ case 0x400: case 0x404: /* DPI */ case DPIPXLFMT: /* Parallel Output */ case POCTRL: /* Video Path0 Configuration */ case VPCTRL0: case HTIM01: case HTIM02: case VTIM01: case VTIM02: case 
VFUEN0: /* System */ case TC_IDREG: case 0x504: case SYSSTAT: case SYSRSTENB: case SYSCTRL: /* I2C */ case 0x520: /* GPIO */ case GPIOM: case GPIOC: case GPIOO: case GPIOI: /* Interrupt */ case INTCTL_G: case INTSTS_G: case 0x570: case 0x574: case INT_GP0_LCNT: case INT_GP1_LCNT: /* DisplayPort Control */ case DP0CTL: /* DisplayPort Clock */ case DP0_VIDMNGEN0: case DP0_VIDMNGEN1: case DP0_VMNGENSTATUS: case 0x628: case 0x62c: case 0x630: /* DisplayPort Main Channel */ case DP0_SECSAMPLE: case DP0_VIDSYNCDELAY: case DP0_TOTALVAL: case DP0_STARTVAL: case DP0_ACTIVEVAL: case DP0_SYNCVAL: case DP0_MISC: /* DisplayPort Aux Channel */ case DP0_AUXCFG0: case DP0_AUXCFG1: case DP0_AUXADDR: case 0x66c: case 0x670: case 0x674: case 0x678: case 0x67c: case 0x680: case 0x684: case 0x688: case DP0_AUXSTATUS: case DP0_AUXI2CADR: /* DisplayPort Link Training */ case DP0_SRCCTRL: case DP0_LTSTAT: case DP0_SNKLTCHGREQ: case DP0_LTLOOPCTRL: case DP0_SNKLTCTRL: case 0x6e8: case 0x6ec: case 0x6f0: case 0x6f4: /* DisplayPort Audio */ case 0x700: case 0x704: case 0x708: case 0x70c: case 0x710: case 0x714: case 0x718: case 0x71c: case 0x720: /* DisplayPort Source Control */ case DP1_SRCCTRL: /* DisplayPort PHY */ case DP_PHY_CTRL: case 0x810: case 0x814: case 0x820: case 0x840: /* I2S */ case 0x880: case 0x888: case 0x88c: case 0x890: case 0x894: case 0x898: case 0x89c: case 0x8a0: case 0x8a4: case 0x8a8: case 0x8ac: case 0x8b0: case 0x8b4: /* PLL */ case DP0_PLLCTRL: case DP1_PLLCTRL: case PXL_PLLCTRL: case PXL_PLLPARAM: case SYS_PLLPARAM: /* HDCP */ case 0x980: case 0x984: case 0x988: case 0x98c: case 0x990: case 0x994: case 0x998: case 0x99c: case 0x9a0: case 0x9a4: case 0x9a8: case 0x9ac: /* Debug */ case TSTCTL: case PLL_DBG: return true; } return false; } static const struct regmap_range tc_volatile_ranges[] = { regmap_reg_range(DP0_AUXWDATA(0), DP0_AUXSTATUS), regmap_reg_range(DP0_LTSTAT, DP0_SNKLTCHGREQ), regmap_reg_range(DP_PHY_CTRL, DP_PHY_CTRL), regmap_reg_range(DP0_PLLCTRL, 
PXL_PLLCTRL), regmap_reg_range(VFUEN0, VFUEN0), regmap_reg_range(INTSTS_G, INTSTS_G), regmap_reg_range(GPIOI, GPIOI), }; static const struct regmap_access_table tc_volatile_table = { .yes_ranges = tc_volatile_ranges, .n_yes_ranges = ARRAY_SIZE(tc_volatile_ranges), }; static bool tc_writeable_reg(struct device *dev, unsigned int reg) { return (reg != TC_IDREG) && (reg != DP0_LTSTAT) && (reg != DP0_SNKLTCHGREQ); } static const struct regmap_config tc_regmap_config = { .name = "tc358767", .reg_bits = 16, .val_bits = 32, .reg_stride = 4, .max_register = PLL_DBG, .cache_type = REGCACHE_RBTREE, .readable_reg = tc_readable_reg, .volatile_table = &tc_volatile_table, .writeable_reg = tc_writeable_reg, .reg_format_endian = REGMAP_ENDIAN_BIG, .val_format_endian = REGMAP_ENDIAN_LITTLE, }; static irqreturn_t tc_irq_handler(int irq, void *arg) { struct tc_data *tc = arg; u32 val; int r; r = regmap_read(tc->regmap, INTSTS_G, &val); if (r) return IRQ_NONE; if (!val) return IRQ_NONE; if (val & INT_SYSERR) { u32 stat = 0; regmap_read(tc->regmap, SYSSTAT, &stat); dev_err(tc->dev, "syserr %x\n", stat); } if (tc->hpd_pin >= 0 && tc->bridge.dev) { /* * H is triggered when the GPIO goes high. * * LC is triggered when the GPIO goes low and stays low for * the duration of LCNT */ bool h = val & INT_GPIO_H(tc->hpd_pin); bool lc = val & INT_GPIO_LC(tc->hpd_pin); dev_dbg(tc->dev, "GPIO%d: %s %s\n", tc->hpd_pin, h ? "H" : "", lc ? 
"LC" : ""); if (h || lc) drm_kms_helper_hotplug_event(tc->bridge.dev); } regmap_write(tc->regmap, INTSTS_G, val); return IRQ_HANDLED; } static int tc_mipi_dsi_host_attach(struct tc_data *tc) { struct device *dev = tc->dev; struct device_node *host_node; struct device_node *endpoint; struct mipi_dsi_device *dsi; struct mipi_dsi_host *host; const struct mipi_dsi_device_info info = { .type = "tc358767", .channel = 0, .node = NULL, }; int dsi_lanes, ret; endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1); dsi_lanes = drm_of_get_data_lanes_count(endpoint, 1, 4); host_node = of_graph_get_remote_port_parent(endpoint); host = of_find_mipi_dsi_host_by_node(host_node); of_node_put(host_node); of_node_put(endpoint); if (!host) return -EPROBE_DEFER; if (dsi_lanes < 0) return dsi_lanes; dsi = devm_mipi_dsi_device_register_full(dev, host, &info); if (IS_ERR(dsi)) return dev_err_probe(dev, PTR_ERR(dsi), "failed to create dsi device\n"); tc->dsi = dsi; dsi->lanes = dsi_lanes; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS; ret = devm_mipi_dsi_attach(dev, dsi); if (ret < 0) { dev_err(dev, "failed to attach dsi to host: %d\n", ret); return ret; } return 0; } static int tc_probe_dpi_bridge_endpoint(struct tc_data *tc) { struct device *dev = tc->dev; struct drm_bridge *bridge; struct drm_panel *panel; int ret; /* port@1 is the DPI input/output port */ ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, &bridge); if (ret && ret != -ENODEV) return ret; if (panel) { bridge = devm_drm_panel_bridge_add(dev, panel); if (IS_ERR(bridge)) return PTR_ERR(bridge); } if (bridge) { tc->panel_bridge = bridge; tc->bridge.type = DRM_MODE_CONNECTOR_DPI; tc->bridge.funcs = &tc_dpi_bridge_funcs; return 0; } return ret; } static int tc_probe_edp_bridge_endpoint(struct tc_data *tc) { struct device *dev = tc->dev; struct drm_panel *panel; int ret; /* port@2 is the output port */ 
ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &panel, NULL); if (ret && ret != -ENODEV) return ret; if (panel) { struct drm_bridge *panel_bridge; panel_bridge = devm_drm_panel_bridge_add(dev, panel); if (IS_ERR(panel_bridge)) return PTR_ERR(panel_bridge); tc->panel_bridge = panel_bridge; tc->bridge.type = DRM_MODE_CONNECTOR_eDP; } else { tc->bridge.type = DRM_MODE_CONNECTOR_DisplayPort; } tc->bridge.funcs = &tc_edp_bridge_funcs; if (tc->hpd_pin >= 0) tc->bridge.ops |= DRM_BRIDGE_OP_DETECT; tc->bridge.ops |= DRM_BRIDGE_OP_EDID; return 0; } static int tc_probe_bridge_endpoint(struct tc_data *tc) { struct device *dev = tc->dev; struct of_endpoint endpoint; struct device_node *node = NULL; const u8 mode_dpi_to_edp = BIT(1) | BIT(2); const u8 mode_dpi_to_dp = BIT(1); const u8 mode_dsi_to_edp = BIT(0) | BIT(2); const u8 mode_dsi_to_dp = BIT(0); const u8 mode_dsi_to_dpi = BIT(0) | BIT(1); u8 mode = 0; /* * Determine bridge configuration. * * Port allocation: * port@0 - DSI input * port@1 - DPI input/output * port@2 - eDP output * * Possible connections: * DPI -> port@1 -> port@2 -> eDP :: [port@0 is not connected] * DSI -> port@0 -> port@2 -> eDP :: [port@1 is not connected] * DSI -> port@0 -> port@1 -> DPI :: [port@2 is not connected] */ for_each_endpoint_of_node(dev->of_node, node) { of_graph_parse_endpoint(node, &endpoint); if (endpoint.port > 2) { of_node_put(node); return -EINVAL; } mode |= BIT(endpoint.port); } if (mode == mode_dpi_to_edp || mode == mode_dpi_to_dp) { tc->input_connector_dsi = false; return tc_probe_edp_bridge_endpoint(tc); } else if (mode == mode_dsi_to_dpi) { tc->input_connector_dsi = true; return tc_probe_dpi_bridge_endpoint(tc); } else if (mode == mode_dsi_to_edp || mode == mode_dsi_to_dp) { tc->input_connector_dsi = true; return tc_probe_edp_bridge_endpoint(tc); } dev_warn(dev, "Invalid mode (0x%x) is not supported!\n", mode); return -EINVAL; } static int tc_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct 
tc_data *tc; int ret; tc = devm_kzalloc(dev, sizeof(*tc), GFP_KERNEL); if (!tc) return -ENOMEM; tc->dev = dev; ret = tc_probe_bridge_endpoint(tc); if (ret) return ret; tc->refclk = devm_clk_get_enabled(dev, "ref"); if (IS_ERR(tc->refclk)) return dev_err_probe(dev, PTR_ERR(tc->refclk), "Failed to get and enable the ref clk\n"); /* tRSTW = 100 cycles , at 13 MHz that is ~7.69 us */ usleep_range(10, 15); /* Shut down GPIO is optional */ tc->sd_gpio = devm_gpiod_get_optional(dev, "shutdown", GPIOD_OUT_HIGH); if (IS_ERR(tc->sd_gpio)) return PTR_ERR(tc->sd_gpio); if (tc->sd_gpio) { gpiod_set_value_cansleep(tc->sd_gpio, 0); usleep_range(5000, 10000); } /* Reset GPIO is optional */ tc->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(tc->reset_gpio)) return PTR_ERR(tc->reset_gpio); if (tc->reset_gpio) { gpiod_set_value_cansleep(tc->reset_gpio, 1); usleep_range(5000, 10000); } tc->regmap = devm_regmap_init_i2c(client, &tc_regmap_config); if (IS_ERR(tc->regmap)) { ret = PTR_ERR(tc->regmap); dev_err(dev, "Failed to initialize regmap: %d\n", ret); return ret; } ret = of_property_read_u32(dev->of_node, "toshiba,hpd-pin", &tc->hpd_pin); if (ret) { tc->hpd_pin = -ENODEV; } else { if (tc->hpd_pin < 0 || tc->hpd_pin > 1) { dev_err(dev, "failed to parse HPD number\n"); return ret; } } if (client->irq > 0) { /* enable SysErr */ regmap_write(tc->regmap, INTCTL_G, INT_SYSERR); ret = devm_request_threaded_irq(dev, client->irq, NULL, tc_irq_handler, IRQF_ONESHOT, "tc358767-irq", tc); if (ret) { dev_err(dev, "failed to register dp interrupt\n"); return ret; } tc->have_irq = true; } ret = regmap_read(tc->regmap, TC_IDREG, &tc->rev); if (ret) { dev_err(tc->dev, "can not read device ID: %d\n", ret); return ret; } if ((tc->rev != 0x6601) && (tc->rev != 0x6603)) { dev_err(tc->dev, "invalid device ID: 0x%08x\n", tc->rev); return -EINVAL; } tc->assr = (tc->rev == 0x6601); /* Enable ASSR for eDP panels */ if (!tc->reset_gpio) { /* * If the reset pin isn't present, do 
a software reset. It isn't * as thorough as the hardware reset, as we can't reset the I2C * communication block for obvious reasons, but it's getting the * chip into a defined state. */ regmap_update_bits(tc->regmap, SYSRSTENB, ENBLCD0 | ENBBM | ENBDSIRX | ENBREG | ENBHDCP, 0); regmap_update_bits(tc->regmap, SYSRSTENB, ENBLCD0 | ENBBM | ENBDSIRX | ENBREG | ENBHDCP, ENBLCD0 | ENBBM | ENBDSIRX | ENBREG | ENBHDCP); usleep_range(5000, 10000); } if (tc->hpd_pin >= 0) { u32 lcnt_reg = tc->hpd_pin == 0 ? INT_GP0_LCNT : INT_GP1_LCNT; u32 h_lc = INT_GPIO_H(tc->hpd_pin) | INT_GPIO_LC(tc->hpd_pin); /* Set LCNT to 2ms */ regmap_write(tc->regmap, lcnt_reg, clk_get_rate(tc->refclk) * 2 / 1000); /* We need the "alternate" mode for HPD */ regmap_write(tc->regmap, GPIOM, BIT(tc->hpd_pin)); if (tc->have_irq) { /* enable H & LC */ regmap_update_bits(tc->regmap, INTCTL_G, h_lc, h_lc); } } if (tc->bridge.type != DRM_MODE_CONNECTOR_DPI) { /* (e)DP output */ ret = tc_aux_link_setup(tc); if (ret) return ret; } tc->bridge.of_node = dev->of_node; drm_bridge_add(&tc->bridge); i2c_set_clientdata(client, tc); if (tc->input_connector_dsi) { /* DSI input */ ret = tc_mipi_dsi_host_attach(tc); if (ret) { drm_bridge_remove(&tc->bridge); return ret; } } return 0; } static void tc_remove(struct i2c_client *client) { struct tc_data *tc = i2c_get_clientdata(client); drm_bridge_remove(&tc->bridge); } static const struct i2c_device_id tc358767_i2c_ids[] = { { "tc358767", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, tc358767_i2c_ids); static const struct of_device_id tc358767_of_ids[] = { { .compatible = "toshiba,tc358767", }, { } }; MODULE_DEVICE_TABLE(of, tc358767_of_ids); static struct i2c_driver tc358767_driver = { .driver = { .name = "tc358767", .of_match_table = tc358767_of_ids, }, .id_table = tc358767_i2c_ids, .probe = tc_probe, .remove = tc_remove, }; module_i2c_driver(tc358767_driver); MODULE_AUTHOR("Andrey Gusakov <[email protected]>"); MODULE_DESCRIPTION("tc358767 eDP encoder driver"); 
MODULE_LICENSE("GPL");
linux-master
drivers/gpu/drm/bridge/tc358767.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2017 Samsung Electronics * * Authors: * Tomasz Stanislawski <[email protected]> * Maciej Purski <[email protected]> * * Based on sii9234 driver created by: * Adam Hampson <[email protected]> * Erik Gilling <[email protected]> * Shankar Bandal <[email protected]> * Dharam Kumar <[email protected]> */ #include <drm/bridge/mhl.h> #include <drm/drm_bridge.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> #define CBUS_DEVCAP_OFFSET 0x80 #define SII9234_MHL_VERSION 0x11 #define SII9234_SCRATCHPAD_SIZE 0x10 #define SII9234_INT_STAT_SIZE 0x33 #define BIT_TMDS_CCTRL_TMDS_OE BIT(4) #define MHL_HPD_OUT_OVR_EN BIT(4) #define MHL_HPD_OUT_OVR_VAL BIT(5) #define MHL_INIT_TIMEOUT 0x0C /* MHL Tx registers and bits */ #define MHL_TX_SRST 0x05 #define MHL_TX_SYSSTAT_REG 0x09 #define MHL_TX_INTR1_REG 0x71 #define MHL_TX_INTR4_REG 0x74 #define MHL_TX_INTR1_ENABLE_REG 0x75 #define MHL_TX_INTR4_ENABLE_REG 0x78 #define MHL_TX_INT_CTRL_REG 0x79 #define MHL_TX_TMDS_CCTRL 0x80 #define MHL_TX_DISC_CTRL1_REG 0x90 #define MHL_TX_DISC_CTRL2_REG 0x91 #define MHL_TX_DISC_CTRL3_REG 0x92 #define MHL_TX_DISC_CTRL4_REG 0x93 #define MHL_TX_DISC_CTRL5_REG 0x94 #define MHL_TX_DISC_CTRL6_REG 0x95 #define MHL_TX_DISC_CTRL7_REG 0x96 #define MHL_TX_DISC_CTRL8_REG 0x97 #define MHL_TX_STAT2_REG 0x99 #define MHL_TX_MHLTX_CTL1_REG 0xA0 #define MHL_TX_MHLTX_CTL2_REG 0xA1 #define MHL_TX_MHLTX_CTL4_REG 0xA3 #define MHL_TX_MHLTX_CTL6_REG 0xA5 #define MHL_TX_MHLTX_CTL7_REG 0xA6 #define RSEN_STATUS BIT(2) #define HPD_CHANGE_INT BIT(6) #define RSEN_CHANGE_INT BIT(5) #define RGND_READY_INT BIT(6) #define VBUS_LOW_INT BIT(5) #define CBUS_LKOUT_INT BIT(4) #define 
MHL_DISC_FAIL_INT BIT(3) #define MHL_EST_INT BIT(2) #define HPD_CHANGE_INT_MASK BIT(6) #define RSEN_CHANGE_INT_MASK BIT(5) #define RGND_READY_MASK BIT(6) #define CBUS_LKOUT_MASK BIT(4) #define MHL_DISC_FAIL_MASK BIT(3) #define MHL_EST_MASK BIT(2) #define SKIP_GND BIT(6) #define ATT_THRESH_SHIFT 0x04 #define ATT_THRESH_MASK (0x03 << ATT_THRESH_SHIFT) #define USB_D_OEN BIT(3) #define DEGLITCH_TIME_MASK 0x07 #define DEGLITCH_TIME_2MS 0 #define DEGLITCH_TIME_4MS 1 #define DEGLITCH_TIME_8MS 2 #define DEGLITCH_TIME_16MS 3 #define DEGLITCH_TIME_40MS 4 #define DEGLITCH_TIME_50MS 5 #define DEGLITCH_TIME_60MS 6 #define DEGLITCH_TIME_128MS 7 #define USB_D_OVR BIT(7) #define USB_ID_OVR BIT(6) #define DVRFLT_SEL BIT(5) #define BLOCK_RGND_INT BIT(4) #define SKIP_DEG BIT(3) #define CI2CA_POL BIT(2) #define CI2CA_WKUP BIT(1) #define SINGLE_ATT BIT(0) #define USB_D_ODN BIT(5) #define VBUS_CHECK BIT(2) #define RGND_INTP_MASK 0x03 #define RGND_INTP_OPEN 0 #define RGND_INTP_2K 1 #define RGND_INTP_1K 2 #define RGND_INTP_SHORT 3 /* HDMI registers */ #define HDMI_RX_TMDS0_CCTRL1_REG 0x10 #define HDMI_RX_TMDS_CLK_EN_REG 0x11 #define HDMI_RX_TMDS_CH_EN_REG 0x12 #define HDMI_RX_PLL_CALREFSEL_REG 0x17 #define HDMI_RX_PLL_VCOCAL_REG 0x1A #define HDMI_RX_EQ_DATA0_REG 0x22 #define HDMI_RX_EQ_DATA1_REG 0x23 #define HDMI_RX_EQ_DATA2_REG 0x24 #define HDMI_RX_EQ_DATA3_REG 0x25 #define HDMI_RX_EQ_DATA4_REG 0x26 #define HDMI_RX_TMDS_ZONE_CTRL_REG 0x4C #define HDMI_RX_TMDS_MODE_CTRL_REG 0x4D /* CBUS registers */ #define CBUS_INT_STATUS_1_REG 0x08 #define CBUS_INTR1_ENABLE_REG 0x09 #define CBUS_MSC_REQ_ABORT_REASON_REG 0x0D #define CBUS_INT_STATUS_2_REG 0x1E #define CBUS_INTR2_ENABLE_REG 0x1F #define CBUS_LINK_CONTROL_2_REG 0x31 #define CBUS_MHL_STATUS_REG_0 0xB0 #define CBUS_MHL_STATUS_REG_1 0xB1 #define BIT_CBUS_RESET BIT(3) #define SET_HPD_DOWNSTREAM BIT(6) /* TPI registers */ #define TPI_DPD_REG 0x3D /* Timeouts in msec */ #define T_SRC_VBUS_CBUS_TO_STABLE 200 #define T_SRC_CBUS_FLOAT 100 #define 
T_SRC_CBUS_DEGLITCH 2 #define T_SRC_RXSENSE_DEGLITCH 110 #define MHL1_MAX_CLK 75000 /* in kHz */ #define I2C_TPI_ADDR 0x3D #define I2C_HDMI_ADDR 0x49 #define I2C_CBUS_ADDR 0x64 enum sii9234_state { ST_OFF, ST_D3, ST_RGND_INIT, ST_RGND_1K, ST_RSEN_HIGH, ST_MHL_ESTABLISHED, ST_FAILURE_DISCOVERY, ST_FAILURE, }; struct sii9234 { struct i2c_client *client[4]; struct drm_bridge bridge; struct device *dev; struct gpio_desc *gpio_reset; int i2c_error; struct regulator_bulk_data supplies[4]; struct mutex lock; /* Protects fields below and device registers */ enum sii9234_state state; }; enum sii9234_client_id { I2C_MHL, I2C_TPI, I2C_HDMI, I2C_CBUS, }; static const char * const sii9234_client_name[] = { [I2C_MHL] = "MHL", [I2C_TPI] = "TPI", [I2C_HDMI] = "HDMI", [I2C_CBUS] = "CBUS", }; static int sii9234_writeb(struct sii9234 *ctx, int id, int offset, int value) { int ret; struct i2c_client *client = ctx->client[id]; if (ctx->i2c_error) return ctx->i2c_error; ret = i2c_smbus_write_byte_data(client, offset, value); if (ret < 0) dev_err(ctx->dev, "writeb: %4s[0x%02x] <- 0x%02x\n", sii9234_client_name[id], offset, value); ctx->i2c_error = ret; return ret; } static int sii9234_writebm(struct sii9234 *ctx, int id, int offset, int value, int mask) { int ret; struct i2c_client *client = ctx->client[id]; if (ctx->i2c_error) return ctx->i2c_error; ret = i2c_smbus_write_byte(client, offset); if (ret < 0) { dev_err(ctx->dev, "writebm: %4s[0x%02x] <- 0x%02x\n", sii9234_client_name[id], offset, value); ctx->i2c_error = ret; return ret; } ret = i2c_smbus_read_byte(client); if (ret < 0) { dev_err(ctx->dev, "writebm: %4s[0x%02x] <- 0x%02x\n", sii9234_client_name[id], offset, value); ctx->i2c_error = ret; return ret; } value = (value & mask) | (ret & ~mask); ret = i2c_smbus_write_byte_data(client, offset, value); if (ret < 0) { dev_err(ctx->dev, "writebm: %4s[0x%02x] <- 0x%02x\n", sii9234_client_name[id], offset, value); ctx->i2c_error = ret; } return ret; } static int sii9234_readb(struct 
sii9234 *ctx, int id, int offset) { int ret; struct i2c_client *client = ctx->client[id]; if (ctx->i2c_error) return ctx->i2c_error; ret = i2c_smbus_write_byte(client, offset); if (ret < 0) { dev_err(ctx->dev, "readb: %4s[0x%02x]\n", sii9234_client_name[id], offset); ctx->i2c_error = ret; return ret; } ret = i2c_smbus_read_byte(client); if (ret < 0) { dev_err(ctx->dev, "readb: %4s[0x%02x]\n", sii9234_client_name[id], offset); ctx->i2c_error = ret; } return ret; } static int sii9234_clear_error(struct sii9234 *ctx) { int ret = ctx->i2c_error; ctx->i2c_error = 0; return ret; } #define mhl_tx_writeb(sii9234, offset, value) \ sii9234_writeb(sii9234, I2C_MHL, offset, value) #define mhl_tx_writebm(sii9234, offset, value, mask) \ sii9234_writebm(sii9234, I2C_MHL, offset, value, mask) #define mhl_tx_readb(sii9234, offset) \ sii9234_readb(sii9234, I2C_MHL, offset) #define cbus_writeb(sii9234, offset, value) \ sii9234_writeb(sii9234, I2C_CBUS, offset, value) #define cbus_writebm(sii9234, offset, value, mask) \ sii9234_writebm(sii9234, I2C_CBUS, offset, value, mask) #define cbus_readb(sii9234, offset) \ sii9234_readb(sii9234, I2C_CBUS, offset) #define hdmi_writeb(sii9234, offset, value) \ sii9234_writeb(sii9234, I2C_HDMI, offset, value) #define hdmi_writebm(sii9234, offset, value, mask) \ sii9234_writebm(sii9234, I2C_HDMI, offset, value, mask) #define hdmi_readb(sii9234, offset) \ sii9234_readb(sii9234, I2C_HDMI, offset) #define tpi_writeb(sii9234, offset, value) \ sii9234_writeb(sii9234, I2C_TPI, offset, value) #define tpi_writebm(sii9234, offset, value, mask) \ sii9234_writebm(sii9234, I2C_TPI, offset, value, mask) #define tpi_readb(sii9234, offset) \ sii9234_readb(sii9234, I2C_TPI, offset) static u8 sii9234_tmds_control(struct sii9234 *ctx, bool enable) { mhl_tx_writebm(ctx, MHL_TX_TMDS_CCTRL, enable ? ~0 : 0, BIT_TMDS_CCTRL_TMDS_OE); mhl_tx_writebm(ctx, MHL_TX_INT_CTRL_REG, enable ? 
~0 : 0, MHL_HPD_OUT_OVR_EN | MHL_HPD_OUT_OVR_VAL); return sii9234_clear_error(ctx); } static int sii9234_cbus_reset(struct sii9234 *ctx) { int i; mhl_tx_writebm(ctx, MHL_TX_SRST, ~0, BIT_CBUS_RESET); msleep(T_SRC_CBUS_DEGLITCH); mhl_tx_writebm(ctx, MHL_TX_SRST, 0, BIT_CBUS_RESET); for (i = 0; i < 4; i++) { /* * Enable WRITE_STAT interrupt for writes to all * 4 MSC Status registers. */ cbus_writeb(ctx, 0xE0 + i, 0xF2); /* * Enable SET_INT interrupt for writes to all * 4 MSC Interrupt registers. */ cbus_writeb(ctx, 0xF0 + i, 0xF2); } return sii9234_clear_error(ctx); } /* Require to chek mhl imformation of samsung in cbus_init_register */ static int sii9234_cbus_init(struct sii9234 *ctx) { cbus_writeb(ctx, 0x07, 0xF2); cbus_writeb(ctx, 0x40, 0x03); cbus_writeb(ctx, 0x42, 0x06); cbus_writeb(ctx, 0x36, 0x0C); cbus_writeb(ctx, 0x3D, 0xFD); cbus_writeb(ctx, 0x1C, 0x01); cbus_writeb(ctx, 0x1D, 0x0F); cbus_writeb(ctx, 0x44, 0x02); /* Setup our devcap */ cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_DEV_STATE, 0x00); cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_MHL_VERSION, SII9234_MHL_VERSION); cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_CAT, MHL_DCAP_CAT_SOURCE); cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_ADOPTER_ID_H, 0x01); cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_ADOPTER_ID_L, 0x41); cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_VID_LINK_MODE, MHL_DCAP_VID_LINK_RGB444 | MHL_DCAP_VID_LINK_YCBCR444); cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_VIDEO_TYPE, MHL_DCAP_VT_GRAPHICS); cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_LOG_DEV_MAP, MHL_DCAP_LD_GUI); cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_BANDWIDTH, 0x0F); cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_FEATURE_FLAG, MHL_DCAP_FEATURE_RCP_SUPPORT | MHL_DCAP_FEATURE_RAP_SUPPORT | MHL_DCAP_FEATURE_SP_SUPPORT); cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_DEVICE_ID_H, 0x0); cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_DEVICE_ID_L, 0x0); cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + 
MHL_DCAP_SCRATCHPAD_SIZE, SII9234_SCRATCHPAD_SIZE); cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_INT_STAT_SIZE, SII9234_INT_STAT_SIZE); cbus_writeb(ctx, CBUS_DEVCAP_OFFSET + MHL_DCAP_RESERVED, 0); cbus_writebm(ctx, 0x31, 0x0C, 0x0C); cbus_writeb(ctx, 0x30, 0x01); cbus_writebm(ctx, 0x3C, 0x30, 0x38); cbus_writebm(ctx, 0x22, 0x0D, 0x0F); cbus_writebm(ctx, 0x2E, 0x15, 0x15); cbus_writeb(ctx, CBUS_INTR1_ENABLE_REG, 0); cbus_writeb(ctx, CBUS_INTR2_ENABLE_REG, 0); return sii9234_clear_error(ctx); } static void force_usb_id_switch_open(struct sii9234 *ctx) { /* Disable CBUS discovery */ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL1_REG, 0, 0x01); /* Force USB ID switch to open */ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL6_REG, ~0, USB_ID_OVR); mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL3_REG, ~0, 0x86); /* Force upstream HPD to 0 when not in MHL mode. */ mhl_tx_writebm(ctx, MHL_TX_INT_CTRL_REG, 0, 0x30); } static void release_usb_id_switch_open(struct sii9234 *ctx) { msleep(T_SRC_CBUS_FLOAT); /* Clear USB ID switch to open */ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL6_REG, 0, USB_ID_OVR); /* Enable CBUS discovery */ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL1_REG, ~0, 0x01); } static int sii9234_power_init(struct sii9234 *ctx) { /* Force the SiI9234 into the D0 state. 
*/ tpi_writeb(ctx, TPI_DPD_REG, 0x3F); /* Enable TxPLL Clock */ hdmi_writeb(ctx, HDMI_RX_TMDS_CLK_EN_REG, 0x01); /* Enable Tx Clock Path & Equalizer */ hdmi_writeb(ctx, HDMI_RX_TMDS_CH_EN_REG, 0x15); /* Power Up TMDS */ mhl_tx_writeb(ctx, 0x08, 0x35); return sii9234_clear_error(ctx); } static int sii9234_hdmi_init(struct sii9234 *ctx) { hdmi_writeb(ctx, HDMI_RX_TMDS0_CCTRL1_REG, 0xC1); hdmi_writeb(ctx, HDMI_RX_PLL_CALREFSEL_REG, 0x03); hdmi_writeb(ctx, HDMI_RX_PLL_VCOCAL_REG, 0x20); hdmi_writeb(ctx, HDMI_RX_EQ_DATA0_REG, 0x8A); hdmi_writeb(ctx, HDMI_RX_EQ_DATA1_REG, 0x6A); hdmi_writeb(ctx, HDMI_RX_EQ_DATA2_REG, 0xAA); hdmi_writeb(ctx, HDMI_RX_EQ_DATA3_REG, 0xCA); hdmi_writeb(ctx, HDMI_RX_EQ_DATA4_REG, 0xEA); hdmi_writeb(ctx, HDMI_RX_TMDS_ZONE_CTRL_REG, 0xA0); hdmi_writeb(ctx, HDMI_RX_TMDS_MODE_CTRL_REG, 0x00); mhl_tx_writeb(ctx, MHL_TX_TMDS_CCTRL, 0x34); hdmi_writeb(ctx, 0x45, 0x44); hdmi_writeb(ctx, 0x31, 0x0A); hdmi_writeb(ctx, HDMI_RX_TMDS0_CCTRL1_REG, 0xC1); return sii9234_clear_error(ctx); } static int sii9234_mhl_tx_ctl_int(struct sii9234 *ctx) { mhl_tx_writeb(ctx, MHL_TX_MHLTX_CTL1_REG, 0xD0); mhl_tx_writeb(ctx, MHL_TX_MHLTX_CTL2_REG, 0xFC); mhl_tx_writeb(ctx, MHL_TX_MHLTX_CTL4_REG, 0xEB); mhl_tx_writeb(ctx, MHL_TX_MHLTX_CTL7_REG, 0x0C); return sii9234_clear_error(ctx); } static int sii9234_reset(struct sii9234 *ctx) { int ret; sii9234_clear_error(ctx); ret = sii9234_power_init(ctx); if (ret < 0) return ret; ret = sii9234_cbus_reset(ctx); if (ret < 0) return ret; ret = sii9234_hdmi_init(ctx); if (ret < 0) return ret; ret = sii9234_mhl_tx_ctl_int(ctx); if (ret < 0) return ret; /* Enable HDCP Compliance safety */ mhl_tx_writeb(ctx, 0x2B, 0x01); /* CBUS discovery cycle time for each drive and float = 150us */ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL1_REG, 0x04, 0x06); /* Clear bit 6 (reg_skip_rgnd) */ mhl_tx_writeb(ctx, MHL_TX_DISC_CTRL2_REG, (1 << 7) /* Reserved */ | 2 << ATT_THRESH_SHIFT | DEGLITCH_TIME_50MS); /* * Changed from 66 to 65 for 94[1:0] = 01 = 5k 
reg_cbusmhl_pup_sel * 1.8V CBUS VTH & GND threshold * to meet CTS 3.3.7.2 spec */ mhl_tx_writeb(ctx, MHL_TX_DISC_CTRL5_REG, 0x77); cbus_writebm(ctx, CBUS_LINK_CONTROL_2_REG, ~0, MHL_INIT_TIMEOUT); mhl_tx_writeb(ctx, MHL_TX_MHLTX_CTL6_REG, 0xA0); /* RGND & single discovery attempt (RGND blocking) */ mhl_tx_writeb(ctx, MHL_TX_DISC_CTRL6_REG, BLOCK_RGND_INT | DVRFLT_SEL | SINGLE_ATT); /* Use VBUS path of discovery state machine */ mhl_tx_writeb(ctx, MHL_TX_DISC_CTRL8_REG, 0); /* 0x92[3] sets the CBUS / ID switch */ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL6_REG, ~0, USB_ID_OVR); /* * To allow RGND engine to operate correctly. * When moving the chip from D2 to D0 (power up, init regs) * the values should be * 94[1:0] = 01 reg_cbusmhl_pup_sel[1:0] should be set for 5k * 93[7:6] = 10 reg_cbusdisc_pup_sel[1:0] should be * set for 10k (default) * 93[5:4] = 00 reg_cbusidle_pup_sel[1:0] = open (default) */ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL3_REG, ~0, 0x86); /* * Change from CC to 8C to match 5K * to meet CTS 3.3.72 spec */ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL4_REG, ~0, 0x8C); /* Configure the interrupt as active high */ mhl_tx_writebm(ctx, MHL_TX_INT_CTRL_REG, 0, 0x06); msleep(25); /* Release usb_id switch */ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL6_REG, 0, USB_ID_OVR); mhl_tx_writeb(ctx, MHL_TX_DISC_CTRL1_REG, 0x27); ret = sii9234_clear_error(ctx); if (ret < 0) return ret; ret = sii9234_cbus_init(ctx); if (ret < 0) return ret; /* Enable Auto soft reset on SCDT = 0 */ mhl_tx_writeb(ctx, 0x05, 0x04); /* HDMI Transcode mode enable */ mhl_tx_writeb(ctx, 0x0D, 0x1C); mhl_tx_writeb(ctx, MHL_TX_INTR4_ENABLE_REG, RGND_READY_MASK | CBUS_LKOUT_MASK | MHL_DISC_FAIL_MASK | MHL_EST_MASK); mhl_tx_writeb(ctx, MHL_TX_INTR1_ENABLE_REG, 0x60); /* This point is very important before measure RGND impedance */ force_usb_id_switch_open(ctx); mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL4_REG, 0, 0xF0); mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL5_REG, 0, 0x03); release_usb_id_switch_open(ctx); /* Force upstream HPD 
to 0 when not in MHL mode */ mhl_tx_writebm(ctx, MHL_TX_INT_CTRL_REG, 0, 1 << 5); mhl_tx_writebm(ctx, MHL_TX_INT_CTRL_REG, ~0, 1 << 4); return sii9234_clear_error(ctx); } static int sii9234_goto_d3(struct sii9234 *ctx) { int ret; dev_dbg(ctx->dev, "sii9234: detection started d3\n"); ret = sii9234_reset(ctx); if (ret < 0) goto exit; hdmi_writeb(ctx, 0x01, 0x03); tpi_writebm(ctx, TPI_DPD_REG, 0, 1); /* I2C above is expected to fail because power goes down */ sii9234_clear_error(ctx); ctx->state = ST_D3; return 0; exit: dev_err(ctx->dev, "%s failed\n", __func__); return -1; } static int sii9234_hw_on(struct sii9234 *ctx) { return regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies); } static void sii9234_hw_off(struct sii9234 *ctx) { gpiod_set_value(ctx->gpio_reset, 1); msleep(20); regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies); } static void sii9234_hw_reset(struct sii9234 *ctx) { gpiod_set_value(ctx->gpio_reset, 1); msleep(20); gpiod_set_value(ctx->gpio_reset, 0); } static void sii9234_cable_in(struct sii9234 *ctx) { int ret; mutex_lock(&ctx->lock); if (ctx->state != ST_OFF) goto unlock; ret = sii9234_hw_on(ctx); if (ret < 0) goto unlock; sii9234_hw_reset(ctx); sii9234_goto_d3(ctx); /* To avoid irq storm, when hw is in meta state */ enable_irq(to_i2c_client(ctx->dev)->irq); unlock: mutex_unlock(&ctx->lock); } static void sii9234_cable_out(struct sii9234 *ctx) { mutex_lock(&ctx->lock); if (ctx->state == ST_OFF) goto unlock; disable_irq(to_i2c_client(ctx->dev)->irq); tpi_writeb(ctx, TPI_DPD_REG, 0); /* Turn on&off hpd festure for only QCT HDMI */ sii9234_hw_off(ctx); ctx->state = ST_OFF; unlock: mutex_unlock(&ctx->lock); } static enum sii9234_state sii9234_rgnd_ready_irq(struct sii9234 *ctx) { int value; if (ctx->state == ST_D3) { int ret; dev_dbg(ctx->dev, "RGND_READY_INT\n"); sii9234_hw_reset(ctx); ret = sii9234_reset(ctx); if (ret < 0) { dev_err(ctx->dev, "sii9234_reset() failed\n"); return ST_FAILURE; } return ST_RGND_INIT; } /* Got 
interrupt in inappropriate state */ if (ctx->state != ST_RGND_INIT) return ST_FAILURE; value = mhl_tx_readb(ctx, MHL_TX_STAT2_REG); if (sii9234_clear_error(ctx)) return ST_FAILURE; if ((value & RGND_INTP_MASK) != RGND_INTP_1K) { dev_warn(ctx->dev, "RGND is not 1k\n"); return ST_RGND_INIT; } dev_dbg(ctx->dev, "RGND 1K!!\n"); mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL4_REG, ~0, 0x8C); mhl_tx_writeb(ctx, MHL_TX_DISC_CTRL5_REG, 0x77); mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL6_REG, ~0, 0x05); if (sii9234_clear_error(ctx)) return ST_FAILURE; msleep(T_SRC_VBUS_CBUS_TO_STABLE); return ST_RGND_1K; } static enum sii9234_state sii9234_mhl_established(struct sii9234 *ctx) { dev_dbg(ctx->dev, "mhl est interrupt\n"); /* Discovery override */ mhl_tx_writeb(ctx, MHL_TX_MHLTX_CTL1_REG, 0x10); /* Increase DDC translation layer timer (byte mode) */ cbus_writeb(ctx, 0x07, 0x32); cbus_writebm(ctx, 0x44, ~0, 1 << 1); /* Keep the discovery enabled. Need RGND interrupt */ mhl_tx_writebm(ctx, MHL_TX_DISC_CTRL1_REG, ~0, 1); mhl_tx_writeb(ctx, MHL_TX_INTR1_ENABLE_REG, RSEN_CHANGE_INT_MASK | HPD_CHANGE_INT_MASK); if (sii9234_clear_error(ctx)) return ST_FAILURE; return ST_MHL_ESTABLISHED; } static enum sii9234_state sii9234_hpd_change(struct sii9234 *ctx) { int value; value = cbus_readb(ctx, CBUS_MSC_REQ_ABORT_REASON_REG); if (sii9234_clear_error(ctx)) return ST_FAILURE; if (value & SET_HPD_DOWNSTREAM) { /* Downstream HPD High, Enable TMDS */ sii9234_tmds_control(ctx, true); } else { /* Downstream HPD Low, Disable TMDS */ sii9234_tmds_control(ctx, false); } return ctx->state; } static enum sii9234_state sii9234_rsen_change(struct sii9234 *ctx) { int value; /* Work_around code to handle wrong interrupt */ if (ctx->state != ST_RGND_1K) { dev_err(ctx->dev, "RSEN_HIGH without RGND_1K\n"); return ST_FAILURE; } value = mhl_tx_readb(ctx, MHL_TX_SYSSTAT_REG); if (value < 0) return ST_FAILURE; if (value & RSEN_STATUS) { dev_dbg(ctx->dev, "MHL cable connected.. 
RSEN High\n"); return ST_RSEN_HIGH; } dev_dbg(ctx->dev, "RSEN lost\n"); /* * Once RSEN loss is confirmed,we need to check * based on cable status and chip power status,whether * it is SINK Loss(HDMI cable not connected, TV Off) * or MHL cable disconnection * TODO: Define the below mhl_disconnection() */ msleep(T_SRC_RXSENSE_DEGLITCH); value = mhl_tx_readb(ctx, MHL_TX_SYSSTAT_REG); if (value < 0) return ST_FAILURE; dev_dbg(ctx->dev, "sys_stat: %x\n", value); if (value & RSEN_STATUS) { dev_dbg(ctx->dev, "RSEN recovery\n"); return ST_RSEN_HIGH; } dev_dbg(ctx->dev, "RSEN Really LOW\n"); /* To meet CTS 3.3.22.2 spec */ sii9234_tmds_control(ctx, false); force_usb_id_switch_open(ctx); release_usb_id_switch_open(ctx); return ST_FAILURE; } static irqreturn_t sii9234_irq_thread(int irq, void *data) { struct sii9234 *ctx = data; int intr1, intr4; int intr1_en, intr4_en; int cbus_intr1, cbus_intr2; dev_dbg(ctx->dev, "%s\n", __func__); mutex_lock(&ctx->lock); intr1 = mhl_tx_readb(ctx, MHL_TX_INTR1_REG); intr4 = mhl_tx_readb(ctx, MHL_TX_INTR4_REG); intr1_en = mhl_tx_readb(ctx, MHL_TX_INTR1_ENABLE_REG); intr4_en = mhl_tx_readb(ctx, MHL_TX_INTR4_ENABLE_REG); cbus_intr1 = cbus_readb(ctx, CBUS_INT_STATUS_1_REG); cbus_intr2 = cbus_readb(ctx, CBUS_INT_STATUS_2_REG); if (sii9234_clear_error(ctx)) goto done; dev_dbg(ctx->dev, "irq %02x/%02x %02x/%02x %02x/%02x\n", intr1, intr1_en, intr4, intr4_en, cbus_intr1, cbus_intr2); if (intr4 & RGND_READY_INT) ctx->state = sii9234_rgnd_ready_irq(ctx); if (intr1 & RSEN_CHANGE_INT) ctx->state = sii9234_rsen_change(ctx); if (intr4 & MHL_EST_INT) ctx->state = sii9234_mhl_established(ctx); if (intr1 & HPD_CHANGE_INT) ctx->state = sii9234_hpd_change(ctx); if (intr4 & CBUS_LKOUT_INT) ctx->state = ST_FAILURE; if (intr4 & MHL_DISC_FAIL_INT) ctx->state = ST_FAILURE_DISCOVERY; done: /* Clean interrupt status and pending flags */ mhl_tx_writeb(ctx, MHL_TX_INTR1_REG, intr1); mhl_tx_writeb(ctx, MHL_TX_INTR4_REG, intr4); cbus_writeb(ctx, CBUS_MHL_STATUS_REG_0, 
0xFF); cbus_writeb(ctx, CBUS_MHL_STATUS_REG_1, 0xFF); cbus_writeb(ctx, CBUS_INT_STATUS_1_REG, cbus_intr1); cbus_writeb(ctx, CBUS_INT_STATUS_2_REG, cbus_intr2); sii9234_clear_error(ctx); if (ctx->state == ST_FAILURE) { dev_dbg(ctx->dev, "try to reset after failure\n"); sii9234_hw_reset(ctx); sii9234_goto_d3(ctx); } if (ctx->state == ST_FAILURE_DISCOVERY) { dev_err(ctx->dev, "discovery failed, no power for MHL?\n"); tpi_writebm(ctx, TPI_DPD_REG, 0, 1); ctx->state = ST_D3; } mutex_unlock(&ctx->lock); return IRQ_HANDLED; } static int sii9234_init_resources(struct sii9234 *ctx, struct i2c_client *client) { struct i2c_adapter *adapter = client->adapter; int ret; if (!ctx->dev->of_node) { dev_err(ctx->dev, "not DT device\n"); return -ENODEV; } ctx->gpio_reset = devm_gpiod_get(ctx->dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(ctx->gpio_reset)) { dev_err(ctx->dev, "failed to get reset gpio from DT\n"); return PTR_ERR(ctx->gpio_reset); } ctx->supplies[0].supply = "avcc12"; ctx->supplies[1].supply = "avcc33"; ctx->supplies[2].supply = "iovcc18"; ctx->supplies[3].supply = "cvcc12"; ret = devm_regulator_bulk_get(ctx->dev, 4, ctx->supplies); if (ret) { if (ret != -EPROBE_DEFER) dev_err(ctx->dev, "regulator_bulk failed\n"); return ret; } ctx->client[I2C_MHL] = client; ctx->client[I2C_TPI] = devm_i2c_new_dummy_device(&client->dev, adapter, I2C_TPI_ADDR); if (IS_ERR(ctx->client[I2C_TPI])) { dev_err(ctx->dev, "failed to create TPI client\n"); return PTR_ERR(ctx->client[I2C_TPI]); } ctx->client[I2C_HDMI] = devm_i2c_new_dummy_device(&client->dev, adapter, I2C_HDMI_ADDR); if (IS_ERR(ctx->client[I2C_HDMI])) { dev_err(ctx->dev, "failed to create HDMI RX client\n"); return PTR_ERR(ctx->client[I2C_HDMI]); } ctx->client[I2C_CBUS] = devm_i2c_new_dummy_device(&client->dev, adapter, I2C_CBUS_ADDR); if (IS_ERR(ctx->client[I2C_CBUS])) { dev_err(ctx->dev, "failed to create CBUS client\n"); return PTR_ERR(ctx->client[I2C_CBUS]); } return 0; } static enum drm_mode_status sii9234_mode_valid(struct 
drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode) { if (mode->clock > MHL1_MAX_CLK) return MODE_CLOCK_HIGH; return MODE_OK; } static const struct drm_bridge_funcs sii9234_bridge_funcs = { .mode_valid = sii9234_mode_valid, }; static int sii9234_probe(struct i2c_client *client) { struct i2c_adapter *adapter = client->adapter; struct sii9234 *ctx; struct device *dev = &client->dev; int ret; ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->dev = dev; mutex_init(&ctx->lock); if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { dev_err(dev, "I2C adapter lacks SMBUS feature\n"); return -EIO; } if (!client->irq) { dev_err(dev, "no irq provided\n"); return -EINVAL; } irq_set_status_flags(client->irq, IRQ_NOAUTOEN); ret = devm_request_threaded_irq(dev, client->irq, NULL, sii9234_irq_thread, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "sii9234", ctx); if (ret < 0) { dev_err(dev, "failed to install IRQ handler\n"); return ret; } ret = sii9234_init_resources(ctx, client); if (ret < 0) return ret; i2c_set_clientdata(client, ctx); ctx->bridge.funcs = &sii9234_bridge_funcs; ctx->bridge.of_node = dev->of_node; drm_bridge_add(&ctx->bridge); sii9234_cable_in(ctx); return 0; } static void sii9234_remove(struct i2c_client *client) { struct sii9234 *ctx = i2c_get_clientdata(client); sii9234_cable_out(ctx); drm_bridge_remove(&ctx->bridge); } static const struct of_device_id sii9234_dt_match[] = { { .compatible = "sil,sii9234" }, { }, }; MODULE_DEVICE_TABLE(of, sii9234_dt_match); static const struct i2c_device_id sii9234_id[] = { { "SII9234", 0 }, { }, }; MODULE_DEVICE_TABLE(i2c, sii9234_id); static struct i2c_driver sii9234_driver = { .driver = { .name = "sii9234", .of_match_table = sii9234_dt_match, }, .probe = sii9234_probe, .remove = sii9234_remove, .id_table = sii9234_id, }; module_i2c_driver(sii9234_driver); MODULE_LICENSE("GPL");
linux-master
drivers/gpu/drm/bridge/sii9234.c
// SPDX-License-Identifier: GPL-2.0 /* * Lontium LT9211 bridge driver * * LT9211 is capable of converting: * 2xDSI/2xLVDS/1xDPI -> 2xDSI/2xLVDS/1xDPI * Currently supported is: * 1xDSI -> 1xLVDS * * Copyright (C) 2022 Marek Vasut <[email protected]> */ #include <linux/bits.h> #include <linux/clk.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/media-bus-format.h> #include <linux/module.h> #include <linux/of_graph.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #define REG_PAGE_CONTROL 0xff #define REG_CHIPID0 0x8100 #define REG_CHIPID0_VALUE 0x18 #define REG_CHIPID1 0x8101 #define REG_CHIPID1_VALUE 0x01 #define REG_CHIPID2 0x8102 #define REG_CHIPID2_VALUE 0xe3 #define REG_DSI_LANE 0xd000 /* DSI lane count - 0 means 4 lanes ; 1, 2, 3 means 1, 2, 3 lanes. 
*/ #define REG_DSI_LANE_COUNT(n) ((n) & 3) struct lt9211 { struct drm_bridge bridge; struct device *dev; struct regmap *regmap; struct mipi_dsi_device *dsi; struct drm_bridge *panel_bridge; struct gpio_desc *reset_gpio; struct regulator *vccio; bool lvds_dual_link; bool lvds_dual_link_even_odd_swap; }; static const struct regmap_range lt9211_rw_ranges[] = { regmap_reg_range(0xff, 0xff), regmap_reg_range(0x8100, 0x816b), regmap_reg_range(0x8200, 0x82aa), regmap_reg_range(0x8500, 0x85ff), regmap_reg_range(0x8600, 0x86a0), regmap_reg_range(0x8700, 0x8746), regmap_reg_range(0xd000, 0xd0a7), regmap_reg_range(0xd400, 0xd42c), regmap_reg_range(0xd800, 0xd838), regmap_reg_range(0xd9c0, 0xd9d5), }; static const struct regmap_access_table lt9211_rw_table = { .yes_ranges = lt9211_rw_ranges, .n_yes_ranges = ARRAY_SIZE(lt9211_rw_ranges), }; static const struct regmap_range_cfg lt9211_range = { .name = "lt9211", .range_min = 0x0000, .range_max = 0xda00, .selector_reg = REG_PAGE_CONTROL, .selector_mask = 0xff, .selector_shift = 0, .window_start = 0, .window_len = 0x100, }; static const struct regmap_config lt9211_regmap_config = { .reg_bits = 8, .val_bits = 8, .rd_table = &lt9211_rw_table, .wr_table = &lt9211_rw_table, .volatile_table = &lt9211_rw_table, .ranges = &lt9211_range, .num_ranges = 1, .cache_type = REGCACHE_RBTREE, .max_register = 0xda00, }; static struct lt9211 *bridge_to_lt9211(struct drm_bridge *bridge) { return container_of(bridge, struct lt9211, bridge); } static int lt9211_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct lt9211 *ctx = bridge_to_lt9211(bridge); return drm_bridge_attach(bridge->encoder, ctx->panel_bridge, &ctx->bridge, flags); } static int lt9211_read_chipid(struct lt9211 *ctx) { u8 chipid[3]; int ret; /* Read Chip ID registers and verify the chip can communicate. 
*/ ret = regmap_bulk_read(ctx->regmap, REG_CHIPID0, chipid, 3); if (ret < 0) { dev_err(ctx->dev, "Failed to read Chip ID: %d\n", ret); return ret; } /* Test for known Chip ID. */ if (chipid[0] != REG_CHIPID0_VALUE || chipid[1] != REG_CHIPID1_VALUE || chipid[2] != REG_CHIPID2_VALUE) { dev_err(ctx->dev, "Unknown Chip ID: 0x%02x 0x%02x 0x%02x\n", chipid[0], chipid[1], chipid[2]); return -EINVAL; } return 0; } static int lt9211_system_init(struct lt9211 *ctx) { const struct reg_sequence lt9211_system_init_seq[] = { { 0x8201, 0x18 }, { 0x8606, 0x61 }, { 0x8607, 0xa8 }, { 0x8714, 0x08 }, { 0x8715, 0x00 }, { 0x8718, 0x0f }, { 0x8722, 0x08 }, { 0x8723, 0x00 }, { 0x8726, 0x0f }, { 0x810b, 0xfe }, }; return regmap_multi_reg_write(ctx->regmap, lt9211_system_init_seq, ARRAY_SIZE(lt9211_system_init_seq)); } static int lt9211_configure_rx(struct lt9211 *ctx) { const struct reg_sequence lt9211_rx_phy_seq[] = { { 0x8202, 0x44 }, { 0x8204, 0xa0 }, { 0x8205, 0x22 }, { 0x8207, 0x9f }, { 0x8208, 0xfc }, /* ORR with 0xf8 here to enable DSI DN/DP swap. 
*/ { 0x8209, 0x01 }, { 0x8217, 0x0c }, { 0x8633, 0x1b }, }; const struct reg_sequence lt9211_rx_cal_reset_seq[] = { { 0x8120, 0x7f }, { 0x8120, 0xff }, }; const struct reg_sequence lt9211_rx_dig_seq[] = { { 0x8630, 0x85 }, /* 0x8588: BIT 6 set = MIPI-RX, BIT 4 unset = LVDS-TX */ { 0x8588, 0x40 }, { 0x85ff, 0xd0 }, { REG_DSI_LANE, REG_DSI_LANE_COUNT(ctx->dsi->lanes) }, { 0xd002, 0x05 }, }; const struct reg_sequence lt9211_rx_div_reset_seq[] = { { 0x810a, 0xc0 }, { 0x8120, 0xbf }, }; const struct reg_sequence lt9211_rx_div_clear_seq[] = { { 0x810a, 0xc1 }, { 0x8120, 0xff }, }; int ret; ret = regmap_multi_reg_write(ctx->regmap, lt9211_rx_phy_seq, ARRAY_SIZE(lt9211_rx_phy_seq)); if (ret) return ret; ret = regmap_multi_reg_write(ctx->regmap, lt9211_rx_cal_reset_seq, ARRAY_SIZE(lt9211_rx_cal_reset_seq)); if (ret) return ret; ret = regmap_multi_reg_write(ctx->regmap, lt9211_rx_dig_seq, ARRAY_SIZE(lt9211_rx_dig_seq)); if (ret) return ret; ret = regmap_multi_reg_write(ctx->regmap, lt9211_rx_div_reset_seq, ARRAY_SIZE(lt9211_rx_div_reset_seq)); if (ret) return ret; usleep_range(10000, 15000); return regmap_multi_reg_write(ctx->regmap, lt9211_rx_div_clear_seq, ARRAY_SIZE(lt9211_rx_div_clear_seq)); } static int lt9211_autodetect_rx(struct lt9211 *ctx, const struct drm_display_mode *mode) { u16 width, height; u32 byteclk; u8 buf[5]; u8 format; u8 bc[3]; int ret; /* Measure ByteClock frequency. */ ret = regmap_write(ctx->regmap, 0x8600, 0x01); if (ret) return ret; /* Give the chip time to lock onto RX stream. */ msleep(100); /* Read the ByteClock frequency from the chip. 
*/ ret = regmap_bulk_read(ctx->regmap, 0x8608, bc, sizeof(bc)); if (ret) return ret; /* RX ByteClock in kHz */ byteclk = ((bc[0] & 0xf) << 16) | (bc[1] << 8) | bc[2]; /* Width/Height/Format Auto-detection */ ret = regmap_bulk_read(ctx->regmap, 0xd082, buf, sizeof(buf)); if (ret) return ret; width = (buf[0] << 8) | buf[1]; height = (buf[3] << 8) | buf[4]; format = buf[2] & 0xf; if (format == 0x3) { /* YUV422 16bit */ width /= 2; } else if (format == 0xa) { /* RGB888 24bit */ width /= 3; } else { dev_err(ctx->dev, "Unsupported DSI pixel format 0x%01x\n", format); return -EINVAL; } if (width != mode->hdisplay) { dev_err(ctx->dev, "RX: Detected DSI width (%d) does not match mode hdisplay (%d)\n", width, mode->hdisplay); return -EINVAL; } if (height != mode->vdisplay) { dev_err(ctx->dev, "RX: Detected DSI height (%d) does not match mode vdisplay (%d)\n", height, mode->vdisplay); return -EINVAL; } dev_dbg(ctx->dev, "RX: %dx%d format=0x%01x byteclock=%d kHz\n", width, height, format, byteclk); return 0; } static int lt9211_configure_timing(struct lt9211 *ctx, const struct drm_display_mode *mode) { const struct reg_sequence lt9211_timing[] = { { 0xd00d, (mode->vtotal >> 8) & 0xff }, { 0xd00e, mode->vtotal & 0xff }, { 0xd00f, (mode->vdisplay >> 8) & 0xff }, { 0xd010, mode->vdisplay & 0xff }, { 0xd011, (mode->htotal >> 8) & 0xff }, { 0xd012, mode->htotal & 0xff }, { 0xd013, (mode->hdisplay >> 8) & 0xff }, { 0xd014, mode->hdisplay & 0xff }, { 0xd015, (mode->vsync_end - mode->vsync_start) & 0xff }, { 0xd016, (mode->hsync_end - mode->hsync_start) & 0xff }, { 0xd017, ((mode->vsync_start - mode->vdisplay) >> 8) & 0xff }, { 0xd018, (mode->vsync_start - mode->vdisplay) & 0xff }, { 0xd019, ((mode->hsync_start - mode->hdisplay) >> 8) & 0xff }, { 0xd01a, (mode->hsync_start - mode->hdisplay) & 0xff }, }; return regmap_multi_reg_write(ctx->regmap, lt9211_timing, ARRAY_SIZE(lt9211_timing)); } static int lt9211_configure_plls(struct lt9211 *ctx, const struct drm_display_mode *mode) { 
const struct reg_sequence lt9211_pcr_seq[] = { { 0xd026, 0x17 }, { 0xd027, 0xc3 }, { 0xd02d, 0x30 }, { 0xd031, 0x10 }, { 0xd023, 0x20 }, { 0xd038, 0x02 }, { 0xd039, 0x10 }, { 0xd03a, 0x20 }, { 0xd03b, 0x60 }, { 0xd03f, 0x04 }, { 0xd040, 0x08 }, { 0xd041, 0x10 }, { 0x810b, 0xee }, { 0x810b, 0xfe }, }; unsigned int pval; int ret; /* DeSSC PLL reference clock is 25 MHz XTal. */ ret = regmap_write(ctx->regmap, 0x822d, 0x48); if (ret) return ret; if (mode->clock < 44000) { ret = regmap_write(ctx->regmap, 0x8235, 0x83); } else if (mode->clock < 88000) { ret = regmap_write(ctx->regmap, 0x8235, 0x82); } else if (mode->clock < 176000) { ret = regmap_write(ctx->regmap, 0x8235, 0x81); } else { dev_err(ctx->dev, "Unsupported mode clock (%d kHz) above 176 MHz.\n", mode->clock); return -EINVAL; } if (ret) return ret; /* Wait for the DeSSC PLL to stabilize. */ msleep(100); ret = regmap_multi_reg_write(ctx->regmap, lt9211_pcr_seq, ARRAY_SIZE(lt9211_pcr_seq)); if (ret) return ret; /* PCR stability test takes seconds. */ ret = regmap_read_poll_timeout(ctx->regmap, 0xd087, pval, pval & 0x8, 20000, 10000000); if (ret) dev_err(ctx->dev, "PCR unstable, ret=%i\n", ret); return ret; } static int lt9211_configure_tx(struct lt9211 *ctx, bool jeida, bool bpp24, bool de) { const struct reg_sequence system_lt9211_tx_phy_seq[] = { /* DPI output disable */ { 0x8262, 0x00 }, /* BIT(7) is LVDS dual-port */ { 0x823b, 0x38 | (ctx->lvds_dual_link ? BIT(7) : 0) }, { 0x823e, 0x92 }, { 0x823f, 0x48 }, { 0x8240, 0x31 }, { 0x8243, 0x80 }, { 0x8244, 0x00 }, { 0x8245, 0x00 }, { 0x8249, 0x00 }, { 0x824a, 0x01 }, { 0x824e, 0x00 }, { 0x824f, 0x00 }, { 0x8250, 0x00 }, { 0x8253, 0x00 }, { 0x8254, 0x01 }, /* LVDS channel order, Odd:Even 0x10..A:B, 0x40..B:A */ { 0x8646, ctx->lvds_dual_link_even_odd_swap ? 0x40 : 0x10 }, { 0x8120, 0x7b }, { 0x816b, 0xff }, }; const struct reg_sequence system_lt9211_tx_dig_seq[] = { { 0x8559, 0x40 | (jeida ? BIT(7) : 0) | (de ? BIT(5) : 0) | (bpp24 ? 
BIT(4) : 0) }, { 0x855a, 0xaa }, { 0x855b, 0xaa }, { 0x855c, ctx->lvds_dual_link ? BIT(0) : 0 }, { 0x85a1, 0x77 }, { 0x8640, 0x40 }, { 0x8641, 0x34 }, { 0x8642, 0x10 }, { 0x8643, 0x23 }, { 0x8644, 0x41 }, { 0x8645, 0x02 }, }; const struct reg_sequence system_lt9211_tx_pll_seq[] = { /* TX PLL power down */ { 0x8236, 0x01 }, { 0x8237, ctx->lvds_dual_link ? 0x2a : 0x29 }, { 0x8238, 0x06 }, { 0x8239, 0x30 }, { 0x823a, 0x8e }, { 0x8737, 0x14 }, { 0x8713, 0x00 }, { 0x8713, 0x80 }, }; unsigned int pval; int ret; ret = regmap_multi_reg_write(ctx->regmap, system_lt9211_tx_phy_seq, ARRAY_SIZE(system_lt9211_tx_phy_seq)); if (ret) return ret; ret = regmap_multi_reg_write(ctx->regmap, system_lt9211_tx_dig_seq, ARRAY_SIZE(system_lt9211_tx_dig_seq)); if (ret) return ret; ret = regmap_multi_reg_write(ctx->regmap, system_lt9211_tx_pll_seq, ARRAY_SIZE(system_lt9211_tx_pll_seq)); if (ret) return ret; ret = regmap_read_poll_timeout(ctx->regmap, 0x871f, pval, pval & 0x80, 10000, 1000000); if (ret) { dev_err(ctx->dev, "TX PLL unstable, ret=%i\n", ret); return ret; } ret = regmap_read_poll_timeout(ctx->regmap, 0x8720, pval, pval & 0x80, 10000, 1000000); if (ret) { dev_err(ctx->dev, "TX PLL unstable, ret=%i\n", ret); return ret; } return 0; } static void lt9211_atomic_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct lt9211 *ctx = bridge_to_lt9211(bridge); struct drm_atomic_state *state = old_bridge_state->base.state; const struct drm_bridge_state *bridge_state; const struct drm_crtc_state *crtc_state; const struct drm_display_mode *mode; struct drm_connector *connector; struct drm_crtc *crtc; bool lvds_format_24bpp; bool lvds_format_jeida; u32 bus_flags; int ret; ret = regulator_enable(ctx->vccio); if (ret) { dev_err(ctx->dev, "Failed to enable vccio: %d\n", ret); return; } /* Deassert reset */ gpiod_set_value(ctx->reset_gpio, 1); usleep_range(20000, 21000); /* Very long post-reset delay. */ /* Get the LVDS format from the bridge state. 
*/ bridge_state = drm_atomic_get_new_bridge_state(state, bridge); bus_flags = bridge_state->output_bus_cfg.flags; switch (bridge_state->output_bus_cfg.format) { case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG: lvds_format_24bpp = false; lvds_format_jeida = true; break; case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA: lvds_format_24bpp = true; lvds_format_jeida = true; break; case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG: lvds_format_24bpp = true; lvds_format_jeida = false; break; default: /* * Some bridges still don't set the correct * LVDS bus pixel format, use SPWG24 default * format until those are fixed. */ lvds_format_24bpp = true; lvds_format_jeida = false; dev_warn(ctx->dev, "Unsupported LVDS bus format 0x%04x, please check output bridge driver. Falling back to SPWG24.\n", bridge_state->output_bus_cfg.format); break; } /* * Retrieve the CRTC adjusted mode. This requires a little dance to go * from the bridge to the encoder, to the connector and to the CRTC. */ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); crtc = drm_atomic_get_new_connector_state(state, connector)->crtc; crtc_state = drm_atomic_get_new_crtc_state(state, crtc); mode = &crtc_state->adjusted_mode; ret = lt9211_read_chipid(ctx); if (ret) return; ret = lt9211_system_init(ctx); if (ret) return; ret = lt9211_configure_rx(ctx); if (ret) return; ret = lt9211_autodetect_rx(ctx, mode); if (ret) return; ret = lt9211_configure_timing(ctx, mode); if (ret) return; ret = lt9211_configure_plls(ctx, mode); if (ret) return; ret = lt9211_configure_tx(ctx, lvds_format_jeida, lvds_format_24bpp, bus_flags & DRM_BUS_FLAG_DE_HIGH); if (ret) return; dev_dbg(ctx->dev, "LT9211 enabled.\n"); } static void lt9211_atomic_disable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct lt9211 *ctx = bridge_to_lt9211(bridge); int ret; /* * Put the chip in reset, pull nRST line low, * and assure lengthy 10ms reset low timing. 
*/ gpiod_set_value(ctx->reset_gpio, 0); usleep_range(10000, 11000); /* Very long reset duration. */ ret = regulator_disable(ctx->vccio); if (ret) dev_err(ctx->dev, "Failed to disable vccio: %d\n", ret); regcache_mark_dirty(ctx->regmap); } static enum drm_mode_status lt9211_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode) { /* LVDS output clock range 25..176 MHz */ if (mode->clock < 25000) return MODE_CLOCK_LOW; if (mode->clock > 176000) return MODE_CLOCK_HIGH; return MODE_OK; } #define MAX_INPUT_SEL_FORMATS 1 static u32 * lt9211_atomic_get_input_bus_fmts(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state, u32 output_fmt, unsigned int *num_input_fmts) { u32 *input_fmts; *num_input_fmts = 0; input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts), GFP_KERNEL); if (!input_fmts) return NULL; /* This is the DSI-end bus format */ input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24; *num_input_fmts = 1; return input_fmts; } static const struct drm_bridge_funcs lt9211_funcs = { .attach = lt9211_attach, .mode_valid = lt9211_mode_valid, .atomic_enable = lt9211_atomic_enable, .atomic_disable = lt9211_atomic_disable, .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_get_input_bus_fmts = lt9211_atomic_get_input_bus_fmts, .atomic_reset = drm_atomic_helper_bridge_reset, }; static int lt9211_parse_dt(struct lt9211 *ctx) { struct device_node *port2, *port3; struct drm_bridge *panel_bridge; struct device *dev = ctx->dev; struct drm_panel *panel; int dual_link; int ret; ctx->vccio = devm_regulator_get(dev, "vccio"); if (IS_ERR(ctx->vccio)) return dev_err_probe(dev, PTR_ERR(ctx->vccio), "Failed to get supply 'vccio'\n"); ctx->lvds_dual_link = false; ctx->lvds_dual_link_even_odd_swap = false; port2 = of_graph_get_port_by_id(dev->of_node, 2); port3 = 
of_graph_get_port_by_id(dev->of_node, 3); dual_link = drm_of_lvds_get_dual_link_pixel_order(port2, port3); of_node_put(port2); of_node_put(port3); if (dual_link == DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS) { ctx->lvds_dual_link = true; /* Odd pixels to LVDS Channel A, even pixels to B */ ctx->lvds_dual_link_even_odd_swap = false; } else if (dual_link == DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS) { ctx->lvds_dual_link = true; /* Even pixels to LVDS Channel A, odd pixels to B */ ctx->lvds_dual_link_even_odd_swap = true; } ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &panel, &panel_bridge); if (ret < 0) return ret; if (panel) { panel_bridge = devm_drm_panel_bridge_add(dev, panel); if (IS_ERR(panel_bridge)) return PTR_ERR(panel_bridge); } ctx->panel_bridge = panel_bridge; return 0; } static int lt9211_host_attach(struct lt9211 *ctx) { const struct mipi_dsi_device_info info = { .type = "lt9211", .channel = 0, .node = NULL, }; struct device *dev = ctx->dev; struct device_node *host_node; struct device_node *endpoint; struct mipi_dsi_device *dsi; struct mipi_dsi_host *host; int dsi_lanes; int ret; endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1); dsi_lanes = drm_of_get_data_lanes_count(endpoint, 1, 4); host_node = of_graph_get_remote_port_parent(endpoint); host = of_find_mipi_dsi_host_by_node(host_node); of_node_put(host_node); of_node_put(endpoint); if (!host) return -EPROBE_DEFER; if (dsi_lanes < 0) return dsi_lanes; dsi = devm_mipi_dsi_device_register_full(dev, host, &info); if (IS_ERR(dsi)) return dev_err_probe(dev, PTR_ERR(dsi), "failed to create dsi device\n"); ctx->dsi = dsi; dsi->lanes = dsi_lanes; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO_NO_HSA | MIPI_DSI_MODE_VIDEO_NO_HFP | MIPI_DSI_MODE_VIDEO_NO_HBP | MIPI_DSI_MODE_NO_EOT_PACKET; ret = devm_mipi_dsi_attach(dev, dsi); if (ret < 0) { dev_err(dev, "failed to attach dsi to host: %d\n", ret); 
return ret; } return 0; } static int lt9211_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct lt9211 *ctx; int ret; ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->dev = dev; /* * Put the chip in reset, pull nRST line low, * and assure lengthy 10ms reset low timing. */ ctx->reset_gpio = devm_gpiod_get_optional(ctx->dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(ctx->reset_gpio)) return PTR_ERR(ctx->reset_gpio); usleep_range(10000, 11000); /* Very long reset duration. */ ret = lt9211_parse_dt(ctx); if (ret) return ret; ctx->regmap = devm_regmap_init_i2c(client, &lt9211_regmap_config); if (IS_ERR(ctx->regmap)) return PTR_ERR(ctx->regmap); dev_set_drvdata(dev, ctx); i2c_set_clientdata(client, ctx); ctx->bridge.funcs = &lt9211_funcs; ctx->bridge.of_node = dev->of_node; drm_bridge_add(&ctx->bridge); ret = lt9211_host_attach(ctx); if (ret) drm_bridge_remove(&ctx->bridge); return ret; } static void lt9211_remove(struct i2c_client *client) { struct lt9211 *ctx = i2c_get_clientdata(client); drm_bridge_remove(&ctx->bridge); } static struct i2c_device_id lt9211_id[] = { { "lontium,lt9211" }, {}, }; MODULE_DEVICE_TABLE(i2c, lt9211_id); static const struct of_device_id lt9211_match_table[] = { { .compatible = "lontium,lt9211" }, {}, }; MODULE_DEVICE_TABLE(of, lt9211_match_table); static struct i2c_driver lt9211_driver = { .probe = lt9211_probe, .remove = lt9211_remove, .id_table = lt9211_id, .driver = { .name = "lt9211", .of_match_table = lt9211_match_table, }, }; module_i2c_driver(lt9211_driver); MODULE_AUTHOR("Marek Vasut <[email protected]>"); MODULE_DESCRIPTION("Lontium LT9211 DSI/LVDS/DPI bridge driver"); MODULE_LICENSE("GPL");
linux-master
drivers/gpu/drm/bridge/lontium-lt9211.c
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2021 RenewOutReach * Copyright (C) 2021 Amarula Solutions(India) * * Author: * Jagan Teki <[email protected]> * Christopher Vollo <[email protected]> */ #include <drm/drm_atomic_helper.h> #include <drm/drm_of.h> #include <drm/drm_print.h> #include <drm/drm_mipi_dsi.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/media-bus-format.h> #include <linux/module.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> enum cmd_registers { WR_INPUT_SOURCE = 0x05, /* Write Input Source Select */ WR_EXT_SOURCE_FMT = 0x07, /* Write External Video Source Format */ WR_IMAGE_CROP = 0x10, /* Write Image Crop */ WR_DISPLAY_SIZE = 0x12, /* Write Display Size */ WR_IMAGE_FREEZE = 0x1A, /* Write Image Freeze */ WR_INPUT_IMAGE_SIZE = 0x2E, /* Write External Input Image Size */ WR_RGB_LED_EN = 0x52, /* Write RGB LED Enable */ WR_RGB_LED_CURRENT = 0x54, /* Write RGB LED Current */ WR_RGB_LED_MAX_CURRENT = 0x5C, /* Write RGB LED Max Current */ WR_DSI_HS_CLK = 0xBD, /* Write DSI HS Clock */ RD_DEVICE_ID = 0xD4, /* Read Controller Device ID */ WR_DSI_PORT_EN = 0xD7, /* Write DSI Port Enable */ }; enum input_source { INPUT_EXTERNAL_VIDEO = 0, INPUT_TEST_PATTERN, INPUT_SPLASH_SCREEN, }; #define DEV_ID_MASK GENMASK(3, 0) #define IMAGE_FREESE_EN BIT(0) #define DSI_PORT_EN 0 #define EXT_SOURCE_FMT_DSI 0 #define RED_LED_EN BIT(0) #define GREEN_LED_EN BIT(1) #define BLUE_LED_EN BIT(2) #define LED_MASK GENMASK(2, 0) #define MAX_BYTE_SIZE 8 struct dlpc { struct device *dev; struct drm_bridge bridge; struct drm_bridge *next_bridge; struct device_node *host_node; struct mipi_dsi_device *dsi; struct drm_display_mode mode; struct gpio_desc *enable_gpio; struct regulator *vcc_intf; struct regulator *vcc_flsh; struct regmap *regmap; unsigned int dsi_lanes; }; static inline struct dlpc *bridge_to_dlpc(struct drm_bridge *bridge) { return container_of(bridge, struct dlpc, bridge); } static 
bool dlpc_writeable_noinc_reg(struct device *dev, unsigned int reg) { switch (reg) { case WR_IMAGE_CROP: case WR_DISPLAY_SIZE: case WR_INPUT_IMAGE_SIZE: case WR_DSI_HS_CLK: return true; default: return false; } } static const struct regmap_range dlpc_volatile_ranges[] = { { .range_min = 0x10, .range_max = 0xBF }, }; static const struct regmap_access_table dlpc_volatile_table = { .yes_ranges = dlpc_volatile_ranges, .n_yes_ranges = ARRAY_SIZE(dlpc_volatile_ranges), }; static struct regmap_config dlpc_regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = WR_DSI_PORT_EN, .writeable_noinc_reg = dlpc_writeable_noinc_reg, .volatile_table = &dlpc_volatile_table, .cache_type = REGCACHE_RBTREE, .name = "dlpc3433", }; static void dlpc_atomic_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct dlpc *dlpc = bridge_to_dlpc(bridge); struct device *dev = dlpc->dev; struct drm_display_mode *mode = &dlpc->mode; struct regmap *regmap = dlpc->regmap; char buf[MAX_BYTE_SIZE]; unsigned int devid; regmap_read(regmap, RD_DEVICE_ID, &devid); devid &= DEV_ID_MASK; DRM_DEV_DEBUG(dev, "DLPC3433 device id: 0x%02x\n", devid); if (devid != 0x01) { DRM_DEV_ERROR(dev, "Unsupported DLPC device id: 0x%02x\n", devid); return; } /* disable image freeze */ regmap_write(regmap, WR_IMAGE_FREEZE, IMAGE_FREESE_EN); /* enable DSI port */ regmap_write(regmap, WR_DSI_PORT_EN, DSI_PORT_EN); memset(buf, 0, MAX_BYTE_SIZE); /* set image crop */ buf[4] = mode->hdisplay & 0xff; buf[5] = (mode->hdisplay & 0xff00) >> 8; buf[6] = mode->vdisplay & 0xff; buf[7] = (mode->vdisplay & 0xff00) >> 8; regmap_noinc_write(regmap, WR_IMAGE_CROP, buf, MAX_BYTE_SIZE); /* set display size */ buf[4] = mode->hdisplay & 0xff; buf[5] = (mode->hdisplay & 0xff00) >> 8; buf[6] = mode->vdisplay & 0xff; buf[7] = (mode->vdisplay & 0xff00) >> 8; regmap_noinc_write(regmap, WR_DISPLAY_SIZE, buf, MAX_BYTE_SIZE); /* set input image size */ buf[0] = mode->hdisplay & 0xff; buf[1] = (mode->hdisplay & 
0xff00) >> 8; buf[2] = mode->vdisplay & 0xff; buf[3] = (mode->vdisplay & 0xff00) >> 8; regmap_noinc_write(regmap, WR_INPUT_IMAGE_SIZE, buf, 4); /* set external video port */ regmap_write(regmap, WR_INPUT_SOURCE, INPUT_EXTERNAL_VIDEO); /* set external video format select as DSI */ regmap_write(regmap, WR_EXT_SOURCE_FMT, EXT_SOURCE_FMT_DSI); /* disable image freeze */ regmap_write(regmap, WR_IMAGE_FREEZE, 0x00); /* enable RGB led */ regmap_update_bits(regmap, WR_RGB_LED_EN, LED_MASK, RED_LED_EN | GREEN_LED_EN | BLUE_LED_EN); msleep(10); } static void dlpc_atomic_pre_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct dlpc *dlpc = bridge_to_dlpc(bridge); int ret; gpiod_set_value(dlpc->enable_gpio, 1); msleep(500); ret = regulator_enable(dlpc->vcc_intf); if (ret) DRM_DEV_ERROR(dlpc->dev, "failed to enable VCC_INTF regulator: %d\n", ret); ret = regulator_enable(dlpc->vcc_flsh); if (ret) DRM_DEV_ERROR(dlpc->dev, "failed to enable VCC_FLSH regulator: %d\n", ret); msleep(10); } static void dlpc_atomic_post_disable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct dlpc *dlpc = bridge_to_dlpc(bridge); regulator_disable(dlpc->vcc_flsh); regulator_disable(dlpc->vcc_intf); msleep(10); gpiod_set_value(dlpc->enable_gpio, 0); msleep(500); } #define MAX_INPUT_SEL_FORMATS 1 static u32 * dlpc_atomic_get_input_bus_fmts(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state, u32 output_fmt, unsigned int *num_input_fmts) { u32 *input_fmts; *num_input_fmts = 0; input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts), GFP_KERNEL); if (!input_fmts) return NULL; /* This is the DSI-end bus format */ input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24; *num_input_fmts = 1; return input_fmts; } static void dlpc_mode_set(struct drm_bridge *bridge, const struct drm_display_mode *mode, const struct drm_display_mode *adjusted_mode) { struct dlpc *dlpc = 
bridge_to_dlpc(bridge); drm_mode_copy(&dlpc->mode, adjusted_mode); } static int dlpc_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct dlpc *dlpc = bridge_to_dlpc(bridge); return drm_bridge_attach(bridge->encoder, dlpc->next_bridge, bridge, flags); } static const struct drm_bridge_funcs dlpc_bridge_funcs = { .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_get_input_bus_fmts = dlpc_atomic_get_input_bus_fmts, .atomic_reset = drm_atomic_helper_bridge_reset, .atomic_pre_enable = dlpc_atomic_pre_enable, .atomic_enable = dlpc_atomic_enable, .atomic_post_disable = dlpc_atomic_post_disable, .mode_set = dlpc_mode_set, .attach = dlpc_attach, }; static int dlpc3433_parse_dt(struct dlpc *dlpc) { struct device *dev = dlpc->dev; struct device_node *endpoint; int ret; dlpc->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW); if (IS_ERR(dlpc->enable_gpio)) return PTR_ERR(dlpc->enable_gpio); dlpc->vcc_intf = devm_regulator_get(dlpc->dev, "vcc_intf"); if (IS_ERR(dlpc->vcc_intf)) return dev_err_probe(dev, PTR_ERR(dlpc->vcc_intf), "failed to get VCC_INTF supply\n"); dlpc->vcc_flsh = devm_regulator_get(dlpc->dev, "vcc_flsh"); if (IS_ERR(dlpc->vcc_flsh)) return dev_err_probe(dev, PTR_ERR(dlpc->vcc_flsh), "failed to get VCC_FLSH supply\n"); dlpc->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0); if (IS_ERR(dlpc->next_bridge)) return PTR_ERR(dlpc->next_bridge); endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0); dlpc->dsi_lanes = of_property_count_u32_elems(endpoint, "data-lanes"); if (dlpc->dsi_lanes < 0 || dlpc->dsi_lanes > 4) { ret = -EINVAL; goto err_put_endpoint; } dlpc->host_node = of_graph_get_remote_port_parent(endpoint); if (!dlpc->host_node) { ret = -ENODEV; goto err_put_host; } of_node_put(endpoint); return 0; err_put_host: of_node_put(dlpc->host_node); err_put_endpoint: of_node_put(endpoint); return ret; } static int 
dlpc_host_attach(struct dlpc *dlpc) { struct device *dev = dlpc->dev; struct mipi_dsi_host *host; struct mipi_dsi_device_info info = { .type = "dlpc3433", .channel = 0, .node = NULL, }; host = of_find_mipi_dsi_host_by_node(dlpc->host_node); if (!host) { DRM_DEV_ERROR(dev, "failed to find dsi host\n"); return -EPROBE_DEFER; } dlpc->dsi = mipi_dsi_device_register_full(host, &info); if (IS_ERR(dlpc->dsi)) { DRM_DEV_ERROR(dev, "failed to create dsi device\n"); return PTR_ERR(dlpc->dsi); } dlpc->dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST; dlpc->dsi->format = MIPI_DSI_FMT_RGB565; dlpc->dsi->lanes = dlpc->dsi_lanes; return devm_mipi_dsi_attach(dev, dlpc->dsi); } static int dlpc3433_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct dlpc *dlpc; int ret; dlpc = devm_kzalloc(dev, sizeof(*dlpc), GFP_KERNEL); if (!dlpc) return -ENOMEM; dlpc->dev = dev; dlpc->regmap = devm_regmap_init_i2c(client, &dlpc_regmap_config); if (IS_ERR(dlpc->regmap)) return PTR_ERR(dlpc->regmap); ret = dlpc3433_parse_dt(dlpc); if (ret) return ret; dev_set_drvdata(dev, dlpc); i2c_set_clientdata(client, dlpc); dlpc->bridge.funcs = &dlpc_bridge_funcs; dlpc->bridge.of_node = dev->of_node; drm_bridge_add(&dlpc->bridge); ret = dlpc_host_attach(dlpc); if (ret) { DRM_DEV_ERROR(dev, "failed to attach dsi host\n"); goto err_remove_bridge; } return 0; err_remove_bridge: drm_bridge_remove(&dlpc->bridge); return ret; } static void dlpc3433_remove(struct i2c_client *client) { struct dlpc *dlpc = i2c_get_clientdata(client); drm_bridge_remove(&dlpc->bridge); of_node_put(dlpc->host_node); } static const struct i2c_device_id dlpc3433_id[] = { { "ti,dlpc3433", 0 }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(i2c, dlpc3433_id); static const struct of_device_id dlpc3433_match_table[] = { { .compatible = "ti,dlpc3433" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, dlpc3433_match_table); static struct i2c_driver dlpc3433_driver = { .probe = dlpc3433_probe, .remove = dlpc3433_remove, .id_table = 
dlpc3433_id, .driver = { .name = "ti-dlpc3433", .of_match_table = dlpc3433_match_table, }, }; module_i2c_driver(dlpc3433_driver); MODULE_AUTHOR("Jagan Teki <[email protected]>"); MODULE_AUTHOR("Christopher Vollo <[email protected]>"); MODULE_DESCRIPTION("TI DLPC3433 MIPI DSI Display Controller Bridge"); MODULE_LICENSE("GPL");
linux-master
drivers/gpu/drm/bridge/ti-dlpc3433.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015-2016 Free Electrons
 * Copyright (C) 2015-2016 NextThing Co
 *
 * Maxime Ripard <[email protected]>
 */

#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

/* Per-compatible data: optional bus timings plus the connector type. */
struct simple_bridge_info {
	const struct drm_bridge_timings *timings;
	unsigned int connector_type;
};

/* Driver state: a bridge plus the connector it creates on attach. */
struct simple_bridge {
	struct drm_bridge	bridge;
	struct drm_connector	connector;

	const struct simple_bridge_info	*info;

	struct drm_bridge	*next_bridge;
	struct regulator	*vdd;		/* optional; NULL when absent */
	struct gpio_desc	*enable;	/* optional enable GPIO */
};

static inline struct simple_bridge *
drm_bridge_to_simple_bridge(struct drm_bridge *bridge)
{
	return container_of(bridge, struct simple_bridge, bridge);
}

static inline struct simple_bridge *
drm_connector_to_simple_bridge(struct drm_connector *connector)
{
	return container_of(connector, struct simple_bridge, connector);
}

/*
 * Fill connector modes from the next bridge's EDID when it exposes one,
 * falling back to standard no-EDID modes otherwise.
 */
static int simple_bridge_get_modes(struct drm_connector *connector)
{
	struct simple_bridge *sbridge = drm_connector_to_simple_bridge(connector);
	struct edid *edid;
	int ret;

	if (sbridge->next_bridge->ops & DRM_BRIDGE_OP_EDID) {
		edid = drm_bridge_get_edid(sbridge->next_bridge, connector);
		if (!edid)
			DRM_INFO("EDID read failed. Fallback to standard modes\n");
	} else {
		edid = NULL;
	}

	if (!edid) {
		/*
		 * In case we cannot retrieve the EDIDs (missing or broken DDC
		 * bus from the next bridge), fallback on the XGA standards and
		 * prefer a mode pretty much anyone can handle.
		 */
		ret = drm_add_modes_noedid(connector, 1920, 1200);
		drm_set_preferred_mode(connector, 1024, 768);
		return ret;
	}

	drm_connector_update_edid_property(connector, edid);
	ret = drm_add_edid_modes(connector, edid);
	kfree(edid);

	return ret;
}

static const struct drm_connector_helper_funcs simple_bridge_con_helper_funcs = {
	.get_modes	= simple_bridge_get_modes,
};

/* Delegate hotplug detection to the downstream bridge. */
static enum drm_connector_status
simple_bridge_connector_detect(struct drm_connector *connector, bool force)
{
	struct simple_bridge *sbridge = drm_connector_to_simple_bridge(connector);

	return drm_bridge_detect(sbridge->next_bridge);
}

static const struct drm_connector_funcs simple_bridge_con_funcs = {
	.detect			= simple_bridge_connector_detect,
	.fill_modes		= drm_helper_probe_single_connector_modes,
	.destroy		= drm_connector_cleanup,
	.reset			= drm_atomic_helper_connector_reset,
	.atomic_duplicate_state	= drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_connector_destroy_state,
};

/*
 * Attach the next bridge (always connector-less), then create our own
 * connector unless the caller asked for DRM_BRIDGE_ATTACH_NO_CONNECTOR.
 */
static int simple_bridge_attach(struct drm_bridge *bridge,
				enum drm_bridge_attach_flags flags)
{
	struct simple_bridge *sbridge = drm_bridge_to_simple_bridge(bridge);
	int ret;

	ret = drm_bridge_attach(bridge->encoder, sbridge->next_bridge, bridge,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret < 0)
		return ret;

	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
		return 0;

	if (!bridge->encoder) {
		DRM_ERROR("Missing encoder\n");
		return -ENODEV;
	}

	drm_connector_helper_add(&sbridge->connector,
				 &simple_bridge_con_helper_funcs);
	ret = drm_connector_init_with_ddc(bridge->dev, &sbridge->connector,
					  &simple_bridge_con_funcs,
					  sbridge->info->connector_type,
					  sbridge->next_bridge->ddc);
	if (ret) {
		DRM_ERROR("Failed to initialize connector\n");
		return ret;
	}

	drm_connector_attach_encoder(&sbridge->connector, bridge->encoder);

	return 0;
}

/* Power up: optional vdd supply first, then the enable GPIO. */
static void simple_bridge_enable(struct drm_bridge *bridge)
{
	struct simple_bridge *sbridge = drm_bridge_to_simple_bridge(bridge);
	int ret;

	if (sbridge->vdd) {
		ret = regulator_enable(sbridge->vdd);
		if (ret)
			DRM_ERROR("Failed to enable vdd regulator: %d\n", ret);
	}

	gpiod_set_value_cansleep(sbridge->enable, 1);
}

/* Power down in reverse order of simple_bridge_enable(). */
static void simple_bridge_disable(struct drm_bridge *bridge)
{
	struct simple_bridge *sbridge = drm_bridge_to_simple_bridge(bridge);

	gpiod_set_value_cansleep(sbridge->enable, 0);

	if (sbridge->vdd)
		regulator_disable(sbridge->vdd);
}

static const struct drm_bridge_funcs simple_bridge_bridge_funcs = {
	.attach		= simple_bridge_attach,
	.enable		= simple_bridge_enable,
	.disable	= simple_bridge_disable,
};

static int simple_bridge_probe(struct platform_device *pdev)
{
	struct simple_bridge *sbridge;
	struct device_node *remote;

	sbridge = devm_kzalloc(&pdev->dev, sizeof(*sbridge), GFP_KERNEL);
	if (!sbridge)
		return -ENOMEM;
	platform_set_drvdata(pdev, sbridge);

	sbridge->info = of_device_get_match_data(&pdev->dev);

	/* Get the next bridge in the pipeline. */
	remote = of_graph_get_remote_node(pdev->dev.of_node, 1, -1);
	if (!remote)
		return -EINVAL;

	sbridge->next_bridge = of_drm_find_bridge(remote);
	of_node_put(remote);
	if (!sbridge->next_bridge) {
		dev_dbg(&pdev->dev, "Next bridge not found, deferring probe\n");
		return -EPROBE_DEFER;
	}

	/* Get the regulator and GPIO resources. */
	sbridge->vdd = devm_regulator_get_optional(&pdev->dev, "vdd");
	if (IS_ERR(sbridge->vdd)) {
		int ret = PTR_ERR(sbridge->vdd);
		if (ret == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		/* Missing supply is tolerated: run without vdd control. */
		sbridge->vdd = NULL;
		dev_dbg(&pdev->dev, "No vdd regulator found: %d\n", ret);
	}

	sbridge->enable = devm_gpiod_get_optional(&pdev->dev, "enable",
						  GPIOD_OUT_LOW);
	if (IS_ERR(sbridge->enable))
		return dev_err_probe(&pdev->dev, PTR_ERR(sbridge->enable),
				     "Unable to retrieve enable GPIO\n");

	/* Register the bridge. */
	sbridge->bridge.funcs = &simple_bridge_bridge_funcs;
	sbridge->bridge.of_node = pdev->dev.of_node;
	sbridge->bridge.timings = sbridge->info->timings;

	drm_bridge_add(&sbridge->bridge);

	return 0;
}

static void simple_bridge_remove(struct platform_device *pdev)
{
	struct simple_bridge *sbridge = platform_get_drvdata(pdev);

	drm_bridge_remove(&sbridge->bridge);
}

/*
 * We assume the ADV7123 DAC is the "default" for historical reasons
 * Information taken from the ADV7123 datasheet, revision D.
 * NOTE: the ADV7123EP seems to have other timings and need a new timings
 * set if used.
 */
static const struct drm_bridge_timings default_bridge_timings = {
	/* Timing specifications, datasheet page 7 */
	.input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
	.setup_time_ps = 500,
	.hold_time_ps = 1500,
};

/*
 * Information taken from the THS8134, THS8134A, THS8134B datasheet named
 * "SLVS205D", dated May 1990, revised March 2000.
 */
static const struct drm_bridge_timings ti_ths8134_bridge_timings = {
	/* From timing diagram, datasheet page 9 */
	.input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
	/* From datasheet, page 12 */
	.setup_time_ps = 3000,
	/* I guess this means latched input */
	.hold_time_ps = 0,
};

/*
 * Information taken from the THS8135 datasheet named "SLAS343B", dated
 * May 2001, revised April 2013.
 */
static const struct drm_bridge_timings ti_ths8135_bridge_timings = {
	/* From timing diagram, datasheet page 14 */
	.input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
	/* From datasheet, page 16 */
	.setup_time_ps = 2000,
	.hold_time_ps = 500,
};

static const struct of_device_id simple_bridge_match[] = {
	{
		.compatible = "dumb-vga-dac",
		.data = &(const struct simple_bridge_info) {
			.connector_type = DRM_MODE_CONNECTOR_VGA,
		},
	}, {
		.compatible = "adi,adv7123",
		.data = &(const struct simple_bridge_info) {
			.timings = &default_bridge_timings,
			.connector_type = DRM_MODE_CONNECTOR_VGA,
		},
	}, {
		.compatible = "ti,opa362",
		.data = &(const struct simple_bridge_info) {
			.connector_type = DRM_MODE_CONNECTOR_Composite,
		},
	}, {
		.compatible = "ti,ths8135",
		.data = &(const struct simple_bridge_info) {
			.timings = &ti_ths8135_bridge_timings,
			.connector_type = DRM_MODE_CONNECTOR_VGA,
		},
	}, {
		.compatible = "ti,ths8134",
		.data = &(const struct simple_bridge_info) {
			.timings = &ti_ths8134_bridge_timings,
			.connector_type = DRM_MODE_CONNECTOR_VGA,
		},
	},
	{},
};
MODULE_DEVICE_TABLE(of, simple_bridge_match);

static struct platform_driver simple_bridge_driver = {
	.probe	= simple_bridge_probe,
	.remove_new = simple_bridge_remove,
	.driver	= {
		.name		= "simple-bridge",
		.of_match_table	= simple_bridge_match,
	},
};
module_platform_driver(simple_bridge_driver);

MODULE_AUTHOR("Maxime Ripard <[email protected]>");
MODULE_DESCRIPTION("Simple DRM bridge driver");
MODULE_LICENSE("GPL");
linux-master
drivers/gpu/drm/bridge/simple-bridge.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Marek Vasut <[email protected]>
 *
 * Based on tc358764.c by
 * Andrzej Hajda <[email protected]>
 * Maciej Purski <[email protected]>
 *
 * Based on rpi_touchscreen.c by
 * Eric Anholt <[email protected]>
 */

#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/regulator/consumer.h>

#include <video/mipi_display.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

/* PPI layer registers */
#define PPI_STARTPPI		0x0104 /* START control bit */
#define PPI_LPTXTIMECNT		0x0114 /* LPTX timing signal */
#define PPI_D0S_ATMR		0x0144
#define PPI_D1S_ATMR		0x0148
#define PPI_D0S_CLRSIPOCOUNT	0x0164 /* Assertion timer for Lane 0 */
#define PPI_D1S_CLRSIPOCOUNT	0x0168 /* Assertion timer for Lane 1 */
#define PPI_START_FUNCTION	1

/* DSI layer registers */
#define DSI_STARTDSI		0x0204 /* START control bit of DSI-TX */
#define DSI_LANEENABLE		0x0210 /* Enables each lane */
#define DSI_RX_START		1

/* LCDC/DPI Host Registers, based on guesswork that this matches TC358764 */
#define LCDCTRL			0x0420 /* Video Path Control */
#define LCDCTRL_MSF		BIT(0) /* Magic square in RGB666 */
#define LCDCTRL_VTGEN		BIT(4)/* Use chip clock for timing */
#define LCDCTRL_UNK6		BIT(6) /* Unknown */
#define LCDCTRL_EVTMODE		BIT(5) /* Event mode */
#define LCDCTRL_RGB888		BIT(8) /* RGB888 mode */
#define LCDCTRL_HSPOL		BIT(17) /* Polarity of HSYNC signal */
#define LCDCTRL_DEPOL		BIT(18) /* Polarity of DE signal */
#define LCDCTRL_VSPOL		BIT(19) /* Polarity of VSYNC signal */
#define LCDCTRL_VSDELAY(v)	(((v) & 0xfff) << 20) /* VSYNC delay */

/* SPI Master Registers */
#define SPICMR			0x0450
#define SPITCR			0x0454

/* System Controller Registers */
#define SYSCTRL			0x0464

/* System registers */
#define LPX_PERIOD		3

/* Lane enable PPI and DSI register bits */
#define LANEENABLE_CLEN		BIT(0)
#define LANEENABLE_L0EN		BIT(1)
#define LANEENABLE_L1EN		BIT(2)

/* Driver state; ->error accumulates the first failed register write. */
struct tc358762 {
	struct device *dev;
	struct drm_bridge bridge;
	struct regulator *regulator;
	struct drm_bridge *panel_bridge;
	struct gpio_desc *reset_gpio;
	struct drm_display_mode mode;
	bool pre_enabled;	/* guards against double regulator disable */
	int error;		/* sticky first error from tc358762_write() */
};

/* Return and reset the accumulated write error (0 when none). */
static int tc358762_clear_error(struct tc358762 *ctx)
{
	int ret = ctx->error;

	ctx->error = 0;
	return ret;
}

/*
 * Write a 32-bit value to a 16-bit register address over DSI generic
 * writes (little-endian address then value). No-op once an earlier
 * write has failed; the error is latched in ctx->error.
 */
static void tc358762_write(struct tc358762 *ctx, u16 addr, u32 val)
{
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
	ssize_t ret;
	u8 data[6];

	if (ctx->error)
		return;

	data[0] = addr;
	data[1] = addr >> 8;
	data[2] = val;
	data[3] = val >> 8;
	data[4] = val >> 16;
	data[5] = val >> 24;

	ret = mipi_dsi_generic_write(dsi, data, sizeof(data));
	if (ret < 0)
		ctx->error = ret;
}

static inline struct tc358762 *bridge_to_tc358762(struct drm_bridge *bridge)
{
	return container_of(bridge, struct tc358762, bridge);
}

/*
 * Program the bridge's PPI/DSI/LCDC registers and start the DSI RX and
 * video path. Register order and the msleep()s follow the required
 * chip bring-up sequence — do not reorder.
 */
static int tc358762_init(struct tc358762 *ctx)
{
	u32 lcdctrl;

	tc358762_write(ctx, DSI_LANEENABLE,
		       LANEENABLE_L0EN | LANEENABLE_CLEN);
	tc358762_write(ctx, PPI_D0S_CLRSIPOCOUNT, 5);
	tc358762_write(ctx, PPI_D1S_CLRSIPOCOUNT, 5);
	tc358762_write(ctx, PPI_D0S_ATMR, 0);
	tc358762_write(ctx, PPI_D1S_ATMR, 0);
	tc358762_write(ctx, PPI_LPTXTIMECNT, LPX_PERIOD);
	tc358762_write(ctx, SPICMR, 0x00);

	lcdctrl = LCDCTRL_VSDELAY(1) | LCDCTRL_RGB888 |
		  LCDCTRL_UNK6 | LCDCTRL_VTGEN;

	/* Translate negative-polarity mode flags into LCDCTRL bits. */
	if (ctx->mode.flags & DRM_MODE_FLAG_NHSYNC)
		lcdctrl |= LCDCTRL_HSPOL;

	if (ctx->mode.flags & DRM_MODE_FLAG_NVSYNC)
		lcdctrl |= LCDCTRL_VSPOL;

	tc358762_write(ctx, LCDCTRL, lcdctrl);
	tc358762_write(ctx, SYSCTRL, 0x040f);
	msleep(100);

	tc358762_write(ctx, PPI_STARTPPI, PPI_START_FUNCTION);
	tc358762_write(ctx, DSI_STARTDSI, DSI_RX_START);
	msleep(100);

	return tc358762_clear_error(ctx);
}

static void tc358762_post_disable(struct drm_bridge *bridge,
				  struct drm_bridge_state *state)
{
	struct tc358762 *ctx = bridge_to_tc358762(bridge);
	int ret;

	/*
	 * The post_disable hook might be called multiple times.
	 * We want to avoid regulator imbalance below.
	 */
	if (!ctx->pre_enabled)
		return;

	ctx->pre_enabled = false;

	if (ctx->reset_gpio)
		gpiod_set_value_cansleep(ctx->reset_gpio, 0);

	ret = regulator_disable(ctx->regulator);
	if (ret < 0)
		dev_err(ctx->dev, "error disabling regulators (%d)\n", ret);
}

/* Enable the vddc supply and release reset before the DSI link starts. */
static void tc358762_pre_enable(struct drm_bridge *bridge,
				struct drm_bridge_state *state)
{
	struct tc358762 *ctx = bridge_to_tc358762(bridge);
	int ret;

	ret = regulator_enable(ctx->regulator);
	if (ret < 0)
		dev_err(ctx->dev, "error enabling regulators (%d)\n", ret);

	if (ctx->reset_gpio) {
		gpiod_set_value_cansleep(ctx->reset_gpio, 1);
		usleep_range(5000, 10000);
	}

	ctx->pre_enabled = true;
}

static void tc358762_enable(struct drm_bridge *bridge,
			    struct drm_bridge_state *state)
{
	struct tc358762 *ctx = bridge_to_tc358762(bridge);
	int ret;

	ret = tc358762_init(ctx);
	if (ret < 0)
		dev_err(ctx->dev, "error initializing bridge (%d)\n", ret);
}

static int tc358762_attach(struct drm_bridge *bridge,
			   enum drm_bridge_attach_flags flags)
{
	struct tc358762 *ctx = bridge_to_tc358762(bridge);

	return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
				 bridge, flags);
}

/* Cache the mode; sync polarity is consumed later by tc358762_init(). */
static void tc358762_bridge_mode_set(struct drm_bridge *bridge,
				     const struct drm_display_mode *mode,
				     const struct drm_display_mode *adj)
{
	struct tc358762 *ctx = bridge_to_tc358762(bridge);

	drm_mode_copy(&ctx->mode, mode);
}

static const struct drm_bridge_funcs tc358762_bridge_funcs = {
	.atomic_post_disable = tc358762_post_disable,
	.atomic_pre_enable = tc358762_pre_enable,
	.atomic_enable = tc358762_enable,
	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
	.atomic_reset = drm_atomic_helper_bridge_reset,
	.attach = tc358762_attach,
	.mode_set = tc358762_bridge_mode_set,
};

/* Resolve the downstream panel bridge (port 1) and the optional reset GPIO. */
static int tc358762_parse_dt(struct tc358762 *ctx)
{
	struct drm_bridge *panel_bridge;
	struct device *dev = ctx->dev;

	panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
	if (IS_ERR(panel_bridge))
		return PTR_ERR(panel_bridge);

	ctx->panel_bridge = panel_bridge;

	/* Reset GPIO is optional */
	ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset",
						  GPIOD_OUT_LOW);
	if (IS_ERR(ctx->reset_gpio))
		return PTR_ERR(ctx->reset_gpio);

	return 0;
}

static int tc358762_configure_regulators(struct tc358762 *ctx)
{
	ctx->regulator = devm_regulator_get(ctx->dev, "vddc");
	if (IS_ERR(ctx->regulator))
		return PTR_ERR(ctx->regulator);

	return 0;
}

static int tc358762_probe(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	struct tc358762 *ctx;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(struct tc358762), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mipi_dsi_set_drvdata(dsi, ctx);

	ctx->dev = dev;
	ctx->pre_enabled = false;

	/* TODO: Find out how to get dual-lane mode working */
	dsi->lanes = 1;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
			  MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
			  MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_VIDEO_HSE;

	ret = tc358762_parse_dt(ctx);
	if (ret < 0)
		return ret;

	ret = tc358762_configure_regulators(ctx);
	if (ret < 0)
		return ret;

	ctx->bridge.funcs = &tc358762_bridge_funcs;
	ctx->bridge.type = DRM_MODE_CONNECTOR_DPI;
	ctx->bridge.of_node = dev->of_node;
	/* Bridge must be powered before the upstream DSI host enables. */
	ctx->bridge.pre_enable_prev_first = true;

	drm_bridge_add(&ctx->bridge);

	ret = mipi_dsi_attach(dsi);
	if (ret < 0) {
		drm_bridge_remove(&ctx->bridge);
		dev_err(dev, "failed to attach dsi\n");
	}

	return ret;
}

static void tc358762_remove(struct mipi_dsi_device *dsi)
{
	struct tc358762 *ctx = mipi_dsi_get_drvdata(dsi);

	mipi_dsi_detach(dsi);
	drm_bridge_remove(&ctx->bridge);
}

static const struct of_device_id tc358762_of_match[] = {
	{ .compatible = "toshiba,tc358762" },
	{ }
};
MODULE_DEVICE_TABLE(of, tc358762_of_match);

static struct mipi_dsi_driver tc358762_driver = {
	.probe = tc358762_probe,
	.remove = tc358762_remove,
	.driver = {
		.name = "tc358762",
		.of_match_table = tc358762_of_match,
	},
};
module_mipi_dsi_driver(tc358762_driver);

MODULE_AUTHOR("Marek Vasut <[email protected]>");
MODULE_DESCRIPTION("MIPI-DSI based Driver for TC358762 DSI/DPI Bridge");
MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/bridge/tc358762.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com * Author: Peter Ujfalusi <[email protected]> */ #include <linux/clk.h> #include <linux/device.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/kernel.h> #include <linux/media-bus-format.h> #include <linux/minmax.h> #include <linux/module.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> #include <video/mipi_display.h> #include <video/videomode.h> /* Global (16-bit addressable) */ #define TC358768_CHIPID 0x0000 #define TC358768_SYSCTL 0x0002 #define TC358768_CONFCTL 0x0004 #define TC358768_VSDLY 0x0006 #define TC358768_DATAFMT 0x0008 #define TC358768_GPIOEN 0x000E #define TC358768_GPIODIR 0x0010 #define TC358768_GPIOIN 0x0012 #define TC358768_GPIOOUT 0x0014 #define TC358768_PLLCTL0 0x0016 #define TC358768_PLLCTL1 0x0018 #define TC358768_CMDBYTE 0x0022 #define TC358768_PP_MISC 0x0032 #define TC358768_DSITX_DT 0x0050 #define TC358768_FIFOSTATUS 0x00F8 /* Debug (16-bit addressable) */ #define TC358768_VBUFCTRL 0x00E0 #define TC358768_DBG_WIDTH 0x00E2 #define TC358768_DBG_VBLANK 0x00E4 #define TC358768_DBG_DATA 0x00E8 /* TX PHY (32-bit addressable) */ #define TC358768_CLW_DPHYCONTTX 0x0100 #define TC358768_D0W_DPHYCONTTX 0x0104 #define TC358768_D1W_DPHYCONTTX 0x0108 #define TC358768_D2W_DPHYCONTTX 0x010C #define TC358768_D3W_DPHYCONTTX 0x0110 #define TC358768_CLW_CNTRL 0x0140 #define TC358768_D0W_CNTRL 0x0144 #define TC358768_D1W_CNTRL 0x0148 #define TC358768_D2W_CNTRL 0x014C #define TC358768_D3W_CNTRL 0x0150 /* TX PPI (32-bit addressable) */ #define TC358768_STARTCNTRL 0x0204 #define TC358768_DSITXSTATUS 0x0208 #define TC358768_LINEINITCNT 0x0210 #define TC358768_LPTXTIMECNT 0x0214 #define TC358768_TCLK_HEADERCNT 0x0218 #define TC358768_TCLK_TRAILCNT 
0x021C #define TC358768_THS_HEADERCNT 0x0220 #define TC358768_TWAKEUP 0x0224 #define TC358768_TCLK_POSTCNT 0x0228 #define TC358768_THS_TRAILCNT 0x022C #define TC358768_HSTXVREGCNT 0x0230 #define TC358768_HSTXVREGEN 0x0234 #define TC358768_TXOPTIONCNTRL 0x0238 #define TC358768_BTACNTRL1 0x023C /* TX CTRL (32-bit addressable) */ #define TC358768_DSI_CONTROL 0x040C #define TC358768_DSI_STATUS 0x0410 #define TC358768_DSI_INT 0x0414 #define TC358768_DSI_INT_ENA 0x0418 #define TC358768_DSICMD_RDFIFO 0x0430 #define TC358768_DSI_ACKERR 0x0434 #define TC358768_DSI_ACKERR_INTENA 0x0438 #define TC358768_DSI_ACKERR_HALT 0x043c #define TC358768_DSI_RXERR 0x0440 #define TC358768_DSI_RXERR_INTENA 0x0444 #define TC358768_DSI_RXERR_HALT 0x0448 #define TC358768_DSI_ERR 0x044C #define TC358768_DSI_ERR_INTENA 0x0450 #define TC358768_DSI_ERR_HALT 0x0454 #define TC358768_DSI_CONFW 0x0500 #define TC358768_DSI_LPCMD 0x0500 #define TC358768_DSI_RESET 0x0504 #define TC358768_DSI_INT_CLR 0x050C #define TC358768_DSI_START 0x0518 /* DSITX CTRL (16-bit addressable) */ #define TC358768_DSICMD_TX 0x0600 #define TC358768_DSICMD_TYPE 0x0602 #define TC358768_DSICMD_WC 0x0604 #define TC358768_DSICMD_WD0 0x0610 #define TC358768_DSICMD_WD1 0x0612 #define TC358768_DSICMD_WD2 0x0614 #define TC358768_DSICMD_WD3 0x0616 #define TC358768_DSI_EVENT 0x0620 #define TC358768_DSI_VSW 0x0622 #define TC358768_DSI_VBPR 0x0624 #define TC358768_DSI_VACT 0x0626 #define TC358768_DSI_HSW 0x0628 #define TC358768_DSI_HBPR 0x062A #define TC358768_DSI_HACT 0x062C /* TC358768_DSI_CONTROL (0x040C) register */ #define TC358768_DSI_CONTROL_DIS_MODE BIT(15) #define TC358768_DSI_CONTROL_TXMD BIT(7) #define TC358768_DSI_CONTROL_HSCKMD BIT(5) #define TC358768_DSI_CONTROL_EOTDIS BIT(0) /* TC358768_DSI_CONFW (0x0500) register */ #define TC358768_DSI_CONFW_MODE_SET (5 << 29) #define TC358768_DSI_CONFW_MODE_CLR (6 << 29) #define TC358768_DSI_CONFW_ADDR_DSI_CONTROL (0x3 << 24) static const char * const tc358768_supplies[] = { "vddc", 
"vddmipi", "vddio" }; struct tc358768_dsi_output { struct mipi_dsi_device *dev; struct drm_panel *panel; struct drm_bridge *bridge; }; struct tc358768_priv { struct device *dev; struct regmap *regmap; struct gpio_desc *reset_gpio; struct regulator_bulk_data supplies[ARRAY_SIZE(tc358768_supplies)]; struct clk *refclk; int enabled; int error; struct mipi_dsi_host dsi_host; struct drm_bridge bridge; struct tc358768_dsi_output output; u32 pd_lines; /* number of Parallel Port Input Data Lines */ u32 dsi_lanes; /* number of DSI Lanes */ u32 dsi_bpp; /* number of Bits Per Pixel over DSI */ /* Parameters for PLL programming */ u32 fbd; /* PLL feedback divider */ u32 prd; /* PLL input divider */ u32 frs; /* PLL Freqency range for HSCK (post divider) */ u32 dsiclk; /* pll_clk / 2 */ }; static inline struct tc358768_priv *dsi_host_to_tc358768(struct mipi_dsi_host *host) { return container_of(host, struct tc358768_priv, dsi_host); } static inline struct tc358768_priv *bridge_to_tc358768(struct drm_bridge *bridge) { return container_of(bridge, struct tc358768_priv, bridge); } static int tc358768_clear_error(struct tc358768_priv *priv) { int ret = priv->error; priv->error = 0; return ret; } static void tc358768_write(struct tc358768_priv *priv, u32 reg, u32 val) { /* work around https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ int tmpval = val; size_t count = 2; if (priv->error) return; /* 16-bit register? */ if (reg < 0x100 || reg >= 0x600) count = 1; priv->error = regmap_bulk_write(priv->regmap, reg, &tmpval, count); } static void tc358768_read(struct tc358768_priv *priv, u32 reg, u32 *val) { size_t count = 2; if (priv->error) return; /* 16-bit register? 
*/
	/* Registers below 0x100 / above 0x600 are 16-bit, read one word */
	if (reg < 0x100 || reg >= 0x600) {
		*val = 0;
		count = 1;
	}

	priv->error = regmap_bulk_read(priv->regmap, reg, val, count);
}

/*
 * Read-modify-write helper: replace only the bits selected by @mask with
 * @val. Skips the I2C write entirely when the register already holds the
 * target value. Errors are latched in priv->error by the read/write helpers.
 */
static void tc358768_update_bits(struct tc358768_priv *priv, u32 reg, u32 mask,
				 u32 val)
{
	u32 tmp, orig;

	tc358768_read(priv, reg, &orig);
	tmp = orig & ~mask;
	tmp |= val & mask;
	if (tmp != orig)
		tc358768_write(priv, reg, tmp);
}

/* Pulse the SYSCTL software-reset bit, then report any latched I2C error. */
static int tc358768_sw_reset(struct tc358768_priv *priv)
{
	/* Assert Reset */
	tc358768_write(priv, TC358768_SYSCTL, 1);
	/* Release Reset, Exit Sleep */
	tc358768_write(priv, TC358768_SYSCTL, 0);

	return tc358768_clear_error(priv);
}

/*
 * Power up the chip: refclk on, supplies on, then release the active-low
 * reset line. Idempotent via priv->enabled. Supply/clock failures are only
 * logged; the sequence continues regardless.
 */
static void tc358768_hw_enable(struct tc358768_priv *priv)
{
	int ret;

	if (priv->enabled)
		return;

	ret = clk_prepare_enable(priv->refclk);
	if (ret < 0)
		dev_err(priv->dev, "error enabling refclk (%d)\n", ret);

	ret = regulator_bulk_enable(ARRAY_SIZE(priv->supplies), priv->supplies);
	if (ret < 0)
		dev_err(priv->dev, "error enabling regulators (%d)\n", ret);

	/* Only delay for supply ramp when a reset line actually exists */
	if (priv->reset_gpio)
		usleep_range(200, 300);

	/*
	 * The RESX is active low (GPIO_ACTIVE_LOW).
	 * DEASSERT (value = 0) the reset_gpio to enable the chip
	 */
	gpiod_set_value_cansleep(priv->reset_gpio, 0);

	/* wait for encoder clocks to stabilize */
	usleep_range(1000, 2000);

	priv->enabled = true;
}

/*
 * Reverse of tc358768_hw_enable(): assert reset, drop supplies, gate refclk.
 * Idempotent via priv->enabled.
 */
static void tc358768_hw_disable(struct tc358768_priv *priv)
{
	int ret;

	if (!priv->enabled)
		return;

	/*
	 * The RESX is active low (GPIO_ACTIVE_LOW).
	 * ASSERT (value = 1) the reset_gpio to disable the chip
	 */
	gpiod_set_value_cansleep(priv->reset_gpio, 1);

	ret = regulator_bulk_disable(ARRAY_SIZE(priv->supplies),
				     priv->supplies);
	if (ret < 0)
		dev_err(priv->dev, "error disabling regulators (%d)\n", ret);

	clk_disable_unprepare(priv->refclk);

	priv->enabled = false;
}

/* Convert a PLL rate to the corresponding pixel clock (lanes/bpp scaling). */
static u32 tc358768_pll_to_pclk(struct tc358768_priv *priv, u32 pll_clk)
{
	return (u32)div_u64((u64)pll_clk * priv->dsi_lanes, priv->dsi_bpp);
}

/* Inverse of tc358768_pll_to_pclk(): pixel clock to required PLL rate. */
static u32 tc358768_pclk_to_pll(struct tc358768_priv *priv, u32 pclk)
{
	return (u32)div_u64((u64)pclk * priv->dsi_bpp, priv->dsi_lanes);
}

/*
 * Exhaustively search PRD (0..15) and FBD (0..511) dividers for the PLL
 * setting closest to the rate @mode requires. When @verify_only is set the
 * function only reports whether a setting exists; otherwise the winning
 * fbd/prd/frs and resulting dsiclk are stored in @priv.
 *
 * Returns 0 on success, -EINVAL when the target rate is outside the
 * supported FRS bands or no divider combination fits.
 */
static int tc358768_calc_pll(struct tc358768_priv *priv,
			     const struct drm_display_mode *mode,
			     bool verify_only)
{
	/* Upper bounds of the four FRS (post-divider) frequency bands */
	static const u32 frs_limits[] = {
		1000000000,
		500000000,
		250000000,
		125000000,
		62500000
	};
	unsigned long refclk;
	u32 prd, target_pll, i, max_pll, min_pll;
	u32 frs, best_diff, best_pll, best_prd, best_fbd;

	target_pll = tc358768_pclk_to_pll(priv, mode->clock * 1000);

	/* pll_clk = RefClk * [(FBD + 1)/ (PRD + 1)] * [1 / (2^FRS)] */

	for (i = 0; i < ARRAY_SIZE(frs_limits); i++)
		if (target_pll >= frs_limits[i])
			break;

	/* Reject targets above the highest or below the lowest band */
	if (i == ARRAY_SIZE(frs_limits) || i == 0)
		return -EINVAL;

	frs = i - 1;
	max_pll = frs_limits[i - 1];
	min_pll = frs_limits[i];

	refclk = clk_get_rate(priv->refclk);

	best_diff = UINT_MAX;
	best_pll = 0;
	best_prd = 0;
	best_fbd = 0;

	for (prd = 0; prd < 16; ++prd) {
		u32 divisor = (prd + 1) * (1 << frs);
		u32 fbd;

		for (fbd = 0; fbd < 512; ++fbd) {
			u32 pll, diff, pll_in;

			pll = (u32)div_u64((u64)refclk * (fbd + 1), divisor);

			if (pll >= max_pll || pll < min_pll)
				continue;

			/* PLL input after pre-divider must stay >= 4 MHz */
			pll_in = (u32)div_u64((u64)refclk, prd + 1);
			if (pll_in < 4000000)
				continue;

			diff = max(pll, target_pll) - min(pll, target_pll);

			if (diff < best_diff) {
				best_diff = diff;
				best_pll = pll;
				best_prd = prd;
				best_fbd = fbd;

				if (best_diff == 0)
					goto found;
			}
		}
	}

	if (best_diff == UINT_MAX) {
		dev_err(priv->dev, "could not find suitable PLL setup\n");
		return -EINVAL;
	}

found:
	if (verify_only)
		return 0;

	priv->fbd = best_fbd;
	priv->prd = best_prd;
	priv->frs = frs;
	priv->dsiclk = best_pll / 2;

	return 0;
}

/*
 * DSI host attach callback: validate the peripheral's lane count, video
 * mode and pixel format, resolve the downstream panel/bridge, cache the
 * link parameters and register our bridge with DRM.
 */
static int tc358768_dsi_host_attach(struct mipi_dsi_host *host,
				    struct mipi_dsi_device *dev)
{
	struct tc358768_priv *priv = dsi_host_to_tc358768(host);
	struct drm_bridge *bridge;
	struct drm_panel *panel;
	struct device_node *ep;
	int ret;

	if (dev->lanes > 4) {
		dev_err(priv->dev, "unsupported number of data lanes(%u)\n",
			dev->lanes);
		return -EINVAL;
	}

	/*
	 * tc358768 supports both Video and Pulse mode, but the driver only
	 * implements Video (event) mode currently
	 */
	if (!(dev->mode_flags & MIPI_DSI_MODE_VIDEO)) {
		dev_err(priv->dev, "Only MIPI_DSI_MODE_VIDEO is supported\n");
		return -ENOTSUPP;
	}

	/*
	 * tc358768 supports RGB888, RGB666, RGB666_PACKED and RGB565, but only
	 * RGB888 is verified.
	 */
	if (dev->format != MIPI_DSI_FMT_RGB888) {
		dev_warn(priv->dev, "Only MIPI_DSI_FMT_RGB888 tested!\n");
		return -ENOTSUPP;
	}

	ret = drm_of_find_panel_or_bridge(host->dev->of_node, 1, 0, &panel,
					  &bridge);
	if (ret)
		return ret;

	/* Wrap a bare panel in a panel-bridge so we always chain bridges */
	if (panel) {
		bridge = drm_panel_bridge_add_typed(panel,
						    DRM_MODE_CONNECTOR_DSI);
		if (IS_ERR(bridge))
			return PTR_ERR(bridge);
	}

	priv->output.dev = dev;
	priv->output.bridge = bridge;
	priv->output.panel = panel;

	priv->dsi_lanes = dev->lanes;
	priv->dsi_bpp = mipi_dsi_pixel_format_to_bpp(dev->format);

	/* get input ep (port0/endpoint0) */
	ret = -EINVAL;
	ep = of_graph_get_endpoint_by_regs(host->dev->of_node, 0, 0);
	if (ep) {
		ret = of_property_read_u32(ep, "data-lines", &priv->pd_lines);
		of_node_put(ep);
	}

	/* Fall back to the DSI format's depth when DT gives no data-lines */
	if (ret)
		priv->pd_lines = priv->dsi_bpp;

	drm_bridge_add(&priv->bridge);

	return 0;
}

/* DSI host detach callback: unregister our bridge and any panel-bridge. */
static int tc358768_dsi_host_detach(struct mipi_dsi_host *host,
				    struct mipi_dsi_device *dev)
{
	struct tc358768_priv *priv = dsi_host_to_tc358768(host);

	drm_bridge_remove(&priv->bridge);
	if (priv->output.panel)
		drm_panel_bridge_remove(priv->output.bridge);

	return 0;
}

static ssize_t tc358768_dsi_host_transfer(struct mipi_dsi_host *host,
					  const struct mipi_dsi_msg *msg)
{
	struct tc358768_priv
*priv = dsi_host_to_tc358768(host); struct mipi_dsi_packet packet; int ret; if (!priv->enabled) { dev_err(priv->dev, "Bridge is not enabled\n"); return -ENODEV; } if (msg->rx_len) { dev_warn(priv->dev, "MIPI rx is not supported\n"); return -ENOTSUPP; } if (msg->tx_len > 8) { dev_warn(priv->dev, "Maximum 8 byte MIPI tx is supported\n"); return -ENOTSUPP; } ret = mipi_dsi_create_packet(&packet, msg); if (ret) return ret; if (mipi_dsi_packet_format_is_short(msg->type)) { tc358768_write(priv, TC358768_DSICMD_TYPE, (0x10 << 8) | (packet.header[0] & 0x3f)); tc358768_write(priv, TC358768_DSICMD_WC, 0); tc358768_write(priv, TC358768_DSICMD_WD0, (packet.header[2] << 8) | packet.header[1]); } else { int i; tc358768_write(priv, TC358768_DSICMD_TYPE, (0x40 << 8) | (packet.header[0] & 0x3f)); tc358768_write(priv, TC358768_DSICMD_WC, packet.payload_length); for (i = 0; i < packet.payload_length; i += 2) { u16 val = packet.payload[i]; if (i + 1 < packet.payload_length) val |= packet.payload[i + 1] << 8; tc358768_write(priv, TC358768_DSICMD_WD0 + i, val); } } /* start transfer */ tc358768_write(priv, TC358768_DSICMD_TX, 1); ret = tc358768_clear_error(priv); if (ret) dev_warn(priv->dev, "Software disable failed: %d\n", ret); else ret = packet.size; return ret; } static const struct mipi_dsi_host_ops tc358768_dsi_host_ops = { .attach = tc358768_dsi_host_attach, .detach = tc358768_dsi_host_detach, .transfer = tc358768_dsi_host_transfer, }; static int tc358768_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct tc358768_priv *priv = bridge_to_tc358768(bridge); if (!drm_core_check_feature(bridge->dev, DRIVER_ATOMIC)) { dev_err(priv->dev, "needs atomic updates support\n"); return -ENOTSUPP; } return drm_bridge_attach(bridge->encoder, priv->output.bridge, bridge, flags); } static enum drm_mode_status tc358768_bridge_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode) { struct tc358768_priv *priv 
= bridge_to_tc358768(bridge);

	if (tc358768_calc_pll(priv, mode, true))
		return MODE_CLOCK_RANGE;

	return MODE_OK;
}

/*
 * Stop the pixel pipeline: halt frame fetch, wait a frame, disable the
 * parallel-port input and park the read pointer. HW power stays on until
 * post_disable.
 */
static void tc358768_bridge_disable(struct drm_bridge *bridge)
{
	struct tc358768_priv *priv = bridge_to_tc358768(bridge);
	int ret;

	/* set FrmStop */
	tc358768_update_bits(priv, TC358768_PP_MISC, BIT(15), BIT(15));

	/* wait at least for one frame */
	msleep(50);

	/* clear PP_en */
	tc358768_update_bits(priv, TC358768_CONFCTL, BIT(6), 0);

	/* set RstPtr */
	tc358768_update_bits(priv, TC358768_PP_MISC, BIT(14), BIT(14));

	ret = tc358768_clear_error(priv);
	if (ret)
		dev_warn(priv->dev, "Software disable failed: %d\n", ret);
}

/* Cut clocks, supplies and assert reset after the pipeline is stopped. */
static void tc358768_bridge_post_disable(struct drm_bridge *bridge)
{
	struct tc358768_priv *priv = bridge_to_tc358768(bridge);

	tc358768_hw_disable(priv);
}

/*
 * Program the PLL dividers computed by tc358768_calc_pll() and bring the
 * PLL up in two steps (enable, then gate the clock output on after lock).
 */
static int tc358768_setup_pll(struct tc358768_priv *priv,
			      const struct drm_display_mode *mode)
{
	u32 fbd, prd, frs;
	int ret;

	ret = tc358768_calc_pll(priv, mode, false);
	if (ret) {
		dev_err(priv->dev, "PLL calculation failed: %d\n", ret);
		return ret;
	}

	fbd = priv->fbd;
	prd = priv->prd;
	frs = priv->frs;

	dev_dbg(priv->dev, "PLL: refclk %lu, fbd %u, prd %u, frs %u\n",
		clk_get_rate(priv->refclk), fbd, prd, frs);
	dev_dbg(priv->dev, "PLL: pll_clk: %u, DSIClk %u, DSIByteClk %u\n",
		priv->dsiclk * 2, priv->dsiclk, priv->dsiclk / 4);
	dev_dbg(priv->dev, "PLL: pclk %u (panel: %u)\n",
		tc358768_pll_to_pclk(priv, priv->dsiclk * 2),
		mode->clock * 1000);

	/* PRD[15:12] FBD[8:0] */
	tc358768_write(priv, TC358768_PLLCTL0, (prd << 12) | fbd);

	/* FRS[11:10] LBWS[9:8] CKEN[4] RESETB[1] EN[0] */
	tc358768_write(priv, TC358768_PLLCTL1,
		       (frs << 10) | (0x2 << 8) | BIT(1) | BIT(0));

	/* wait for lock */
	usleep_range(1000, 2000);

	/* FRS[11:10] LBWS[9:8] CKEN[4] PLL_CKEN[4] RESETB[1] EN[0] */
	tc358768_write(priv, TC358768_PLLCTL1,
		       (frs << 10) | (0x2 << 8) | BIT(4) | BIT(1) | BIT(0));

	return tc358768_clear_error(priv);
}

/* Fixed-point scale for the *_nsk nanosecond values below (1/1000 ns) */
#define TC358768_PRECISION	1000

/* Round a duration in ns up to a whole number of periods of @period_nsk. */
static u32 tc358768_ns_to_cnt(u32 ns, u32 period_nsk)
{
	return (ns * TC358768_PRECISION + period_nsk) / period_nsk;
}

/* Convert a scaled nsk value back to plain nanoseconds (truncating). */
static u32 tc358768_to_ns(u32 nsk)
{
	return (nsk / TC358768_PRECISION);
}

/*
 * Full link bring-up: power + reset the chip, program the PLL, the data
 * format, the D-PHY timing counters (derived from the DSI byte clock and
 * UI), and the video timing registers for pulse or event mode. On any
 * failure the hardware is powered back down.
 */
static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
{
	struct tc358768_priv *priv = bridge_to_tc358768(bridge);
	struct mipi_dsi_device *dsi_dev = priv->output.dev;
	unsigned long mode_flags = dsi_dev->mode_flags;
	u32 val, val2, lptxcnt, hact, data_type;
	s32 raw_val;
	const struct drm_display_mode *mode;
	u32 dsibclk_nsk, dsiclk_nsk, ui_nsk;
	u32 dsiclk, dsibclk, video_start;
	const u32 internal_delay = 40;
	int ret, i;

	if (mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
		dev_warn_once(priv->dev,
			      "Non-continuous mode unimplemented, falling back to continuous\n");
		mode_flags &= ~MIPI_DSI_CLOCK_NON_CONTINUOUS;
	}

	tc358768_hw_enable(priv);

	ret = tc358768_sw_reset(priv);
	if (ret) {
		dev_err(priv->dev, "Software reset failed: %d\n", ret);
		tc358768_hw_disable(priv);
		return;
	}

	mode = &bridge->encoder->crtc->state->adjusted_mode;
	ret = tc358768_setup_pll(priv, mode);
	if (ret) {
		dev_err(priv->dev, "PLL setup failed: %d\n", ret);
		tc358768_hw_disable(priv);
		return;
	}

	dsiclk = priv->dsiclk;
	dsibclk = dsiclk / 4;

	/* Data Format Control Register */
	val = BIT(2) | BIT(1) | BIT(0); /* rdswap_en | dsitx_en | txdt_en */
	switch (dsi_dev->format) {
	case MIPI_DSI_FMT_RGB888:
		val |= (0x3 << 4);
		hact = mode->hdisplay * 3;
		video_start = (mode->htotal - mode->hsync_start) * 3;
		data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
		break;
	case MIPI_DSI_FMT_RGB666:
		val |= (0x4 << 4);
		hact = mode->hdisplay * 3;
		video_start = (mode->htotal - mode->hsync_start) * 3;
		data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
		break;
	case MIPI_DSI_FMT_RGB666_PACKED:
		val |= (0x4 << 4) | BIT(3);
		hact = mode->hdisplay * 18 / 8;
		video_start = (mode->htotal - mode->hsync_start) * 18 / 8;
		data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
		break;
	case MIPI_DSI_FMT_RGB565:
		val |= (0x5 << 4);
		hact = mode->hdisplay * 2;
		video_start = (mode->htotal - mode->hsync_start) * 2;
		data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
		break;
	default:
		dev_err(priv->dev, "Invalid data format (%u)\n",
			dsi_dev->format);
		tc358768_hw_disable(priv);
		return;
	}

	/* VSDly[9:0], compensated for the chip's fixed internal delay */
	video_start = max(video_start, internal_delay + 1) - internal_delay;
	tc358768_write(priv, TC358768_VSDLY, video_start);

	tc358768_write(priv, TC358768_DATAFMT, val);
	tc358768_write(priv, TC358768_DSITX_DT, data_type);

	/* Enable D-PHY (HiZ->LP11) */
	tc358768_write(priv, TC358768_CLW_CNTRL, 0x0000);
	/* Enable lanes */
	for (i = 0; i < dsi_dev->lanes; i++)
		tc358768_write(priv, TC358768_D0W_CNTRL + i * 4, 0x0000);

	/* DSI Timings: clock periods in 1/1000 ns (nsk) fixed point */
	dsibclk_nsk = (u32)div_u64((u64)1000000000 * TC358768_PRECISION,
				  dsibclk);
	dsiclk_nsk = (u32)div_u64((u64)1000000000 * TC358768_PRECISION, dsiclk);
	ui_nsk = dsiclk_nsk / 2;
	dev_dbg(priv->dev, "dsiclk_nsk: %u\n", dsiclk_nsk);
	dev_dbg(priv->dev, "ui_nsk: %u\n", ui_nsk);
	dev_dbg(priv->dev, "dsibclk_nsk: %u\n", dsibclk_nsk);

	/* LP11 > 100us for D-PHY Rx Init */
	val = tc358768_ns_to_cnt(100 * 1000, dsibclk_nsk) - 1;
	dev_dbg(priv->dev, "LINEINITCNT: 0x%x\n", val);
	tc358768_write(priv, TC358768_LINEINITCNT, val);

	/* LPTimeCnt > 50ns */
	val = tc358768_ns_to_cnt(50, dsibclk_nsk) - 1;
	lptxcnt = val;
	dev_dbg(priv->dev, "LPTXTIMECNT: 0x%x\n", val);
	tc358768_write(priv, TC358768_LPTXTIMECNT, val);

	/* 38ns < TCLK_PREPARE < 95ns */
	val = tc358768_ns_to_cnt(65, dsibclk_nsk) - 1;
	/* TCLK_PREPARE + TCLK_ZERO > 300ns */
	val2 = tc358768_ns_to_cnt(300 - tc358768_to_ns(2 * ui_nsk),
				  dsibclk_nsk) - 2;
	val |= val2 << 8;
	dev_dbg(priv->dev, "TCLK_HEADERCNT: 0x%x\n", val);
	tc358768_write(priv, TC358768_TCLK_HEADERCNT, val);

	/* TCLK_TRAIL > 60ns AND TEOT <= 105 ns + 12*UI */
	raw_val = tc358768_ns_to_cnt(60 + tc358768_to_ns(2 * ui_nsk),
				     dsibclk_nsk) - 5;
	val = clamp(raw_val, 0, 127);
	dev_dbg(priv->dev, "TCLK_TRAILCNT: 0x%x\n", val);
	tc358768_write(priv, TC358768_TCLK_TRAILCNT, val);

	/* 40ns + 4*UI < THS_PREPARE < 85ns + 6*UI */
	val = 50 + tc358768_to_ns(4 * ui_nsk);
	val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 1;
	/* THS_PREPARE + THS_ZERO > 145ns + 10*UI */
	raw_val = tc358768_ns_to_cnt(145 - tc358768_to_ns(3 * ui_nsk),
				     dsibclk_nsk) - 10;
	val2 = clamp(raw_val, 0, 127);
	val |= val2 << 8;
	dev_dbg(priv->dev, "THS_HEADERCNT: 0x%x\n", val);
	tc358768_write(priv, TC358768_THS_HEADERCNT, val);

	/* TWAKEUP > 1ms in lptxcnt steps */
	val = tc358768_ns_to_cnt(1020000, dsibclk_nsk);
	val = val / (lptxcnt + 1) - 1;
	dev_dbg(priv->dev, "TWAKEUP: 0x%x\n", val);
	tc358768_write(priv, TC358768_TWAKEUP, val);

	/* TCLK_POSTCNT > 60ns + 52*UI */
	val = tc358768_ns_to_cnt(60 + tc358768_to_ns(52 * ui_nsk),
				 dsibclk_nsk) - 3;
	dev_dbg(priv->dev, "TCLK_POSTCNT: 0x%x\n", val);
	tc358768_write(priv, TC358768_TCLK_POSTCNT, val);

	/* max(60ns + 4*UI, 8*UI) < THS_TRAILCNT < 105ns + 12*UI */
	raw_val = tc358768_ns_to_cnt(60 + tc358768_to_ns(18 * ui_nsk),
				     dsibclk_nsk) - 4;
	val = clamp(raw_val, 0, 15);
	dev_dbg(priv->dev, "THS_TRAILCNT: 0x%x\n", val);
	tc358768_write(priv, TC358768_THS_TRAILCNT, val);

	/* Voltage regulator enable: clock lane plus each data lane */
	val = BIT(0);
	for (i = 0; i < dsi_dev->lanes; i++)
		val |= BIT(i + 1);
	tc358768_write(priv, TC358768_HSTXVREGEN, val);

	if (!(mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
		tc358768_write(priv, TC358768_TXOPTIONCNTRL, 0x1);

	/* TXTAGOCNT[26:16] RXTASURECNT[10:0] */
	val = tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk * 4);
	val = tc358768_ns_to_cnt(val, dsibclk_nsk) / 4 - 1;
	val2 = tc358768_ns_to_cnt(tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk),
				  dsibclk_nsk) - 2;
	val = val << 16 | val2;
	dev_dbg(priv->dev, "BTACNTRL1: 0x%x\n", val);
	tc358768_write(priv, TC358768_BTACNTRL1, val);

	/* START[0] */
	tc358768_write(priv, TC358768_STARTCNTRL, 1);

	if (dsi_dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
		/* Set pulse mode */
		tc358768_write(priv, TC358768_DSI_EVENT, 0);

		/* vact */
		tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay);

		/* vsw */
		tc358768_write(priv, TC358768_DSI_VSW,
			       mode->vsync_end - mode->vsync_start);
		/* vbp */
		tc358768_write(priv, TC358768_DSI_VBPR,
			       mode->vtotal - mode->vsync_end);

		/* hsw * byteclk * ndl / pclk */
		val = (u32)div_u64((mode->hsync_end - mode->hsync_start) *
				   ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
				   mode->clock * 1000);
		tc358768_write(priv, TC358768_DSI_HSW, val);

		/* hbp * byteclk * ndl / pclk */
		val = (u32)div_u64((mode->htotal - mode->hsync_end) *
				   ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
				   mode->clock * 1000);
		tc358768_write(priv, TC358768_DSI_HBPR, val);
	} else {
		/* Set event mode */
		tc358768_write(priv, TC358768_DSI_EVENT, 1);

		/* vact */
		tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay);

		/* vsw (+ vbp) */
		tc358768_write(priv, TC358768_DSI_VSW,
			       mode->vtotal - mode->vsync_start);
		/* vbp (not used in event mode) */
		tc358768_write(priv, TC358768_DSI_VBPR, 0);

		/* (hsw + hbp) * byteclk * ndl / pclk */
		val = (u32)div_u64((mode->htotal - mode->hsync_start) *
				   ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
				   mode->clock * 1000);
		tc358768_write(priv, TC358768_DSI_HSW, val);

		/* hbp (not used in event mode) */
		tc358768_write(priv, TC358768_DSI_HBPR, 0);
	}

	/* hact (bytes) */
	tc358768_write(priv, TC358768_DSI_HACT, hact);

	/* VSYNC polarity */
	if (!(mode->flags & DRM_MODE_FLAG_NVSYNC))
		tc358768_update_bits(priv, TC358768_CONFCTL, BIT(5), BIT(5));
	/* HSYNC polarity */
	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
		tc358768_update_bits(priv, TC358768_PP_MISC, BIT(0), BIT(0));

	/* Start DSI Tx */
	tc358768_write(priv, TC358768_DSI_START, 0x1);

	/* Configure DSI_Control register: first clear the fields... */
	val = TC358768_DSI_CONFW_MODE_CLR | TC358768_DSI_CONFW_ADDR_DSI_CONTROL;
	val |= TC358768_DSI_CONTROL_TXMD | TC358768_DSI_CONTROL_HSCKMD |
	       0x3 << 1 | TC358768_DSI_CONTROL_EOTDIS;
	tc358768_write(priv, TC358768_DSI_CONFW, val);

	/* ...then set lane count, HS mode and EOT handling as requested */
	val = TC358768_DSI_CONFW_MODE_SET | TC358768_DSI_CONFW_ADDR_DSI_CONTROL;
	val |= (dsi_dev->lanes - 1) << 1;

	val |= TC358768_DSI_CONTROL_TXMD;

	if (!(mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
		val |= TC358768_DSI_CONTROL_HSCKMD;

	if (dsi_dev->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
		val |= TC358768_DSI_CONTROL_EOTDIS;

	tc358768_write(priv, TC358768_DSI_CONFW, val);

	val =
TC358768_DSI_CONFW_MODE_CLR | TC358768_DSI_CONFW_ADDR_DSI_CONTROL;
	val |= TC358768_DSI_CONTROL_DIS_MODE; /* DSI mode */
	tc358768_write(priv, TC358768_DSI_CONFW, val);

	ret = tc358768_clear_error(priv);
	if (ret) {
		dev_err(priv->dev, "Bridge pre_enable failed: %d\n", ret);
		tc358768_bridge_disable(bridge);
		tc358768_bridge_post_disable(bridge);
	}
}

/*
 * Bridge enable: release FrmStop/RstPtr and open the parallel-port input.
 * Unwinds through disable/post_disable if a latched I2C error is seen.
 */
static void tc358768_bridge_enable(struct drm_bridge *bridge)
{
	struct tc358768_priv *priv = bridge_to_tc358768(bridge);
	int ret;

	if (!priv->enabled) {
		dev_err(priv->dev, "Bridge is not enabled\n");
		return;
	}

	/* clear FrmStop and RstPtr */
	tc358768_update_bits(priv, TC358768_PP_MISC, 0x3 << 14, 0);

	/* set PP_en */
	tc358768_update_bits(priv, TC358768_CONFCTL, BIT(6), BIT(6));

	ret = tc358768_clear_error(priv);
	if (ret) {
		dev_err(priv->dev, "Bridge enable failed: %d\n", ret);
		tc358768_bridge_disable(bridge);
		tc358768_bridge_post_disable(bridge);
	}
}

#define MAX_INPUT_SEL_FORMATS	1

/*
 * Report the single parallel input bus format matching the configured
 * input data width (pd_lines); defaults to RGB888 for unknown widths.
 */
static u32 *
tc358768_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
				   struct drm_bridge_state *bridge_state,
				   struct drm_crtc_state *crtc_state,
				   struct drm_connector_state *conn_state,
				   u32 output_fmt,
				   unsigned int *num_input_fmts)
{
	struct tc358768_priv *priv = bridge_to_tc358768(bridge);
	u32 *input_fmts;

	*num_input_fmts = 0;

	input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts),
			     GFP_KERNEL);
	if (!input_fmts)
		return NULL;

	switch (priv->pd_lines) {
	case 16:
		input_fmts[0] = MEDIA_BUS_FMT_RGB565_1X16;
		break;
	case 18:
		input_fmts[0] = MEDIA_BUS_FMT_RGB666_1X18;
		break;
	default:
	case 24:
		input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
		break;
	}

	*num_input_fmts = MAX_INPUT_SEL_FORMATS;

	return input_fmts;
}

static const struct drm_bridge_funcs tc358768_bridge_funcs = {
	.attach = tc358768_bridge_attach,
	.mode_valid = tc358768_bridge_mode_valid,
	.pre_enable = tc358768_bridge_pre_enable,
	.enable = tc358768_bridge_enable,
	.disable = tc358768_bridge_disable,
	.post_disable = tc358768_bridge_post_disable,
	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
	.atomic_reset = drm_atomic_helper_bridge_reset,
	.atomic_get_input_bus_fmts = tc358768_atomic_get_input_bus_fmts,
};

static const struct drm_bridge_timings default_tc358768_timings = {
	.input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE |
			   DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE |
			   DRM_BUS_FLAG_DE_HIGH,
};

/* Register ranges documented as reserved; never read or written. */
static bool tc358768_is_reserved_reg(unsigned int reg)
{
	switch (reg) {
	case 0x114 ... 0x13f:
	case 0x200:
	case 0x20c:
	case 0x400 ... 0x408:
	case 0x41c ... 0x42f:
		return true;
	default:
		return false;
	}
}

/* regmap callback: filter out reserved and read-only registers. */
static bool tc358768_writeable_reg(struct device *dev, unsigned int reg)
{
	if (tc358768_is_reserved_reg(reg))
		return false;

	switch (reg) {
	case TC358768_CHIPID:
	case TC358768_FIFOSTATUS:
	case TC358768_DSITXSTATUS ... (TC358768_DSITXSTATUS + 2):
	case TC358768_DSI_CONTROL ... (TC358768_DSI_INT_ENA + 2):
	case TC358768_DSICMD_RDFIFO ... (TC358768_DSI_ERR_HALT + 2):
		return false;
	default:
		return true;
	}
}

/* regmap callback: filter out reserved and write-only registers. */
static bool tc358768_readable_reg(struct device *dev, unsigned int reg)
{
	if (tc358768_is_reserved_reg(reg))
		return false;

	switch (reg) {
	case TC358768_STARTCNTRL:
	case TC358768_DSI_CONFW ... (TC358768_DSI_CONFW + 2):
	case TC358768_DSI_INT_CLR ... (TC358768_DSI_INT_CLR + 2):
	case TC358768_DSI_START ... (TC358768_DSI_START + 2):
	case TC358768_DBG_DATA:
		return false;
	default:
		return true;
	}
}

/* 16-bit big-endian register map, uncached so HW status is always fresh */
static const struct regmap_config tc358768_regmap_config = {
	.name = "tc358768",
	.reg_bits = 16,
	.val_bits = 16,
	.max_register = TC358768_DSI_HACT,
	.cache_type = REGCACHE_NONE,
	.writeable_reg = tc358768_writeable_reg,
	.readable_reg = tc358768_readable_reg,
	.reg_format_endian = REGMAP_ENDIAN_BIG,
	.val_format_endian = REGMAP_ENDIAN_BIG,
};

static const struct i2c_device_id tc358768_i2c_ids[] = {
	{ "tc358768", 0 },
	{ "tc358778", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, tc358768_i2c_ids);

static const struct of_device_id tc358768_of_ids[] = {
	{ .compatible = "toshiba,tc358768", },
	{ .compatible = "toshiba,tc358778", },
	{ }
};
MODULE_DEVICE_TABLE(of, tc358768_of_ids);

/* Bind the named supplies (tc358768_supplies, defined above) to @priv. */
static int tc358768_get_regulators(struct tc358768_priv *priv)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(priv->supplies); ++i)
		priv->supplies[i].supply = tc358768_supplies[i];

	ret = devm_regulator_bulk_get(priv->dev, ARRAY_SIZE(priv->supplies),
				      priv->supplies);
	if (ret < 0)
		dev_err(priv->dev, "failed to get regulators: %d\n", ret);

	return ret;
}

/*
 * I2C probe: acquire regulators, refclk, reset GPIO and regmap, then
 * register as a MIPI DSI host. All resources are devm-managed.
 */
static int tc358768_i2c_probe(struct i2c_client *client)
{
	struct tc358768_priv *priv;
	struct device *dev = &client->dev;
	struct device_node *np = dev->of_node;
	int ret;

	if (!np)
		return -ENODEV;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);
	priv->dev = dev;

	ret = tc358768_get_regulators(priv);
	if (ret)
		return ret;

	priv->refclk = devm_clk_get(dev, "refclk");
	if (IS_ERR(priv->refclk))
		return PTR_ERR(priv->refclk);

	/*
	 * RESX is low active, to disable tc358768 initially (keep in reset)
	 * the gpio line must be LOW. This is the ASSERTED state of
	 * GPIO_ACTIVE_LOW (GPIOD_OUT_HIGH == ASSERTED).
	 */
	priv->reset_gpio = devm_gpiod_get_optional(dev, "reset",
						   GPIOD_OUT_HIGH);
	if (IS_ERR(priv->reset_gpio))
		return PTR_ERR(priv->reset_gpio);

	priv->regmap = devm_regmap_init_i2c(client, &tc358768_regmap_config);
	if (IS_ERR(priv->regmap)) {
		dev_err(dev, "Failed to init regmap\n");
		return PTR_ERR(priv->regmap);
	}

	priv->dsi_host.dev = dev;
	priv->dsi_host.ops = &tc358768_dsi_host_ops;

	priv->bridge.funcs = &tc358768_bridge_funcs;
	priv->bridge.timings = &default_tc358768_timings;
	priv->bridge.of_node = np;

	i2c_set_clientdata(client, priv);

	return mipi_dsi_host_register(&priv->dsi_host);
}

static void tc358768_i2c_remove(struct i2c_client *client)
{
	struct tc358768_priv *priv = i2c_get_clientdata(client);

	mipi_dsi_host_unregister(&priv->dsi_host);
}

static struct i2c_driver tc358768_driver = {
	.driver = {
		.name = "tc358768",
		.of_match_table = tc358768_of_ids,
	},
	.id_table = tc358768_i2c_ids,
	.probe = tc358768_i2c_probe,
	.remove = tc358768_i2c_remove,
};
module_i2c_driver(tc358768_driver);

MODULE_AUTHOR("Peter Ujfalusi <[email protected]>");
MODULE_DESCRIPTION("TC358768AXBG/TC358778XBG DSI bridge");
MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/bridge/tc358768.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2019 Renesas Electronics Corporation
 * Copyright (C) 2016 Laurent Pinchart <[email protected]>
 */

#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>

/* Per-device state for a transparent LVDS encoder or decoder bridge. */
struct lvds_codec {
	struct device *dev;
	struct drm_bridge bridge;		/* our bridge, chained to panel_bridge */
	struct drm_bridge *panel_bridge;	/* downstream panel wrapped as a bridge */
	struct drm_bridge_timings timings;	/* sampling-edge flags (pclk-sample) */
	struct regulator *vcc;			/* "power" supply */
	struct gpio_desc *powerdown_gpio;	/* optional active-high powerdown */
	u32 connector_type;			/* LVDS for encoders, DPI for decoders */
	unsigned int bus_format;		/* decoder input format from data-mapping */
};

static inline struct lvds_codec *to_lvds_codec(struct drm_bridge *bridge)
{
	return container_of(bridge, struct lvds_codec, bridge);
}

/* Chain straight through to the downstream panel bridge. */
static int lvds_codec_attach(struct drm_bridge *bridge,
			     enum drm_bridge_attach_flags flags)
{
	struct lvds_codec *lvds_codec = to_lvds_codec(bridge);

	return drm_bridge_attach(bridge->encoder, lvds_codec->panel_bridge,
				 bridge, flags);
}

/* Power the codec up: enable vcc, then deassert the powerdown line. */
static void lvds_codec_enable(struct drm_bridge *bridge)
{
	struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
	int ret;

	ret = regulator_enable(lvds_codec->vcc);
	if (ret) {
		dev_err(lvds_codec->dev,
			"Failed to enable regulator \"vcc\": %d\n", ret);
		return;
	}

	if (lvds_codec->powerdown_gpio)
		gpiod_set_value_cansleep(lvds_codec->powerdown_gpio, 0);
}

/* Power the codec down: assert powerdown, then disable vcc. */
static void lvds_codec_disable(struct drm_bridge *bridge)
{
	struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
	int ret;

	if (lvds_codec->powerdown_gpio)
		gpiod_set_value_cansleep(lvds_codec->powerdown_gpio, 1);

	ret = regulator_disable(lvds_codec->vcc);
	if (ret)
		dev_err(lvds_codec->dev,
			"Failed to disable regulator \"vcc\": %d\n", ret);
}

/* Ops used when no data-mapping is known (encoders, legacy bindings). */
static const struct drm_bridge_funcs funcs = {
	.attach = lvds_codec_attach,
	.enable = lvds_codec_enable,
	.disable = lvds_codec_disable,
};

#define MAX_INPUT_SEL_FORMATS	1

/* Report the single input bus format derived from the data-mapping DT prop. */
static u32 *
lvds_codec_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
				     struct drm_bridge_state *bridge_state,
				     struct drm_crtc_state *crtc_state,
				     struct drm_connector_state *conn_state,
				     u32 output_fmt,
				     unsigned int *num_input_fmts)
{
	struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
	u32 *input_fmts;

	*num_input_fmts = 0;

	input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts),
			     GFP_KERNEL);
	if (!input_fmts)
		return NULL;

	input_fmts[0] = lvds_codec->bus_format;
	*num_input_fmts = MAX_INPUT_SEL_FORMATS;

	return input_fmts;
}

/* Ops used for decoders whose input format is known from data-mapping. */
static const struct drm_bridge_funcs funcs_decoder = {
	.attach = lvds_codec_attach,
	.enable = lvds_codec_enable,
	.disable = lvds_codec_disable,
	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
	.atomic_reset = drm_atomic_helper_bridge_reset,
	.atomic_get_input_bus_fmts = lvds_codec_atomic_get_input_bus_fmts,
};

/*
 * Probe: gather supply/GPIO resources, resolve the downstream panel from
 * the OF graph (port 1), pick the bridge ops based on the optional
 * data-mapping property, and register the bridge.
 */
static int lvds_codec_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *panel_node;
	struct device_node *bus_node;
	struct drm_panel *panel;
	struct lvds_codec *lvds_codec;
	u32 val;
	int ret;

	lvds_codec = devm_kzalloc(dev, sizeof(*lvds_codec), GFP_KERNEL);
	if (!lvds_codec)
		return -ENOMEM;

	lvds_codec->dev = &pdev->dev;
	lvds_codec->connector_type = (uintptr_t)of_device_get_match_data(dev);

	lvds_codec->vcc = devm_regulator_get(lvds_codec->dev, "power");
	if (IS_ERR(lvds_codec->vcc))
		return dev_err_probe(dev, PTR_ERR(lvds_codec->vcc),
				     "Unable to get \"vcc\" supply\n");

	lvds_codec->powerdown_gpio = devm_gpiod_get_optional(dev, "powerdown",
							     GPIOD_OUT_HIGH);
	if (IS_ERR(lvds_codec->powerdown_gpio))
		return dev_err_probe(dev, PTR_ERR(lvds_codec->powerdown_gpio),
				     "powerdown GPIO failure\n");

	/* Locate the panel DT node. */
	panel_node = of_graph_get_remote_node(dev->of_node, 1, 0);
	if (!panel_node) {
		dev_dbg(dev, "panel DT node not found\n");
		return -ENXIO;
	}

	panel = of_drm_find_panel(panel_node);
	of_node_put(panel_node);
	if (IS_ERR(panel)) {
		dev_dbg(dev, "panel not found, deferring probe\n");
		return PTR_ERR(panel);
	}

	lvds_codec->panel_bridge =
		devm_drm_panel_bridge_add_typed(dev, panel,
						lvds_codec->connector_type);
	if (IS_ERR(lvds_codec->panel_bridge))
		return PTR_ERR(lvds_codec->panel_bridge);

	lvds_codec->bridge.funcs = &funcs;

	/*
	 * Decoder input LVDS format is a property of the decoder chip or even
	 * its strapping. Handle data-mapping the same way lvds-panel does. In
	 * case data-mapping is not present, do nothing, since there are still
	 * legacy bindings which do not specify this property.
	 */
	if (lvds_codec->connector_type != DRM_MODE_CONNECTOR_LVDS) {
		bus_node = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0);
		if (!bus_node) {
			dev_dbg(dev, "bus DT node not found\n");
			return -ENXIO;
		}

		ret = drm_of_lvds_get_data_mapping(bus_node);
		of_node_put(bus_node);
		if (ret == -ENODEV) {
			dev_warn(dev, "missing 'data-mapping' DT property\n");
		} else if (ret < 0) {
			dev_err(dev, "invalid 'data-mapping' DT property\n");
			return ret;
		} else {
			lvds_codec->bus_format = ret;
			lvds_codec->bridge.funcs = &funcs_decoder;
		}
	}

	/*
	 * Encoder might sample data on different clock edge than the display,
	 * for example OnSemi FIN3385 has a dedicated strapping pin to select
	 * the sampling edge.
	 */
	if (lvds_codec->connector_type == DRM_MODE_CONNECTOR_LVDS &&
	    !of_property_read_u32(dev->of_node, "pclk-sample", &val)) {
		lvds_codec->timings.input_bus_flags = val ?
			DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE :
			DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE;
	}

	/*
	 * The panel_bridge bridge is attached to the panel's of_node,
	 * but we need a bridge attached to our of_node for our user
	 * to look up.
	 */
	lvds_codec->bridge.of_node = dev->of_node;
	lvds_codec->bridge.timings = &lvds_codec->timings;
	drm_bridge_add(&lvds_codec->bridge);

	platform_set_drvdata(pdev, lvds_codec);

	return 0;
}

static void lvds_codec_remove(struct platform_device *pdev)
{
	struct lvds_codec *lvds_codec = platform_get_drvdata(pdev);

	drm_bridge_remove(&lvds_codec->bridge);
}

static const struct of_device_id lvds_codec_match[] = {
	{
		.compatible = "lvds-decoder",
		.data = (void *)DRM_MODE_CONNECTOR_DPI,
	},
	{
		.compatible = "lvds-encoder",
		.data = (void *)DRM_MODE_CONNECTOR_LVDS,
	},
	{
		.compatible = "thine,thc63lvdm83d",
		.data = (void *)DRM_MODE_CONNECTOR_LVDS,
	},
	{},
};
MODULE_DEVICE_TABLE(of, lvds_codec_match);

static struct platform_driver lvds_codec_driver = {
	.probe = lvds_codec_probe,
	.remove_new = lvds_codec_remove,
	.driver = {
		.name = "lvds-codec",
		.of_match_table = lvds_codec_match,
	},
};
module_platform_driver(lvds_codec_driver);

MODULE_AUTHOR("Laurent Pinchart <[email protected]>");
MODULE_DESCRIPTION("LVDS encoders and decoders");
MODULE_LICENSE("GPL");
linux-master
drivers/gpu/drm/bridge/lvds-codec.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for MegaChips STDP4028 with GE B850v3 firmware (LVDS-DP)
 * Driver for MegaChips STDP2690 with GE B850v3 firmware (DP-DP++)
 * Copyright (c) 2017, Collabora Ltd.
 * Copyright (c) 2017, General Electric Company
 * This driver creates a drm_bridge and a drm_connector for the LVDS to DP++
 * display bridge of the GE B850v3. There are two physical bridges on the video
 * signal pipeline: a STDP4028(LVDS to DP) and a STDP2690(DP to DP++). The
 * physical bridges are automatically configured by the input video signal, and
 * the driver has no access to the video processing pipeline. The driver is
 * only needed to read EDID from the STDP2690 and to handle HPD events from the
 * STDP4028. The driver communicates with both bridges over i2c. The video
 * signal pipeline is as follows:
 *
 *   Host -> LVDS|--(STDP4028)--|DP -> DP|--(STDP2690)--|DP++ -> Video output
 */

#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

/* Offset of the extension-block count byte in an EDID base block. */
#define EDID_EXT_BLOCK_CNT 0x7E

/* STDP4028 (LVDS-to-DP) register map, accessed via SMBus word transfers. */
#define STDP4028_IRQ_OUT_CONF_REG 0x02
#define STDP4028_DPTX_IRQ_EN_REG 0x3C
#define STDP4028_DPTX_IRQ_STS_REG 0x3D
#define STDP4028_DPTX_STS_REG 0x3E

#define STDP4028_DPTX_DP_IRQ_EN 0x1000

#define STDP4028_DPTX_HOTPLUG_IRQ_EN 0x0400
#define STDP4028_DPTX_LINK_CH_IRQ_EN 0x2000
#define STDP4028_DPTX_IRQ_CONFIG \
	(STDP4028_DPTX_LINK_CH_IRQ_EN | STDP4028_DPTX_HOTPLUG_IRQ_EN)

#define STDP4028_DPTX_HOTPLUG_STS 0x0200
#define STDP4028_DPTX_LINK_STS 0x1000
/* "Connected" means both hot-plug detect and link status bits are set. */
#define STDP4028_CON_STATE_CONNECTED \
	(STDP4028_DPTX_HOTPLUG_STS | STDP4028_DPTX_LINK_STS)

#define STDP4028_DPTX_HOTPLUG_CH_STS 0x0400
#define STDP4028_DPTX_LINK_CH_STS 0x2000
#define STDP4028_DPTX_IRQ_CLEAR \
	(STDP4028_DPTX_LINK_CH_STS | STDP4028_DPTX_HOTPLUG_CH_STS)

/* Serializes creation/destruction of the shared singleton below. */
static DEFINE_MUTEX(ge_b850v3_lvds_dev_mutex);

struct ge_b850v3_lvds {
	struct drm_connector connector;
	struct drm_bridge bridge;
	struct i2c_client *stdp4028_i2c;
	struct i2c_client *stdp2690_i2c;
};

/*
 * Singleton shared by the two i2c drivers (STDP4028 and STDP2690); the
 * drm_bridge is only registered once both clients have probed.
 */
static struct ge_b850v3_lvds *ge_b850v3_lvds_ptr;

/*
 * Read the full EDID (base block plus any extension blocks) from the
 * STDP2690 over i2c. Returns a kmalloc'ed buffer the caller must kfree,
 * or NULL on transfer/validation failure.
 */
static u8 *stdp2690_get_edid(struct i2c_client *client)
{
	struct i2c_adapter *adapter = client->adapter;
	unsigned char start = 0x00;
	unsigned int total_size;
	u8 *block = kmalloc(EDID_LENGTH, GFP_KERNEL);

	struct i2c_msg msgs[] = {
		{
			.addr	= client->addr,
			.flags	= 0,
			.len	= 1,
			.buf	= &start,
		}, {
			.addr	= client->addr,
			.flags	= I2C_M_RD,
			.len	= EDID_LENGTH,
			.buf	= block,
		}
	};

	if (!block)
		return NULL;

	if (i2c_transfer(adapter, msgs, 2) != 2) {
		DRM_ERROR("Unable to read EDID.\n");
		goto err;
	}

	if (!drm_edid_block_valid(block, 0, false, NULL)) {
		DRM_ERROR("Invalid EDID data\n");
		goto err;
	}

	/* Base block says how many extension blocks follow. */
	total_size = (block[EDID_EXT_BLOCK_CNT] + 1) * EDID_LENGTH;
	if (total_size > EDID_LENGTH) {
		kfree(block);
		block = kmalloc(total_size, GFP_KERNEL);
		if (!block)
			return NULL;

		/* Yes, read the entire buffer, and do not skip the first
		 * EDID_LENGTH bytes.
		 */
		start = 0x00;
		msgs[1].len = total_size;
		msgs[1].buf = block;

		if (i2c_transfer(adapter, msgs, 2) != 2) {
			DRM_ERROR("Unable to read EDID extension blocks.\n");
			goto err;
		}
		if (!drm_edid_block_valid(block, 1, false, NULL)) {
			DRM_ERROR("Invalid EDID data\n");
			goto err;
		}
	}

	return block;

err:
	kfree(block);
	return NULL;
}

/* drm_bridge_funcs.get_edid: caller owns (and frees) the returned EDID. */
static struct edid *ge_b850v3_lvds_get_edid(struct drm_bridge *bridge,
					    struct drm_connector *connector)
{
	struct i2c_client *client;

	client = ge_b850v3_lvds_ptr->stdp2690_i2c;

	return (struct edid *)stdp2690_get_edid(client);
}

/* Connector helper: populate modes from the freshly-read EDID. */
static int ge_b850v3_lvds_get_modes(struct drm_connector *connector)
{
	struct edid *edid;
	int num_modes;

	edid = ge_b850v3_lvds_get_edid(&ge_b850v3_lvds_ptr->bridge, connector);

	drm_connector_update_edid_property(connector, edid);
	num_modes = drm_add_edid_modes(connector, edid);

	kfree(edid);
	return num_modes;
}

/*
 * The bridges configure themselves from the incoming video signal, so
 * every mode the host can produce is accepted.
 */
static enum drm_mode_status ge_b850v3_lvds_mode_valid(
		struct drm_connector *connector, struct drm_display_mode *mode)
{
	return MODE_OK;
}

static const struct
drm_connector_helper_funcs ge_b850v3_lvds_connector_helper_funcs = {
	.get_modes = ge_b850v3_lvds_get_modes,
	.mode_valid = ge_b850v3_lvds_mode_valid,
};

/* Read the STDP4028 link status register to derive connector state. */
static enum drm_connector_status ge_b850v3_lvds_bridge_detect(struct drm_bridge *bridge)
{
	struct i2c_client *stdp4028_i2c = ge_b850v3_lvds_ptr->stdp4028_i2c;
	s32 link_state;

	link_state = i2c_smbus_read_word_data(stdp4028_i2c,
					      STDP4028_DPTX_STS_REG);

	if (link_state == STDP4028_CON_STATE_CONNECTED)
		return connector_status_connected;

	if (link_state == 0)
		return connector_status_disconnected;

	return connector_status_unknown;
}

static enum drm_connector_status ge_b850v3_lvds_detect(struct drm_connector *connector,
						       bool force)
{
	return ge_b850v3_lvds_bridge_detect(&ge_b850v3_lvds_ptr->bridge);
}

static const struct drm_connector_funcs ge_b850v3_lvds_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.detect = ge_b850v3_lvds_detect,
	.destroy = drm_connector_cleanup,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

/* Create and attach the DisplayPort connector (legacy, non-NO_CONNECTOR path). */
static int ge_b850v3_lvds_create_connector(struct drm_bridge *bridge)
{
	struct drm_connector *connector = &ge_b850v3_lvds_ptr->connector;
	int ret;

	if (!bridge->encoder) {
		DRM_ERROR("Parent encoder object not found");
		return -ENODEV;
	}

	connector->polled = DRM_CONNECTOR_POLL_HPD;

	drm_connector_helper_add(connector,
				 &ge_b850v3_lvds_connector_helper_funcs);

	ret = drm_connector_init(bridge->dev, connector,
				 &ge_b850v3_lvds_connector_funcs,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		DRM_ERROR("Failed to initialize connector with drm\n");
		return ret;
	}

	return drm_connector_attach_encoder(connector, bridge->encoder);
}

/*
 * Threaded IRQ handler: ack the STDP4028 interrupt, then report a hotplug
 * event if the bridge is already attached to a DRM device.
 */
static irqreturn_t ge_b850v3_lvds_irq_handler(int irq, void *dev_id)
{
	struct i2c_client *stdp4028_i2c
			= ge_b850v3_lvds_ptr->stdp4028_i2c;

	i2c_smbus_write_word_data(stdp4028_i2c,
				  STDP4028_DPTX_IRQ_STS_REG,
				  STDP4028_DPTX_IRQ_CLEAR);

	if (ge_b850v3_lvds_ptr->bridge.dev)
		drm_kms_helper_hotplug_event(ge_b850v3_lvds_ptr->bridge.dev);

	return IRQ_HANDLED;
}

static int ge_b850v3_lvds_attach(struct drm_bridge *bridge,
				 enum drm_bridge_attach_flags flags)
{
	struct i2c_client *stdp4028_i2c
			= ge_b850v3_lvds_ptr->stdp4028_i2c;

	/* Configures the bridge to re-enable interrupts after each ack. */
	i2c_smbus_write_word_data(stdp4028_i2c,
				  STDP4028_IRQ_OUT_CONF_REG,
				  STDP4028_DPTX_DP_IRQ_EN);

	/* Enable interrupts */
	i2c_smbus_write_word_data(stdp4028_i2c,
				  STDP4028_DPTX_IRQ_EN_REG,
				  STDP4028_DPTX_IRQ_CONFIG);

	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
		return 0;

	return ge_b850v3_lvds_create_connector(bridge);
}

static const struct drm_bridge_funcs ge_b850v3_lvds_funcs = {
	.attach = ge_b850v3_lvds_attach,
	.detect = ge_b850v3_lvds_bridge_detect,
	.get_edid = ge_b850v3_lvds_get_edid,
};

/*
 * Allocate the shared singleton on first call; subsequent calls (from the
 * second i2c driver) just reuse it. Allocation is devm-tied to whichever
 * client probed first.
 */
static int ge_b850v3_lvds_init(struct device *dev)
{
	mutex_lock(&ge_b850v3_lvds_dev_mutex);

	if (ge_b850v3_lvds_ptr)
		goto success;

	ge_b850v3_lvds_ptr = devm_kzalloc(dev,
					  sizeof(*ge_b850v3_lvds_ptr),
					  GFP_KERNEL);

	if (!ge_b850v3_lvds_ptr) {
		mutex_unlock(&ge_b850v3_lvds_dev_mutex);
		return -ENOMEM;
	}

success:
	mutex_unlock(&ge_b850v3_lvds_dev_mutex);
	return 0;
}

static void ge_b850v3_lvds_remove(void)
{
	mutex_lock(&ge_b850v3_lvds_dev_mutex);
	/*
	 * This check is to avoid both the drivers
	 * removing the bridge in their remove() function
	 */
	if (!ge_b850v3_lvds_ptr ||
	    !ge_b850v3_lvds_ptr->stdp2690_i2c ||
		!ge_b850v3_lvds_ptr->stdp4028_i2c)
		goto out;

	drm_bridge_remove(&ge_b850v3_lvds_ptr->bridge);

	ge_b850v3_lvds_ptr = NULL;
out:
	mutex_unlock(&ge_b850v3_lvds_dev_mutex);
}

/*
 * Final registration step, run once both i2c clients have probed: set up
 * and add the drm_bridge, clear stale interrupts, and (if wired) request
 * the STDP4028 HPD interrupt.
 */
static int ge_b850v3_register(void)
{
	struct i2c_client *stdp4028_i2c = ge_b850v3_lvds_ptr->stdp4028_i2c;
	struct device *dev = &stdp4028_i2c->dev;

	/* drm bridge initialization */
	ge_b850v3_lvds_ptr->bridge.funcs = &ge_b850v3_lvds_funcs;
	ge_b850v3_lvds_ptr->bridge.ops = DRM_BRIDGE_OP_DETECT |
					 DRM_BRIDGE_OP_EDID;
	ge_b850v3_lvds_ptr->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
	ge_b850v3_lvds_ptr->bridge.of_node = dev->of_node;
	drm_bridge_add(&ge_b850v3_lvds_ptr->bridge);

	/* Clear pending interrupts since power up. */
	i2c_smbus_write_word_data(stdp4028_i2c,
				  STDP4028_DPTX_IRQ_STS_REG,
				  STDP4028_DPTX_IRQ_CLEAR);

	if (!stdp4028_i2c->irq)
		return 0;

	return devm_request_threaded_irq(&stdp4028_i2c->dev,
			stdp4028_i2c->irq, NULL,
			ge_b850v3_lvds_irq_handler,
			IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
			"ge-b850v3-lvds-dp", ge_b850v3_lvds_ptr);
}

static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c)
{
	struct device *dev = &stdp4028_i2c->dev;
	int ret;

	ret = ge_b850v3_lvds_init(dev);

	if (ret)
		return ret;

	ge_b850v3_lvds_ptr->stdp4028_i2c = stdp4028_i2c;
	i2c_set_clientdata(stdp4028_i2c, ge_b850v3_lvds_ptr);

	/* Only register after both bridges are probed */
	if (!ge_b850v3_lvds_ptr->stdp2690_i2c)
		return 0;

	return ge_b850v3_register();
}

static void stdp4028_ge_b850v3_fw_remove(struct i2c_client *stdp4028_i2c)
{
	ge_b850v3_lvds_remove();
}

static const struct i2c_device_id stdp4028_ge_b850v3_fw_i2c_table[] = {
	{"stdp4028_ge_fw", 0},
	{},
};
MODULE_DEVICE_TABLE(i2c, stdp4028_ge_b850v3_fw_i2c_table);

static const struct of_device_id stdp4028_ge_b850v3_fw_match[] = {
	{ .compatible = "megachips,stdp4028-ge-b850v3-fw" },
	{},
};
MODULE_DEVICE_TABLE(of, stdp4028_ge_b850v3_fw_match);

static struct i2c_driver stdp4028_ge_b850v3_fw_driver = {
	.id_table	= stdp4028_ge_b850v3_fw_i2c_table,
	.probe		= stdp4028_ge_b850v3_fw_probe,
	.remove		= stdp4028_ge_b850v3_fw_remove,
	.driver		= {
		.name		= "stdp4028-ge-b850v3-fw",
		.of_match_table = stdp4028_ge_b850v3_fw_match,
	},
};

static int stdp2690_ge_b850v3_fw_probe(struct i2c_client *stdp2690_i2c)
{
	struct device *dev = &stdp2690_i2c->dev;
	int ret;

	ret = ge_b850v3_lvds_init(dev);

	if (ret)
		return ret;

	ge_b850v3_lvds_ptr->stdp2690_i2c = stdp2690_i2c;
	i2c_set_clientdata(stdp2690_i2c, ge_b850v3_lvds_ptr);

	/* Only register after both bridges are probed */
	if (!ge_b850v3_lvds_ptr->stdp4028_i2c)
		return 0;

	return ge_b850v3_register();
}

static void stdp2690_ge_b850v3_fw_remove(struct i2c_client *stdp2690_i2c)
{
	ge_b850v3_lvds_remove();
}

static const struct i2c_device_id stdp2690_ge_b850v3_fw_i2c_table[] = {
	{"stdp2690_ge_fw", 0},
	{},
};
MODULE_DEVICE_TABLE(i2c, stdp2690_ge_b850v3_fw_i2c_table);

static const struct of_device_id stdp2690_ge_b850v3_fw_match[] = {
	{ .compatible = "megachips,stdp2690-ge-b850v3-fw" },
	{},
};
MODULE_DEVICE_TABLE(of, stdp2690_ge_b850v3_fw_match);

static struct i2c_driver stdp2690_ge_b850v3_fw_driver = {
	.id_table	= stdp2690_ge_b850v3_fw_i2c_table,
	.probe		= stdp2690_ge_b850v3_fw_probe,
	.remove		= stdp2690_ge_b850v3_fw_remove,
	.driver		= {
		.name		= "stdp2690-ge-b850v3-fw",
		.of_match_table = stdp2690_ge_b850v3_fw_match,
	},
};

/* Register both i2c drivers; unwind the first if the second fails. */
static int __init stdpxxxx_ge_b850v3_init(void)
{
	int ret;

	ret = i2c_add_driver(&stdp4028_ge_b850v3_fw_driver);
	if (ret)
		return ret;

	ret = i2c_add_driver(&stdp2690_ge_b850v3_fw_driver);
	if (ret)
		i2c_del_driver(&stdp4028_ge_b850v3_fw_driver);

	return ret;
}
module_init(stdpxxxx_ge_b850v3_init);

static void __exit stdpxxxx_ge_b850v3_exit(void)
{
	i2c_del_driver(&stdp2690_ge_b850v3_fw_driver);
	i2c_del_driver(&stdp4028_ge_b850v3_fw_driver);
}
module_exit(stdpxxxx_ge_b850v3_exit);

MODULE_AUTHOR("Peter Senna Tschudin <[email protected]>");
MODULE_AUTHOR("Martyn Welch <[email protected]>");
MODULE_DESCRIPTION("GE LVDS to DP++ display bridge)");
MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
// SPDX-License-Identifier: GPL-2.0 /* * TPD12S015 HDMI ESD protection & level shifter chip driver * * Copyright (C) 2019 Texas Instruments Incorporated * * Based on the omapdrm-specific encoder-opa362 driver * * Copyright (C) 2013 Texas Instruments Incorporated * Author: Tomi Valkeinen <[email protected]> */ #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/of.h> #include <linux/of_graph.h> #include <linux/platform_device.h> #include <drm/drm_bridge.h> struct tpd12s015_device { struct drm_bridge bridge; struct gpio_desc *ct_cp_hpd_gpio; struct gpio_desc *ls_oe_gpio; struct gpio_desc *hpd_gpio; int hpd_irq; struct drm_bridge *next_bridge; }; static inline struct tpd12s015_device *to_tpd12s015(struct drm_bridge *bridge) { return container_of(bridge, struct tpd12s015_device, bridge); } static int tpd12s015_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct tpd12s015_device *tpd = to_tpd12s015(bridge); int ret; if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) return -EINVAL; ret = drm_bridge_attach(bridge->encoder, tpd->next_bridge, bridge, flags); if (ret < 0) return ret; gpiod_set_value_cansleep(tpd->ls_oe_gpio, 1); /* DC-DC converter needs at max 300us to get to 90% of 5V. 
*/ usleep_range(300, 1000); return 0; } static void tpd12s015_detach(struct drm_bridge *bridge) { struct tpd12s015_device *tpd = to_tpd12s015(bridge); gpiod_set_value_cansleep(tpd->ls_oe_gpio, 0); } static enum drm_connector_status tpd12s015_detect(struct drm_bridge *bridge) { struct tpd12s015_device *tpd = to_tpd12s015(bridge); if (gpiod_get_value_cansleep(tpd->hpd_gpio)) return connector_status_connected; else return connector_status_disconnected; } static void tpd12s015_hpd_enable(struct drm_bridge *bridge) { struct tpd12s015_device *tpd = to_tpd12s015(bridge); gpiod_set_value_cansleep(tpd->ct_cp_hpd_gpio, 1); } static void tpd12s015_hpd_disable(struct drm_bridge *bridge) { struct tpd12s015_device *tpd = to_tpd12s015(bridge); gpiod_set_value_cansleep(tpd->ct_cp_hpd_gpio, 0); } static const struct drm_bridge_funcs tpd12s015_bridge_funcs = { .attach = tpd12s015_attach, .detach = tpd12s015_detach, .detect = tpd12s015_detect, .hpd_enable = tpd12s015_hpd_enable, .hpd_disable = tpd12s015_hpd_disable, }; static irqreturn_t tpd12s015_hpd_isr(int irq, void *data) { struct tpd12s015_device *tpd = data; struct drm_bridge *bridge = &tpd->bridge; drm_bridge_hpd_notify(bridge, tpd12s015_detect(bridge)); return IRQ_HANDLED; } static int tpd12s015_probe(struct platform_device *pdev) { struct tpd12s015_device *tpd; struct device_node *node; struct gpio_desc *gpio; int ret; tpd = devm_kzalloc(&pdev->dev, sizeof(*tpd), GFP_KERNEL); if (!tpd) return -ENOMEM; platform_set_drvdata(pdev, tpd); tpd->bridge.funcs = &tpd12s015_bridge_funcs; tpd->bridge.of_node = pdev->dev.of_node; tpd->bridge.type = DRM_MODE_CONNECTOR_HDMIA; tpd->bridge.ops = DRM_BRIDGE_OP_DETECT; /* Get the next bridge, connected to port@1. */ node = of_graph_get_remote_node(pdev->dev.of_node, 1, -1); if (!node) return -ENODEV; tpd->next_bridge = of_drm_find_bridge(node); of_node_put(node); if (!tpd->next_bridge) return -EPROBE_DEFER; /* Get the control and HPD GPIOs. 
*/ gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 0, GPIOD_OUT_LOW); if (IS_ERR(gpio)) return PTR_ERR(gpio); tpd->ct_cp_hpd_gpio = gpio; gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 1, GPIOD_OUT_LOW); if (IS_ERR(gpio)) return PTR_ERR(gpio); tpd->ls_oe_gpio = gpio; gpio = devm_gpiod_get_index(&pdev->dev, NULL, 2, GPIOD_IN); if (IS_ERR(gpio)) return PTR_ERR(gpio); tpd->hpd_gpio = gpio; /* Register the IRQ if the HPD GPIO is IRQ-capable. */ tpd->hpd_irq = gpiod_to_irq(tpd->hpd_gpio); if (tpd->hpd_irq >= 0) { ret = devm_request_threaded_irq(&pdev->dev, tpd->hpd_irq, NULL, tpd12s015_hpd_isr, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "tpd12s015 hpd", tpd); if (ret) return ret; tpd->bridge.ops |= DRM_BRIDGE_OP_HPD; } /* Register the DRM bridge. */ drm_bridge_add(&tpd->bridge); return 0; } static int __exit tpd12s015_remove(struct platform_device *pdev) { struct tpd12s015_device *tpd = platform_get_drvdata(pdev); drm_bridge_remove(&tpd->bridge); return 0; } static const struct of_device_id tpd12s015_of_match[] = { { .compatible = "ti,tpd12s015", }, {}, }; MODULE_DEVICE_TABLE(of, tpd12s015_of_match); static struct platform_driver tpd12s015_driver = { .probe = tpd12s015_probe, .remove = __exit_p(tpd12s015_remove), .driver = { .name = "tpd12s015", .of_match_table = tpd12s015_of_match, }, }; module_platform_driver(tpd12s015_driver); MODULE_AUTHOR("Tomi Valkeinen <[email protected]>"); MODULE_DESCRIPTION("TPD12S015 HDMI level shifter and ESD protection driver"); MODULE_LICENSE("GPL");
linux-master
drivers/gpu/drm/bridge/ti-tpd12s015.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Laurent Pinchart <[email protected]>
 */

#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>

/*
 * A drm_bridge that models a passive display connector (DVI, HDMI, VGA,
 * composite, S-Video, DP): optional HPD GPIO, optional DDC bus, optional
 * power supply and DDC-enable GPIO.
 */
struct display_connector {
	struct drm_bridge	bridge;

	struct gpio_desc	*hpd_gpio;	/* NULL when no HPD line */
	int			hpd_irq;	/* <0 when HPD is polled */

	struct regulator	*supply;	/* dp-pwr / hdmi-pwr, may be NULL */
	struct gpio_desc	*ddc_en;	/* HDMI ddc-en, may be NULL */
};

static inline struct display_connector *
to_display_connector(struct drm_bridge *bridge)
{
	return container_of(bridge, struct display_connector, bridge);
}

/* The connector is a leaf bridge: only the NO_CONNECTOR model is valid. */
static int display_connector_attach(struct drm_bridge *bridge,
				    enum drm_bridge_attach_flags flags)
{
	return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
}

/*
 * Detection strategy, in order of reliability: HPD GPIO level, then a DDC
 * probe, then a per-connector-type fallback.
 */
static enum drm_connector_status
display_connector_detect(struct drm_bridge *bridge)
{
	struct display_connector *conn = to_display_connector(bridge);

	if (conn->hpd_gpio) {
		if (gpiod_get_value_cansleep(conn->hpd_gpio))
			return connector_status_connected;
		else
			return connector_status_disconnected;
	}

	if (conn->bridge.ddc && drm_probe_ddc(conn->bridge.ddc))
		return connector_status_connected;

	switch (conn->bridge.type) {
	case DRM_MODE_CONNECTOR_DVIA:
	case DRM_MODE_CONNECTOR_DVID:
	case DRM_MODE_CONNECTOR_DVII:
	case DRM_MODE_CONNECTOR_HDMIA:
	case DRM_MODE_CONNECTOR_HDMIB:
		/*
		 * For DVI and HDMI connectors a DDC probe failure indicates
		 * that no cable is connected.
		 */
		return connector_status_disconnected;

	case DRM_MODE_CONNECTOR_Composite:
	case DRM_MODE_CONNECTOR_SVIDEO:
	case DRM_MODE_CONNECTOR_VGA:
	default:
		/*
		 * Composite and S-Video connectors have no other detection
		 * mean than the HPD GPIO. For VGA connectors, even if we have
		 * an I2C bus, we can't assume that the cable is disconnected
		 * if drm_probe_ddc fails, as some cables don't wire the DDC
		 * pins.
		 */
		return connector_status_unknown;
	}
}

/* Read the EDID over the connector's DDC bus (NULL-safe in drm_get_edid). */
static struct edid *display_connector_get_edid(struct drm_bridge *bridge,
					       struct drm_connector *connector)
{
	struct display_connector *conn = to_display_connector(bridge);

	return drm_get_edid(connector, conn->bridge.ddc);
}

/*
 * Since this bridge is tied to the connector, it acts like a passthrough,
 * so concerning the output bus formats, either pass the bus formats from the
 * previous bridge or return fallback data like done in the bridge function:
 * drm_atomic_bridge_chain_select_bus_fmts().
 * This supports negotiation if the bridge chain has all bits in place.
 */
static u32 *display_connector_get_output_bus_fmts(struct drm_bridge *bridge,
					struct drm_bridge_state *bridge_state,
					struct drm_crtc_state *crtc_state,
					struct drm_connector_state *conn_state,
					unsigned int *num_output_fmts)
{
	struct drm_bridge *prev_bridge = drm_bridge_get_prev_bridge(bridge);
	struct drm_bridge_state *prev_bridge_state;

	if (!prev_bridge || !prev_bridge->funcs->atomic_get_output_bus_fmts) {
		struct drm_connector *conn = conn_state->connector;
		u32 *out_bus_fmts;

		*num_output_fmts = 1;
		out_bus_fmts = kmalloc(sizeof(*out_bus_fmts), GFP_KERNEL);
		if (!out_bus_fmts)
			return NULL;

		/* Prefer the format advertised by the display, if any. */
		if (conn->display_info.num_bus_formats &&
		    conn->display_info.bus_formats)
			out_bus_fmts[0] = conn->display_info.bus_formats[0];
		else
			out_bus_fmts[0] = MEDIA_BUS_FMT_FIXED;

		return out_bus_fmts;
	}

	prev_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
							    prev_bridge);

	return prev_bridge->funcs->atomic_get_output_bus_fmts(prev_bridge,
							      prev_bridge_state,
							      crtc_state,
							      conn_state,
							      num_output_fmts);
}

/*
 * Since this bridge is tied to the connector, it acts like a passthrough,
 * so concerning the input bus formats, either pass the bus formats from the
 * previous bridge or MEDIA_BUS_FMT_FIXED (like select_bus_fmt_recursive())
 * when atomic_get_input_bus_fmts is not supported.
 * This supports negotiation if the bridge chain has all bits in place.
 */
static u32 *display_connector_get_input_bus_fmts(struct drm_bridge *bridge,
					struct drm_bridge_state *bridge_state,
					struct drm_crtc_state *crtc_state,
					struct drm_connector_state *conn_state,
					u32 output_fmt,
					unsigned int *num_input_fmts)
{
	struct drm_bridge *prev_bridge = drm_bridge_get_prev_bridge(bridge);
	struct drm_bridge_state *prev_bridge_state;

	if (!prev_bridge || !prev_bridge->funcs->atomic_get_input_bus_fmts) {
		u32 *in_bus_fmts;

		*num_input_fmts = 1;
		in_bus_fmts = kmalloc(sizeof(*in_bus_fmts), GFP_KERNEL);
		if (!in_bus_fmts)
			return NULL;

		in_bus_fmts[0] = MEDIA_BUS_FMT_FIXED;

		return in_bus_fmts;
	}

	prev_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
							    prev_bridge);

	return prev_bridge->funcs->atomic_get_input_bus_fmts(prev_bridge,
							     prev_bridge_state,
							     crtc_state,
							     conn_state,
							     output_fmt,
							     num_input_fmts);
}

static const struct drm_bridge_funcs display_connector_bridge_funcs = {
	.attach = display_connector_attach,
	.detect = display_connector_detect,
	.get_edid = display_connector_get_edid,
	.atomic_get_output_bus_fmts = display_connector_get_output_bus_fmts,
	.atomic_get_input_bus_fmts = display_connector_get_input_bus_fmts,
	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
	.atomic_reset = drm_atomic_helper_bridge_reset,
};

/* HPD edge interrupt: re-detect and notify the DRM core. */
static irqreturn_t display_connector_hpd_irq(int irq, void *arg)
{
	struct display_connector *conn = arg;
	struct drm_bridge *bridge = &conn->bridge;

	drm_bridge_hpd_notify(bridge, display_connector_detect(bridge));

	return IRQ_HANDLED;
}

/*
 * Fetch an optional supply regulator by name; a missing regulator
 * (-ENODEV) is not an error and leaves conn->supply NULL.
 */
static int display_connector_get_supply(struct platform_device *pdev,
					struct display_connector *conn,
					const char *name)
{
	conn->supply = devm_regulator_get_optional(&pdev->dev, name);

	if (conn->supply == ERR_PTR(-ENODEV))
		conn->supply = NULL;

	return PTR_ERR_OR_ZERO(conn->supply);
}

static int display_connector_probe(struct platform_device *pdev)
{
	struct display_connector *conn;
	unsigned int type;
	const char *label = NULL;
	int ret;

	conn = devm_kzalloc(&pdev->dev, sizeof(*conn), GFP_KERNEL);
	if (!conn)
		return -ENOMEM;

	platform_set_drvdata(pdev, conn);

	/* Base connector type comes from the OF match data. */
	type = (uintptr_t)of_device_get_match_data(&pdev->dev);

	/* Get the exact connector type. */
	switch (type) {
	case DRM_MODE_CONNECTOR_DVII: {
		/* DT refines a DVI connector via "analog"/"digital" flags. */
		bool analog, digital;

		analog = of_property_read_bool(pdev->dev.of_node, "analog");
		digital = of_property_read_bool(pdev->dev.of_node, "digital");
		if (analog && !digital) {
			conn->bridge.type = DRM_MODE_CONNECTOR_DVIA;
		} else if (!analog && digital) {
			conn->bridge.type = DRM_MODE_CONNECTOR_DVID;
		} else if (analog && digital) {
			conn->bridge.type = DRM_MODE_CONNECTOR_DVII;
		} else {
			dev_err(&pdev->dev, "DVI connector with no type\n");
			return -EINVAL;
		}
		break;
	}

	case DRM_MODE_CONNECTOR_HDMIA: {
		/* HDMI type "b" maps to HDMIB; "a"/"c"/"d"/"e" to HDMIA. */
		const char *hdmi_type;

		ret = of_property_read_string(pdev->dev.of_node, "type",
					      &hdmi_type);
		if (ret < 0) {
			dev_err(&pdev->dev, "HDMI connector with no type\n");
			return -EINVAL;
		}

		if (!strcmp(hdmi_type, "a") || !strcmp(hdmi_type, "c") ||
		    !strcmp(hdmi_type, "d") || !strcmp(hdmi_type, "e")) {
			conn->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
		} else if (!strcmp(hdmi_type, "b")) {
			conn->bridge.type = DRM_MODE_CONNECTOR_HDMIB;
		} else {
			dev_err(&pdev->dev,
				"Unsupported HDMI connector type '%s'\n",
				hdmi_type);
			return -EINVAL;
		}

		break;
	}

	default:
		conn->bridge.type = type;
		break;
	}

	/* All the supported connector types support interlaced modes. */
	conn->bridge.interlace_allowed = true;

	/* Get the optional connector label. */
	of_property_read_string(pdev->dev.of_node, "label", &label);

	/*
	 * Get the HPD GPIO for DVI, HDMI and DP connectors. If the GPIO can provide
	 * edge interrupts, register an interrupt handler.
	 */
	if (type == DRM_MODE_CONNECTOR_DVII ||
	    type == DRM_MODE_CONNECTOR_HDMIA ||
	    type == DRM_MODE_CONNECTOR_DisplayPort) {
		conn->hpd_gpio = devm_gpiod_get_optional(&pdev->dev, "hpd",
							 GPIOD_IN);
		if (IS_ERR(conn->hpd_gpio))
			return dev_err_probe(&pdev->dev,
					     PTR_ERR(conn->hpd_gpio),
					     "Unable to retrieve HPD GPIO\n");

		conn->hpd_irq = gpiod_to_irq(conn->hpd_gpio);
	} else {
		conn->hpd_irq = -EINVAL;
	}

	if (conn->hpd_irq >= 0) {
		ret = devm_request_threaded_irq(&pdev->dev, conn->hpd_irq,
						NULL, display_connector_hpd_irq,
						IRQF_TRIGGER_RISING |
						IRQF_TRIGGER_FALLING |
						IRQF_ONESHOT,
						"HPD", conn);
		if (ret) {
			/* Non-fatal: fall back to connector polling. */
			dev_info(&pdev->dev,
				 "Failed to request HPD edge interrupt, falling back to polling\n");
			conn->hpd_irq = -EINVAL;
		}
	}

	/* Retrieve the DDC I2C adapter for DVI, HDMI and VGA connectors. */
	if (type == DRM_MODE_CONNECTOR_DVII ||
	    type == DRM_MODE_CONNECTOR_HDMIA ||
	    type == DRM_MODE_CONNECTOR_VGA) {
		struct device_node *phandle;

		phandle = of_parse_phandle(pdev->dev.of_node, "ddc-i2c-bus", 0);
		if (phandle) {
			conn->bridge.ddc = of_get_i2c_adapter_by_node(phandle);
			of_node_put(phandle);
			if (!conn->bridge.ddc)
				return -EPROBE_DEFER;
		} else {
			dev_dbg(&pdev->dev,
				"No I2C bus specified, disabling EDID readout\n");
		}
	}

	/* Get the DP PWR for DP connector. */
	if (type == DRM_MODE_CONNECTOR_DisplayPort) {
		int ret;

		ret = display_connector_get_supply(pdev, conn, "dp-pwr");
		if (ret < 0)
			return dev_err_probe(&pdev->dev, ret,
					     "failed to get DP PWR regulator\n");
	}

	/* enable DDC */
	if (type == DRM_MODE_CONNECTOR_HDMIA) {
		int ret;

		conn->ddc_en = devm_gpiod_get_optional(&pdev->dev, "ddc-en",
						       GPIOD_OUT_HIGH);

		if (IS_ERR(conn->ddc_en)) {
			dev_err(&pdev->dev, "Couldn't get ddc-en gpio\n");
			return PTR_ERR(conn->ddc_en);
		}

		ret = display_connector_get_supply(pdev, conn, "hdmi-pwr");
		if (ret < 0)
			return dev_err_probe(&pdev->dev, ret,
					     "failed to get HDMI +5V Power regulator\n");
	}

	if (conn->supply) {
		ret = regulator_enable(conn->supply);
		if (ret) {
			dev_err(&pdev->dev, "failed to enable PWR regulator: %d\n",
				ret);
			return ret;
		}
	}

	conn->bridge.funcs = &display_connector_bridge_funcs;
	conn->bridge.of_node = pdev->dev.of_node;

	/* Bridge ops depend on which optional resources are present. */
	if (conn->bridge.ddc)
		conn->bridge.ops |= DRM_BRIDGE_OP_EDID
				 |  DRM_BRIDGE_OP_DETECT;
	if (conn->hpd_gpio)
		conn->bridge.ops |= DRM_BRIDGE_OP_DETECT;
	if (conn->hpd_irq >= 0)
		conn->bridge.ops |= DRM_BRIDGE_OP_HPD;

	dev_dbg(&pdev->dev,
		"Found %s display connector '%s' %s DDC bus and %s HPD GPIO (ops 0x%x)\n",
		drm_get_connector_type_name(conn->bridge.type),
		label ? label : "<unlabelled>",
		conn->bridge.ddc ? "with" : "without",
		conn->hpd_gpio ? "with" : "without",
		conn->bridge.ops);

	drm_bridge_add(&conn->bridge);

	return 0;
}

static void display_connector_remove(struct platform_device *pdev)
{
	struct display_connector *conn = platform_get_drvdata(pdev);

	if (conn->ddc_en)
		gpiod_set_value(conn->ddc_en, 0);

	if (conn->supply)
		regulator_disable(conn->supply);

	drm_bridge_remove(&conn->bridge);

	if (!IS_ERR(conn->bridge.ddc))
		i2c_put_adapter(conn->bridge.ddc);
}

static const struct of_device_id display_connector_match[] = {
	{
		.compatible = "composite-video-connector",
		.data = (void *)DRM_MODE_CONNECTOR_Composite,
	}, {
		.compatible = "dvi-connector",
		.data = (void *)DRM_MODE_CONNECTOR_DVII,
	}, {
		.compatible = "hdmi-connector",
		.data = (void *)DRM_MODE_CONNECTOR_HDMIA,
	}, {
		.compatible = "svideo-connector",
		.data = (void *)DRM_MODE_CONNECTOR_SVIDEO,
	}, {
		.compatible = "vga-connector",
		.data = (void *)DRM_MODE_CONNECTOR_VGA,
	}, {
		.compatible = "dp-connector",
		.data = (void *)DRM_MODE_CONNECTOR_DisplayPort,
	},
	{},
};
MODULE_DEVICE_TABLE(of, display_connector_match);

static struct platform_driver display_connector_driver = {
	.probe = display_connector_probe,
	.remove_new = display_connector_remove,
	.driver = {
		.name = "display-connector",
		.of_match_table = display_connector_match,
	},
};
module_platform_driver(display_connector_driver);

MODULE_AUTHOR("Laurent Pinchart <[email protected]>");
MODULE_DESCRIPTION("Display connector driver");
MODULE_LICENSE("GPL");
linux-master
drivers/gpu/drm/bridge/display-connector.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) /* * Copyright (c) 2020, The Linux Foundation. All rights reserved. */ #include <linux/bits.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/err.h> #include <linux/extcon.h> #include <linux/fs.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/types.h> #include <linux/wait.h> #include <crypto/hash.h> #include <drm/display/drm_dp_helper.h> #include <drm/display/drm_hdcp_helper.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <sound/hdmi-codec.h> #define REG_IC_VER 0x04 #define REG_RESET_CTRL 0x05 #define VIDEO_RESET BIT(0) #define AUDIO_RESET BIT(1) #define ALL_LOGIC_RESET BIT(2) #define AUX_RESET BIT(3) #define HDCP_RESET BIT(4) #define INT_STATUS_01 0x06 #define INT_MASK_01 0x09 #define INT_HPD_CHANGE 0 #define INT_RECEIVE_HPD_IRQ 1 #define INT_SCDT_CHANGE 2 #define INT_HDCP_FAIL 3 #define INT_HDCP_DONE 4 #define BIT_OFFSET(x) (((x) - INT_STATUS_01) * BITS_PER_BYTE) #define BIT_INT_HPD INT_HPD_CHANGE #define BIT_INT_HPD_IRQ INT_RECEIVE_HPD_IRQ #define BIT_INT_SCDT INT_SCDT_CHANGE #define BIT_INT_HDCP_FAIL INT_HDCP_FAIL #define BIT_INT_HDCP_DONE INT_HDCP_DONE #define INT_STATUS_02 0x07 #define INT_MASK_02 0x0A #define INT_AUX_CMD_FAIL 0 #define INT_HDCP_KSV_CHECK 1 #define INT_AUDIO_FIFO_ERROR 2 #define BIT_INT_AUX_CMD_FAIL (BIT_OFFSET(0x07) + INT_AUX_CMD_FAIL) #define BIT_INT_HDCP_KSV_CHECK (BIT_OFFSET(0x07) + INT_HDCP_KSV_CHECK) #define BIT_INT_AUDIO_FIFO_ERROR (BIT_OFFSET(0x07) + INT_AUDIO_FIFO_ERROR) #define INT_STATUS_03 0x08 #define INT_MASK_03 0x0B #define INT_LINK_TRAIN_FAIL 4 #define INT_VID_FIFO_ERROR 5 #define INT_IO_LATCH_FIFO_OVERFLOW 7 
#define BIT_INT_LINK_TRAIN_FAIL (BIT_OFFSET(0x08) + INT_LINK_TRAIN_FAIL) #define BIT_INT_VID_FIFO_ERROR (BIT_OFFSET(0x08) + INT_VID_FIFO_ERROR) #define BIT_INT_IO_FIFO_OVERFLOW (BIT_OFFSET(0x08) + INT_IO_LATCH_FIFO_OVERFLOW) #define REG_SYSTEM_STS 0x0D #define INT_STS BIT(0) #define HPD_STS BIT(1) #define VIDEO_STB BIT(2) #define REG_LINK_TRAIN_STS 0x0E #define LINK_STATE_CR BIT(2) #define LINK_STATE_EQ BIT(3) #define LINK_STATE_NORP BIT(4) #define REG_BANK_SEL 0x0F #define REG_CLK_CTRL0 0x10 #define M_PCLK_DELAY 0x03 #define REG_AUX_OPT 0x11 #define AUX_AUTO_RST BIT(0) #define AUX_FIX_FREQ BIT(3) #define REG_DATA_CTRL0 0x12 #define VIDEO_LATCH_EDGE BIT(4) #define ENABLE_PCLK_COUNTER BIT(7) #define REG_PCLK_COUNTER_VALUE 0x13 #define REG_501_FIFO_CTRL 0x15 #define RST_501_FIFO BIT(1) #define REG_TRAIN_CTRL0 0x16 #define FORCE_LBR BIT(0) #define LANE_COUNT_MASK 0x06 #define LANE_SWAP BIT(3) #define SPREAD_AMP_5 BIT(4) #define FORCE_CR_DONE BIT(5) #define FORCE_EQ_DONE BIT(6) #define REG_TRAIN_CTRL1 0x17 #define AUTO_TRAIN BIT(0) #define MANUAL_TRAIN BIT(1) #define FORCE_RETRAIN BIT(2) #define REG_AUX_CTRL 0x23 #define CLR_EDID_FIFO BIT(0) #define AUX_USER_MODE BIT(1) #define AUX_NO_SEGMENT_WR BIT(6) #define AUX_EN_FIFO_READ BIT(7) #define REG_AUX_ADR_0_7 0x24 #define REG_AUX_ADR_8_15 0x25 #define REG_AUX_ADR_16_19 0x26 #define REG_AUX_OUT_DATA0 0x27 #define REG_AUX_CMD_REQ 0x2B #define AUX_BUSY BIT(5) #define REG_AUX_DATA_0_7 0x2C #define REG_AUX_DATA_8_15 0x2D #define REG_AUX_DATA_16_23 0x2E #define REG_AUX_DATA_24_31 0x2F #define REG_AUX_DATA_FIFO 0x2F #define REG_AUX_ERROR_STS 0x9F #define M_AUX_REQ_FAIL 0x03 #define REG_HDCP_CTRL1 0x38 #define HDCP_CP_ENABLE BIT(0) #define REG_HDCP_TRIGGER 0x39 #define HDCP_TRIGGER_START BIT(0) #define HDCP_TRIGGER_CPIRQ BIT(1) #define HDCP_TRIGGER_KSV_DONE BIT(4) #define HDCP_TRIGGER_KSV_FAIL BIT(5) #define REG_HDCP_CTRL2 0x3A #define HDCP_AN_SEL BIT(0) #define HDCP_AN_GEN BIT(1) #define HDCP_HW_HPDIRQ_ACT BIT(2) #define 
HDCP_EN_M0_READ BIT(5) #define REG_M0_0_7 0x4C #define REG_AN_0_7 0x4C #define REG_SP_CTRL0 0x58 #define REG_IP_CTRL1 0x59 #define REG_IP_CTRL2 0x5A #define REG_LINK_DRV 0x5C #define DRV_HS BIT(1) #define REG_DRV_LN_DATA_SEL 0x5D #define REG_AUX 0x5E #define REG_VID_BUS_CTRL0 0x60 #define IN_DDR BIT(2) #define DDR_CD (0x01 << 6) #define REG_VID_BUS_CTRL1 0x61 #define TX_FIFO_RESET BIT(1) #define REG_INPUT_CTRL 0xA0 #define INPUT_HSYNC_POL BIT(0) #define INPUT_VSYNC_POL BIT(2) #define INPUT_INTERLACED BIT(4) #define REG_INPUT_HTOTAL 0xA1 #define REG_INPUT_HACTIVE_START 0xA3 #define REG_INPUT_HACTIVE_WIDTH 0xA5 #define REG_INPUT_HFRONT_PORCH 0xA7 #define REG_INPUT_HSYNC_WIDTH 0xA9 #define REG_INPUT_VTOTAL 0xAB #define REG_INPUT_VACTIVE_START 0xAD #define REG_INPUT_VACTIVE_WIDTH 0xAF #define REG_INPUT_VFRONT_PORCH 0xB1 #define REG_INPUT_VSYNC_WIDTH 0xB3 #define REG_AUDIO_SRC_CTRL 0xB8 #define M_AUDIO_I2S_EN 0x0F #define EN_I2S0 BIT(0) #define EN_I2S1 BIT(1) #define EN_I2S2 BIT(2) #define EN_I2S3 BIT(3) #define AUDIO_FIFO_RESET BIT(7) #define REG_AUDIO_FMT 0xB9 #define REG_AUDIO_FIFO_SEL 0xBA #define REG_AUDIO_CTRL0 0xBB #define AUDIO_FULL_PKT BIT(4) #define AUDIO_16B_BOUND BIT(5) #define REG_AUDIO_CTRL1 0xBC #define REG_AUDIO_INPUT_FREQ 0xBE #define REG_IEC958_STS0 0xBF #define REG_IEC958_STS1 0xC0 #define REG_IEC958_STS2 0xC1 #define REG_IEC958_STS3 0xC2 #define REG_IEC958_STS4 0xC3 #define REG_HPD_IRQ_TIME 0xC9 #define REG_AUX_DEBUG_MODE 0xCA #define REG_AUX_OPT2 0xCB #define REG_HDCP_OPT 0xCE #define REG_USER_DRV_PRE 0xCF #define REG_DATA_MUTE_CTRL 0xD3 #define ENABLE_ENHANCED_FRAME BIT(0) #define ENABLE_AUTO_VIDEO_FIFO_RESET BIT(1) #define EN_VID_MUTE BIT(4) #define EN_AUD_MUTE BIT(5) #define REG_TIME_STMP_CTRL 0xD4 #define EN_ENHANCE_VID_STMP BIT(0) #define EN_ENHANCE_AUD_STMP BIT(2) #define M_STAMP_STEP 0x30 #define EN_SSC_GAT BIT(6) #define REG_INFOFRAME_CTRL 0xE8 #define EN_AVI_PKT BIT(0) #define EN_AUD_PKT BIT(1) #define EN_MPG_PKT BIT(2) #define EN_GEN_PKT 
BIT(3) #define EN_VID_TIME_STMP BIT(4) #define EN_AUD_TIME_STMP BIT(5) #define EN_VID_CTRL_PKT (EN_AVI_PKT | EN_VID_TIME_STMP) #define EN_AUD_CTRL_PKT (EN_AUD_PKT | EN_AUD_TIME_STMP) #define REG_AUDIO_N_0_7 0xDE #define REG_AUDIO_N_8_15 0xDF #define REG_AUDIO_N_16_23 0xE0 #define REG_AVI_INFO_DB1 0xE9 #define REG_AVI_INFO_DB2 0xEA #define REG_AVI_INFO_DB3 0xEB #define REG_AVI_INFO_DB4 0xEC #define REG_AVI_INFO_DB5 0xED #define REG_AVI_INFO_SUM 0xF6 #define REG_AUD_INFOFRAM_DB1 0xF7 #define REG_AUD_INFOFRAM_DB2 0xF8 #define REG_AUD_INFOFRAM_DB3 0xF9 #define REG_AUD_INFOFRAM_DB4 0xFA #define REG_AUD_INFOFRAM_SUM 0xFB /* the following six registers are in bank1 */ #define REG_DRV_0_DB_800_MV 0x17E #define REG_PRE_0_DB_800_MV 0x17F #define REG_PRE_3P5_DB_800_MV 0x181 #define REG_SSC_CTRL0 0x188 #define REG_SSC_CTRL1 0x189 #define REG_SSC_CTRL2 0x18A #define RBR DP_LINK_BW_1_62 #define HBR DP_LINK_BW_2_7 #define HBR2 DP_LINK_BW_5_4 #define HBR3 DP_LINK_BW_8_1 #define DPCD_V_1_1 0x11 #define MISC_VERB 0xF0 #define MISC_VERC 0x70 #define I2S_INPUT_FORMAT_STANDARD 0 #define I2S_INPUT_FORMAT_32BIT 1 #define I2S_INPUT_LEFT_JUSTIFIED 0 #define I2S_INPUT_RIGHT_JUSTIFIED 1 #define I2S_DATA_1T_DELAY 0 #define I2S_DATA_NO_DELAY 1 #define I2S_WS_LEFT_CHANNEL 0 #define I2S_WS_RIGHT_CHANNEL 1 #define I2S_DATA_MSB_FIRST 0 #define I2S_DATA_LSB_FIRST 1 #define WORD_LENGTH_16BIT 0 #define WORD_LENGTH_18BIT 1 #define WORD_LENGTH_20BIT 2 #define WORD_LENGTH_24BIT 3 #define DEBUGFS_DIR_NAME "it6505-debugfs" #define READ_BUFFER_SIZE 400 /* Vendor option */ #define HDCP_DESIRED 1 #define MAX_LANE_COUNT 4 #define MAX_LINK_RATE HBR #define AUTO_TRAIN_RETRY 3 #define MAX_HDCP_DOWN_STREAM_COUNT 10 #define MAX_CR_LEVEL 0x03 #define MAX_EQ_LEVEL 0x03 #define AUX_WAIT_TIMEOUT_MS 15 #define AUX_FIFO_MAX_SIZE 32 #define PIXEL_CLK_DELAY 1 #define PIXEL_CLK_INVERSE 0 #define ADJUST_PHASE_THRESHOLD 80000 #define DPI_PIXEL_CLK_MAX 95000 #define HDCP_SHA1_FIFO_LEN (MAX_HDCP_DOWN_STREAM_COUNT * 5 + 10) 
#define DEFAULT_PWR_ON 0 #define DEFAULT_DRV_HOLD 0 #define AUDIO_SELECT I2S #define AUDIO_TYPE LPCM #define AUDIO_SAMPLE_RATE SAMPLE_RATE_48K #define AUDIO_CHANNEL_COUNT 2 #define I2S_INPUT_FORMAT I2S_INPUT_FORMAT_32BIT #define I2S_JUSTIFIED I2S_INPUT_LEFT_JUSTIFIED #define I2S_DATA_DELAY I2S_DATA_1T_DELAY #define I2S_WS_CHANNEL I2S_WS_LEFT_CHANNEL #define I2S_DATA_SEQUENCE I2S_DATA_MSB_FIRST #define AUDIO_WORD_LENGTH WORD_LENGTH_24BIT enum aux_cmd_type { CMD_AUX_NATIVE_READ = 0x0, CMD_AUX_NATIVE_WRITE = 0x5, CMD_AUX_I2C_EDID_READ = 0xB, }; enum aux_cmd_reply { REPLY_ACK, REPLY_NACK, REPLY_DEFER, }; enum link_train_status { LINK_IDLE, LINK_BUSY, LINK_OK, }; enum hdcp_state { HDCP_AUTH_IDLE, HDCP_AUTH_GOING, HDCP_AUTH_DONE, }; struct it6505_platform_data { struct regulator *pwr18; struct regulator *ovdd; struct gpio_desc *gpiod_reset; }; enum it6505_audio_select { I2S = 0, SPDIF, }; enum it6505_audio_sample_rate { SAMPLE_RATE_24K = 0x6, SAMPLE_RATE_32K = 0x3, SAMPLE_RATE_48K = 0x2, SAMPLE_RATE_96K = 0xA, SAMPLE_RATE_192K = 0xE, SAMPLE_RATE_44_1K = 0x0, SAMPLE_RATE_88_2K = 0x8, SAMPLE_RATE_176_4K = 0xC, }; enum it6505_audio_type { LPCM = 0, NLPCM, DSS, }; struct it6505_audio_data { enum it6505_audio_select select; enum it6505_audio_sample_rate sample_rate; enum it6505_audio_type type; u8 word_length; u8 channel_count; u8 i2s_input_format; u8 i2s_justified; u8 i2s_data_delay; u8 i2s_ws_channel; u8 i2s_data_sequence; }; struct it6505_audio_sample_rate_map { enum it6505_audio_sample_rate rate; int sample_rate_value; }; struct it6505_drm_dp_link { unsigned char revision; unsigned int rate; unsigned int num_lanes; unsigned long capabilities; }; struct debugfs_entries { char *name; const struct file_operations *fops; }; struct it6505 { struct drm_dp_aux aux; struct drm_bridge bridge; struct device *dev; struct it6505_drm_dp_link link; struct it6505_platform_data pdata; /* * Mutex protects extcon and interrupt functions from interfering * each other. 
*/ struct mutex extcon_lock; struct mutex mode_lock; /* used to bridge_detect */ struct mutex aux_lock; /* used to aux data transfers */ struct regmap *regmap; struct drm_display_mode source_output_mode; struct drm_display_mode video_info; struct notifier_block event_nb; struct extcon_dev *extcon; struct work_struct extcon_wq; int extcon_state; enum drm_connector_status connector_status; enum link_train_status link_state; struct work_struct link_works; u8 dpcd[DP_RECEIVER_CAP_SIZE]; u8 lane_count; u8 link_rate_bw_code; u8 sink_count; bool step_train; bool branch_device; bool enable_ssc; bool lane_swap_disabled; bool lane_swap; bool powered; bool hpd_state; u32 afe_setting; u32 max_dpi_pixel_clock; u32 max_lane_count; enum hdcp_state hdcp_status; struct delayed_work hdcp_work; struct work_struct hdcp_wait_ksv_list; struct completion extcon_completion; u8 auto_train_retry; bool hdcp_desired; bool is_repeater; u8 hdcp_down_stream_count; u8 bksvs[DRM_HDCP_KSV_LEN]; u8 sha1_input[HDCP_SHA1_FIFO_LEN]; bool enable_enhanced_frame; hdmi_codec_plugged_cb plugged_cb; struct device *codec_dev; struct delayed_work delayed_audio; struct it6505_audio_data audio; struct dentry *debugfs; /* it6505 driver hold option */ bool enable_drv_hold; struct edid *cached_edid; }; struct it6505_step_train_para { u8 voltage_swing[MAX_LANE_COUNT]; u8 pre_emphasis[MAX_LANE_COUNT]; }; /* * Vendor option afe settings for different platforms * 0: without FPC cable * 1: with FPC cable */ static const u8 afe_setting_table[][3] = { {0x82, 0x00, 0x45}, {0x93, 0x2A, 0x85} }; static const struct it6505_audio_sample_rate_map audio_sample_rate_map[] = { {SAMPLE_RATE_24K, 24000}, {SAMPLE_RATE_32K, 32000}, {SAMPLE_RATE_48K, 48000}, {SAMPLE_RATE_96K, 96000}, {SAMPLE_RATE_192K, 192000}, {SAMPLE_RATE_44_1K, 44100}, {SAMPLE_RATE_88_2K, 88200}, {SAMPLE_RATE_176_4K, 176400}, }; static const struct regmap_range it6505_bridge_volatile_ranges[] = { { .range_min = 0, .range_max = 0x1FF }, }; static const struct 
regmap_access_table it6505_bridge_volatile_table = { .yes_ranges = it6505_bridge_volatile_ranges, .n_yes_ranges = ARRAY_SIZE(it6505_bridge_volatile_ranges), }; static const struct regmap_range_cfg it6505_regmap_banks[] = { { .name = "it6505", .range_min = 0x00, .range_max = 0x1FF, .selector_reg = REG_BANK_SEL, .selector_mask = 0x1, .selector_shift = 0, .window_start = 0x00, .window_len = 0x100, }, }; static const struct regmap_config it6505_regmap_config = { .reg_bits = 8, .val_bits = 8, .volatile_table = &it6505_bridge_volatile_table, .cache_type = REGCACHE_NONE, .ranges = it6505_regmap_banks, .num_ranges = ARRAY_SIZE(it6505_regmap_banks), .max_register = 0x1FF, }; static int it6505_read(struct it6505 *it6505, unsigned int reg_addr) { unsigned int value; int err; struct device *dev = it6505->dev; if (!it6505->powered) return -ENODEV; err = regmap_read(it6505->regmap, reg_addr, &value); if (err < 0) { dev_err(dev, "read failed reg[0x%x] err: %d", reg_addr, err); return err; } return value; } static int it6505_write(struct it6505 *it6505, unsigned int reg_addr, unsigned int reg_val) { int err; struct device *dev = it6505->dev; if (!it6505->powered) return -ENODEV; err = regmap_write(it6505->regmap, reg_addr, reg_val); if (err < 0) { dev_err(dev, "write failed reg[0x%x] = 0x%x err = %d", reg_addr, reg_val, err); return err; } return 0; } static int it6505_set_bits(struct it6505 *it6505, unsigned int reg, unsigned int mask, unsigned int value) { int err; struct device *dev = it6505->dev; if (!it6505->powered) return -ENODEV; err = regmap_update_bits(it6505->regmap, reg, mask, value); if (err < 0) { dev_err(dev, "write reg[0x%x] = 0x%x mask = 0x%x failed err %d", reg, value, mask, err); return err; } return 0; } static void it6505_debug_print(struct it6505 *it6505, unsigned int reg, const char *prefix) { struct device *dev = it6505->dev; int val; if (!drm_debug_enabled(DRM_UT_DRIVER)) return; val = it6505_read(it6505, reg); if (val < 0) DRM_DEV_DEBUG_DRIVER(dev, "%s 
reg[%02x] read error (%d)", prefix, reg, val); else DRM_DEV_DEBUG_DRIVER(dev, "%s reg[%02x] = 0x%02x", prefix, reg, val); } static int it6505_dpcd_read(struct it6505 *it6505, unsigned long offset) { u8 value; int ret; struct device *dev = it6505->dev; ret = drm_dp_dpcd_readb(&it6505->aux, offset, &value); if (ret < 0) { dev_err(dev, "DPCD read failed [0x%lx] ret: %d", offset, ret); return ret; } return value; } static int it6505_dpcd_write(struct it6505 *it6505, unsigned long offset, u8 datain) { int ret; struct device *dev = it6505->dev; ret = drm_dp_dpcd_writeb(&it6505->aux, offset, datain); if (ret < 0) { dev_err(dev, "DPCD write failed [0x%lx] ret: %d", offset, ret); return ret; } return 0; } static int it6505_get_dpcd(struct it6505 *it6505, int offset, u8 *dpcd, int num) { int ret; struct device *dev = it6505->dev; ret = drm_dp_dpcd_read(&it6505->aux, offset, dpcd, num); if (ret < 0) return ret; DRM_DEV_DEBUG_DRIVER(dev, "ret = %d DPCD[0x%x] = 0x%*ph", ret, offset, num, dpcd); return 0; } static void it6505_dump(struct it6505 *it6505) { unsigned int i, j; u8 regs[16]; struct device *dev = it6505->dev; for (i = 0; i <= 0xff; i += 16) { for (j = 0; j < 16; j++) regs[j] = it6505_read(it6505, i + j); DRM_DEV_DEBUG_DRIVER(dev, "[0x%02x] = %16ph", i, regs); } } static bool it6505_get_sink_hpd_status(struct it6505 *it6505) { int reg_0d; reg_0d = it6505_read(it6505, REG_SYSTEM_STS); if (reg_0d < 0) return false; return reg_0d & HPD_STS; } static int it6505_read_word(struct it6505 *it6505, unsigned int reg) { int val0, val1; val0 = it6505_read(it6505, reg); if (val0 < 0) return val0; val1 = it6505_read(it6505, reg + 1); if (val1 < 0) return val1; return (val1 << 8) | val0; } static void it6505_calc_video_info(struct it6505 *it6505) { struct device *dev = it6505->dev; int hsync_pol, vsync_pol, interlaced; int htotal, hdes, hdew, hfph, hsyncw; int vtotal, vdes, vdew, vfph, vsyncw; int rddata, i, pclk, sum = 0; usleep_range(10000, 15000); rddata = it6505_read(it6505, 
REG_INPUT_CTRL); hsync_pol = rddata & INPUT_HSYNC_POL; vsync_pol = (rddata & INPUT_VSYNC_POL) >> 2; interlaced = (rddata & INPUT_INTERLACED) >> 4; htotal = it6505_read_word(it6505, REG_INPUT_HTOTAL) & 0x1FFF; hdes = it6505_read_word(it6505, REG_INPUT_HACTIVE_START) & 0x1FFF; hdew = it6505_read_word(it6505, REG_INPUT_HACTIVE_WIDTH) & 0x1FFF; hfph = it6505_read_word(it6505, REG_INPUT_HFRONT_PORCH) & 0x1FFF; hsyncw = it6505_read_word(it6505, REG_INPUT_HSYNC_WIDTH) & 0x1FFF; vtotal = it6505_read_word(it6505, REG_INPUT_VTOTAL) & 0xFFF; vdes = it6505_read_word(it6505, REG_INPUT_VACTIVE_START) & 0xFFF; vdew = it6505_read_word(it6505, REG_INPUT_VACTIVE_WIDTH) & 0xFFF; vfph = it6505_read_word(it6505, REG_INPUT_VFRONT_PORCH) & 0xFFF; vsyncw = it6505_read_word(it6505, REG_INPUT_VSYNC_WIDTH) & 0xFFF; DRM_DEV_DEBUG_DRIVER(dev, "hsync_pol:%d, vsync_pol:%d, interlaced:%d", hsync_pol, vsync_pol, interlaced); DRM_DEV_DEBUG_DRIVER(dev, "hactive_start:%d, vactive_start:%d", hdes, vdes); for (i = 0; i < 3; i++) { it6505_set_bits(it6505, REG_DATA_CTRL0, ENABLE_PCLK_COUNTER, ENABLE_PCLK_COUNTER); usleep_range(10000, 15000); it6505_set_bits(it6505, REG_DATA_CTRL0, ENABLE_PCLK_COUNTER, 0x00); rddata = it6505_read_word(it6505, REG_PCLK_COUNTER_VALUE) & 0xFFF; sum += rddata; } if (sum == 0) { DRM_DEV_DEBUG_DRIVER(dev, "calc video timing error"); return; } sum /= 3; pclk = 13500 * 2048 / sum; it6505->video_info.clock = pclk; it6505->video_info.hdisplay = hdew; it6505->video_info.hsync_start = hdew + hfph; it6505->video_info.hsync_end = hdew + hfph + hsyncw; it6505->video_info.htotal = htotal; it6505->video_info.vdisplay = vdew; it6505->video_info.vsync_start = vdew + vfph; it6505->video_info.vsync_end = vdew + vfph + vsyncw; it6505->video_info.vtotal = vtotal; DRM_DEV_DEBUG_DRIVER(dev, DRM_MODE_FMT, DRM_MODE_ARG(&it6505->video_info)); } static int it6505_drm_dp_link_set_power(struct drm_dp_aux *aux, struct it6505_drm_dp_link *link, u8 mode) { u8 value; int err; /* DP_SET_POWER register is 
only available on DPCD v1.1 and later */
	if (link->revision < DPCD_V_1_1)
		return 0;

	err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
	if (err < 0)
		return err;

	value &= ~DP_SET_POWER_MASK;
	value |= mode;

	err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
	if (err < 0)
		return err;

	if (mode == DP_SET_POWER_D0) {
		/*
		 * According to the DP 1.1 specification, a "Sink Device must
		 * exit the power saving state within 1 ms" (Section 2.5.3.1,
		 * Table 5-52, "Sink Control Field" (register 0x600).
		 */
		usleep_range(1000, 2000);
	}

	return 0;
}

/* Acknowledge every latched interrupt status bit (write-1-to-clear). */
static void it6505_clear_int(struct it6505 *it6505)
{
	it6505_write(it6505, INT_STATUS_01, 0xFF);
	it6505_write(it6505, INT_STATUS_02, 0xFF);
	it6505_write(it6505, INT_STATUS_03, 0xFF);
}

/* Unmask the interrupt sources the driver handles. */
static void it6505_int_mask_enable(struct it6505 *it6505)
{
	it6505_write(it6505, INT_MASK_01, BIT(INT_HPD_CHANGE) |
		     BIT(INT_RECEIVE_HPD_IRQ) | BIT(INT_SCDT_CHANGE) |
		     BIT(INT_HDCP_FAIL) | BIT(INT_HDCP_DONE));

	it6505_write(it6505, INT_MASK_02, BIT(INT_AUX_CMD_FAIL) |
		     BIT(INT_HDCP_KSV_CHECK) | BIT(INT_AUDIO_FIFO_ERROR));

	it6505_write(it6505, INT_MASK_03, BIT(INT_LINK_TRAIN_FAIL) |
		     BIT(INT_VID_FIFO_ERROR) |
		     BIT(INT_IO_LATCH_FIFO_OVERFLOW));
}

/* Mask (disable) all interrupt sources. */
static void it6505_int_mask_disable(struct it6505 *it6505)
{
	it6505_write(it6505, INT_MASK_01, 0x00);
	it6505_write(it6505, INT_MASK_02, 0x00);
	it6505_write(it6505, INT_MASK_03, 0x00);
}

/*
 * Enable main-link lane termination. The bits written depend on the chip
 * revision read from REG_USER_DRV_PRE (MISC_VERB vs MISC_VERC); on VERC
 * parts the value additionally depends on lane swap and lane count.
 */
static void it6505_lane_termination_on(struct it6505 *it6505)
{
	int regcf;

	regcf = it6505_read(it6505, REG_USER_DRV_PRE);

	if (regcf == MISC_VERB)
		it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL, 0x80, 0x00);

	if (regcf == MISC_VERC) {
		if (it6505->lane_swap) {
			switch (it6505->lane_count) {
			case 1:
			case 2:
				it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL,
						0x0C, 0x08);
				break;
			default:
				it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL,
						0x0C, 0x0C);
				break;
			}
		} else {
			switch (it6505->lane_count) {
			case 1:
			case 2:
				it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL,
						0x0C, 0x04);
				break;
			default:
				it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL,
						0x0C, 0x0C);
				break;
			}
		}
	}
}

/* Disable main-link lane termination (revision-dependent register bits). */
static void it6505_lane_termination_off(struct it6505 *it6505)
{
	int regcf;

	regcf = it6505_read(it6505, REG_USER_DRV_PRE);

	if (regcf == MISC_VERB)
		it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL, 0x80, 0x80);

	if (regcf == MISC_VERC)
		it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL, 0x0C, 0x00);
}

/*
 * Power up the active lanes. lane_swap selects which half of the drive
 * enable field (bits 7..4) covers lane_count lanes; bit 0 is always set.
 */
static void it6505_lane_power_on(struct it6505 *it6505)
{
	it6505_set_bits(it6505, REG_LINK_DRV, 0xF1,
			(it6505->lane_swap ?
				 GENMASK(7, 8 - it6505->lane_count) :
				 GENMASK(3 + it6505->lane_count, 4)) |
				0x01);
}

/* Power down all lane drivers. */
static void it6505_lane_power_off(struct it6505 *it6505)
{
	it6505_set_bits(it6505, REG_LINK_DRV, 0xF0, 0x00);
}

/* Fully shut the main link down: power off, then drop termination. */
static void it6505_lane_off(struct it6505 *it6505)
{
	it6505_lane_power_off(it6505);
	it6505_lane_termination_off(it6505);
}

/* Enable AUX-channel termination (register layout is revision-dependent). */
static void it6505_aux_termination_on(struct it6505 *it6505)
{
	int regcf;

	regcf = it6505_read(it6505, REG_USER_DRV_PRE);

	if (regcf == MISC_VERB)
		it6505_lane_termination_on(it6505);

	if (regcf == MISC_VERC)
		it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL, 0x80, 0x80);
}

/* Power up the AUX channel. */
static void it6505_aux_power_on(struct it6505 *it6505)
{
	it6505_set_bits(it6505, REG_AUX, 0x02, 0x02);
}

/* Bring up the AUX channel: power first, then termination. */
static void it6505_aux_on(struct it6505 *it6505)
{
	it6505_aux_power_on(it6505);
	it6505_aux_termination_on(it6505);
}

/* Pulse the AUX reset bit to recover the AUX state machine. */
static void it6505_aux_reset(struct it6505 *it6505)
{
	it6505_set_bits(it6505, REG_RESET_CTRL, AUX_RESET, AUX_RESET);
	it6505_set_bits(it6505, REG_RESET_CTRL, AUX_RESET, 0x00);
}

/*
 * Full logic reset. Writes via regmap_write() directly, bypassing the
 * it6505->powered guard that it6505_write() enforces.
 */
static void it6505_reset_logic(struct it6505 *it6505)
{
	regmap_write(it6505->regmap, REG_RESET_CTRL, ALL_LOGIC_RESET);
	usleep_range(1000, 1500);
}

/* Return true when no AUX transaction is in flight (AUX_BUSY clear). */
static bool it6505_aux_op_finished(struct it6505 *it6505)
{
	int reg2b = it6505_read(it6505, REG_AUX_CMD_REQ);

	if (reg2b < 0)
		return false;

	return (reg2b & AUX_BUSY) == 0;
}

/*
 * Poll for AUX completion for up to AUX_WAIT_TIMEOUT_MS, then read the
 * AUX error status register.
 */
static int it6505_aux_wait(struct it6505 *it6505)
{
	int status;
	unsigned long timeout;
	struct device *dev = it6505->dev;

	timeout = jiffies + msecs_to_jiffies(AUX_WAIT_TIMEOUT_MS) + 1;
	while (!it6505_aux_op_finished(it6505)) {
		if (time_after(jiffies, timeout)) {
			dev_err(dev, "Timed out 
waiting AUX to finish"); return -ETIMEDOUT; } usleep_range(1000, 2000); } status = it6505_read(it6505, REG_AUX_ERROR_STS); if (status < 0) { dev_err(dev, "Failed to read AUX channel: %d", status); return status; } return 0; } static ssize_t it6505_aux_operation(struct it6505 *it6505, enum aux_cmd_type cmd, unsigned int address, u8 *buffer, size_t size, enum aux_cmd_reply *reply) { int i, ret; bool aux_write_check = false; if (!it6505_get_sink_hpd_status(it6505)) return -EIO; /* set AUX user mode */ it6505_set_bits(it6505, REG_AUX_CTRL, AUX_USER_MODE, AUX_USER_MODE); aux_op_start: if (cmd == CMD_AUX_I2C_EDID_READ) { /* AUX EDID FIFO has max length of AUX_FIFO_MAX_SIZE bytes. */ size = min_t(size_t, size, AUX_FIFO_MAX_SIZE); /* Enable AUX FIFO read back and clear FIFO */ it6505_set_bits(it6505, REG_AUX_CTRL, AUX_EN_FIFO_READ | CLR_EDID_FIFO, AUX_EN_FIFO_READ | CLR_EDID_FIFO); it6505_set_bits(it6505, REG_AUX_CTRL, AUX_EN_FIFO_READ | CLR_EDID_FIFO, AUX_EN_FIFO_READ); } else { /* The DP AUX transmit buffer has 4 bytes. 
*/ size = min_t(size_t, size, 4); it6505_set_bits(it6505, REG_AUX_CTRL, AUX_NO_SEGMENT_WR, AUX_NO_SEGMENT_WR); } /* Start Address[7:0] */ it6505_write(it6505, REG_AUX_ADR_0_7, (address >> 0) & 0xFF); /* Start Address[15:8] */ it6505_write(it6505, REG_AUX_ADR_8_15, (address >> 8) & 0xFF); /* WriteNum[3:0]+StartAdr[19:16] */ it6505_write(it6505, REG_AUX_ADR_16_19, ((address >> 16) & 0x0F) | ((size - 1) << 4)); if (cmd == CMD_AUX_NATIVE_WRITE) regmap_bulk_write(it6505->regmap, REG_AUX_OUT_DATA0, buffer, size); /* Aux Fire */ it6505_write(it6505, REG_AUX_CMD_REQ, cmd); ret = it6505_aux_wait(it6505); if (ret < 0) goto aux_op_err; ret = it6505_read(it6505, REG_AUX_ERROR_STS); if (ret < 0) goto aux_op_err; switch ((ret >> 6) & 0x3) { case 0: *reply = REPLY_ACK; break; case 1: *reply = REPLY_DEFER; ret = -EAGAIN; goto aux_op_err; case 2: *reply = REPLY_NACK; ret = -EIO; goto aux_op_err; case 3: ret = -ETIMEDOUT; goto aux_op_err; } /* Read back Native Write data */ if (cmd == CMD_AUX_NATIVE_WRITE) { aux_write_check = true; cmd = CMD_AUX_NATIVE_READ; goto aux_op_start; } if (cmd == CMD_AUX_I2C_EDID_READ) { for (i = 0; i < size; i++) { ret = it6505_read(it6505, REG_AUX_DATA_FIFO); if (ret < 0) goto aux_op_err; buffer[i] = ret; } } else { for (i = 0; i < size; i++) { ret = it6505_read(it6505, REG_AUX_DATA_0_7 + i); if (ret < 0) goto aux_op_err; if (aux_write_check && buffer[size - 1 - i] != ret) { ret = -EINVAL; goto aux_op_err; } buffer[size - 1 - i] = ret; } } ret = i; aux_op_err: if (cmd == CMD_AUX_I2C_EDID_READ) { /* clear AUX FIFO */ it6505_set_bits(it6505, REG_AUX_CTRL, AUX_EN_FIFO_READ | CLR_EDID_FIFO, AUX_EN_FIFO_READ | CLR_EDID_FIFO); it6505_set_bits(it6505, REG_AUX_CTRL, AUX_EN_FIFO_READ | CLR_EDID_FIFO, 0x00); } /* Leave AUX user mode */ it6505_set_bits(it6505, REG_AUX_CTRL, AUX_USER_MODE, 0); return ret; } static ssize_t it6505_aux_do_transfer(struct it6505 *it6505, enum aux_cmd_type cmd, unsigned int address, u8 *buffer, size_t size, enum aux_cmd_reply *reply) { 
	int i, ret_size, ret = 0, request_size;

	mutex_lock(&it6505->aux_lock);
	/* Split the request: at most 4 bytes move per AUX transaction. */
	for (i = 0; i < size; i += 4) {
		request_size = min((int)size - i, 4);
		ret_size = it6505_aux_operation(it6505, cmd, address + i,
						buffer + i, request_size,
						reply);
		if (ret_size < 0) {
			ret = ret_size;
			goto aux_op_err;
		}

		ret += ret_size;
	}

aux_op_err:
	mutex_unlock(&it6505->aux_lock);
	return ret;
}

/*
 * drm_dp_aux transfer hook. Only native AUX reads and writes are handled;
 * arbitrary I2C-over-AUX requests are rejected with -EINVAL. The hardware
 * reply code is translated back into the DP_AUX_NATIVE_REPLY_* values the
 * DRM core expects.
 */
static ssize_t it6505_aux_transfer(struct drm_dp_aux *aux,
				   struct drm_dp_aux_msg *msg)
{
	struct it6505 *it6505 = container_of(aux, struct it6505, aux);
	u8 cmd;
	bool is_i2c = !(msg->request & DP_AUX_NATIVE_WRITE);
	int ret;
	enum aux_cmd_reply reply;

	/* IT6505 doesn't support arbitrary I2C read / write. */
	if (is_i2c)
		return -EINVAL;

	switch (msg->request) {
	case DP_AUX_NATIVE_READ:
		cmd = CMD_AUX_NATIVE_READ;
		break;
	case DP_AUX_NATIVE_WRITE:
		cmd = CMD_AUX_NATIVE_WRITE;
		break;
	default:
		return -EINVAL;
	}

	ret = it6505_aux_do_transfer(it6505, cmd, msg->address, msg->buffer,
				     msg->size, &reply);
	if (ret < 0)
		return ret;

	switch (reply) {
	case REPLY_ACK:
		msg->reply = DP_AUX_NATIVE_REPLY_ACK;
		break;
	case REPLY_NACK:
		msg->reply = DP_AUX_NATIVE_REPLY_NACK;
		break;
	case REPLY_DEFER:
		msg->reply = DP_AUX_NATIVE_REPLY_DEFER;
		break;
	}

	return ret;
}

/*
 * EDID block read callback. Reads one EDID block in 8-byte chunks through
 * the AUX EDID FIFO. A REPLY_DEFER is retried after 20 ms, up to 100 times
 * per chunk; a REPLY_NACK (or exhausted retries) fails with -EIO.
 */
static int it6505_get_edid_block(void *data, u8 *buf, unsigned int block,
				 size_t len)
{
	struct it6505 *it6505 = data;
	struct device *dev = it6505->dev;
	enum aux_cmd_reply reply;
	int offset, ret, aux_retry = 100;

	it6505_aux_reset(it6505);
	DRM_DEV_DEBUG_DRIVER(dev, "block number = %d", block);

	for (offset = 0; offset < EDID_LENGTH;) {
		ret = it6505_aux_do_transfer(it6505, CMD_AUX_I2C_EDID_READ,
					     block * EDID_LENGTH + offset,
					     buf + offset, 8, &reply);
		if (ret < 0 && ret != -EAGAIN)
			return ret;

		switch (reply) {
		case REPLY_ACK:
			DRM_DEV_DEBUG_DRIVER(dev, "[0x%02x]: %8ph", offset,
					     buf + offset);
			offset += 8;
			aux_retry = 100;
			break;
		case REPLY_NACK:
			return -EIO;
		case REPLY_DEFER:
			msleep(20);
			if (!(--aux_retry))
				return -EIO;
		}
	}

	return 0;
}

/* Load the driver's compile-time default link and audio configuration. */
static void it6505_variable_config(struct it6505
*it6505) { it6505->link_rate_bw_code = HBR; it6505->lane_count = MAX_LANE_COUNT; it6505->link_state = LINK_IDLE; it6505->hdcp_desired = HDCP_DESIRED; it6505->auto_train_retry = AUTO_TRAIN_RETRY; it6505->audio.select = AUDIO_SELECT; it6505->audio.sample_rate = AUDIO_SAMPLE_RATE; it6505->audio.channel_count = AUDIO_CHANNEL_COUNT; it6505->audio.type = AUDIO_TYPE; it6505->audio.i2s_input_format = I2S_INPUT_FORMAT; it6505->audio.i2s_justified = I2S_JUSTIFIED; it6505->audio.i2s_data_delay = I2S_DATA_DELAY; it6505->audio.i2s_ws_channel = I2S_WS_CHANNEL; it6505->audio.i2s_data_sequence = I2S_DATA_SEQUENCE; it6505->audio.word_length = AUDIO_WORD_LENGTH; memset(it6505->sha1_input, 0, sizeof(it6505->sha1_input)); memset(it6505->bksvs, 0, sizeof(it6505->bksvs)); } static int it6505_send_video_infoframe(struct it6505 *it6505, struct hdmi_avi_infoframe *frame) { u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; int err; struct device *dev = it6505->dev; err = hdmi_avi_infoframe_pack(frame, buffer, sizeof(buffer)); if (err < 0) { dev_err(dev, "Failed to pack AVI infoframe: %d", err); return err; } err = it6505_set_bits(it6505, REG_INFOFRAME_CTRL, EN_AVI_PKT, 0x00); if (err) return err; err = regmap_bulk_write(it6505->regmap, REG_AVI_INFO_DB1, buffer + HDMI_INFOFRAME_HEADER_SIZE, frame->length); if (err) return err; err = it6505_set_bits(it6505, REG_INFOFRAME_CTRL, EN_AVI_PKT, EN_AVI_PKT); if (err) return err; return 0; } static void it6505_get_extcon_property(struct it6505 *it6505) { int err; union extcon_property_value property; struct device *dev = it6505->dev; if (it6505->extcon && !it6505->lane_swap_disabled) { err = extcon_get_property(it6505->extcon, EXTCON_DISP_DP, EXTCON_PROP_USB_TYPEC_POLARITY, &property); if (err) { dev_err(dev, "get property fail!"); return; } it6505->lane_swap = property.intval; } } static void it6505_clk_phase_adjustment(struct it6505 *it6505, const struct drm_display_mode *mode) { int clock = mode->clock; it6505_set_bits(it6505, 
REG_CLK_CTRL0, M_PCLK_DELAY, clock < ADJUST_PHASE_THRESHOLD ? PIXEL_CLK_DELAY : 0); it6505_set_bits(it6505, REG_DATA_CTRL0, VIDEO_LATCH_EDGE, PIXEL_CLK_INVERSE << 4); } static void it6505_link_reset_step_train(struct it6505 *it6505) { it6505_set_bits(it6505, REG_TRAIN_CTRL0, FORCE_CR_DONE | FORCE_EQ_DONE, 0x00); it6505_dpcd_write(it6505, DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); } static void it6505_init(struct it6505 *it6505) { it6505_write(it6505, REG_AUX_OPT, AUX_AUTO_RST | AUX_FIX_FREQ); it6505_write(it6505, REG_AUX_CTRL, AUX_NO_SEGMENT_WR); it6505_write(it6505, REG_HDCP_CTRL2, HDCP_AN_SEL | HDCP_HW_HPDIRQ_ACT); it6505_write(it6505, REG_VID_BUS_CTRL0, IN_DDR | DDR_CD); it6505_write(it6505, REG_VID_BUS_CTRL1, 0x01); it6505_write(it6505, REG_AUDIO_CTRL0, AUDIO_16B_BOUND); /* chip internal setting, don't modify */ it6505_write(it6505, REG_HPD_IRQ_TIME, 0xF5); it6505_write(it6505, REG_AUX_DEBUG_MODE, 0x4D); it6505_write(it6505, REG_AUX_OPT2, 0x17); it6505_write(it6505, REG_HDCP_OPT, 0x60); it6505_write(it6505, REG_DATA_MUTE_CTRL, EN_VID_MUTE | EN_AUD_MUTE | ENABLE_AUTO_VIDEO_FIFO_RESET); it6505_write(it6505, REG_TIME_STMP_CTRL, EN_SSC_GAT | EN_ENHANCE_VID_STMP | EN_ENHANCE_AUD_STMP); it6505_write(it6505, REG_INFOFRAME_CTRL, 0x00); it6505_write(it6505, REG_DRV_0_DB_800_MV, afe_setting_table[it6505->afe_setting][0]); it6505_write(it6505, REG_PRE_0_DB_800_MV, afe_setting_table[it6505->afe_setting][1]); it6505_write(it6505, REG_PRE_3P5_DB_800_MV, afe_setting_table[it6505->afe_setting][2]); it6505_write(it6505, REG_SSC_CTRL0, 0x9E); it6505_write(it6505, REG_SSC_CTRL1, 0x1C); it6505_write(it6505, REG_SSC_CTRL2, 0x42); } static void it6505_video_disable(struct it6505 *it6505) { it6505_set_bits(it6505, REG_DATA_MUTE_CTRL, EN_VID_MUTE, EN_VID_MUTE); it6505_set_bits(it6505, REG_INFOFRAME_CTRL, EN_VID_CTRL_PKT, 0x00); it6505_set_bits(it6505, REG_RESET_CTRL, VIDEO_RESET, VIDEO_RESET); } static void it6505_video_reset(struct it6505 *it6505) { 
	it6505_link_reset_step_train(it6505);
	it6505_set_bits(it6505, REG_DATA_MUTE_CTRL, EN_VID_MUTE, EN_VID_MUTE);
	it6505_set_bits(it6505, REG_INFOFRAME_CTRL, EN_VID_CTRL_PKT, 0x00);
	it6505_set_bits(it6505, REG_RESET_CTRL, VIDEO_RESET, VIDEO_RESET);
	it6505_set_bits(it6505, REG_501_FIFO_CTRL, RST_501_FIFO, RST_501_FIFO);
	it6505_set_bits(it6505, REG_501_FIFO_CTRL, RST_501_FIFO, 0x00);
	it6505_set_bits(it6505, REG_RESET_CTRL, VIDEO_RESET, 0x00);
}

/* Re-tune the pixel clock phase for the new mode, then mute/stop video. */
static void it6505_update_video_parameter(struct it6505 *it6505,
					  const struct drm_display_mode *mode)
{
	it6505_clk_phase_adjustment(it6505, mode);
	it6505_video_disable(it6505);
}

/*
 * Probe for a live audio input: temporarily release the audio reset, read
 * the measured input frequency register, then restore the previous reset
 * state. A reading of 0xFF is treated as "no audio input".
 */
static bool it6505_audio_input(struct it6505 *it6505)
{
	int reg05, regbe;

	reg05 = it6505_read(it6505, REG_RESET_CTRL);
	it6505_set_bits(it6505, REG_RESET_CTRL, AUDIO_RESET, 0x00);
	usleep_range(3000, 4000);
	regbe = it6505_read(it6505, REG_AUDIO_INPUT_FREQ);
	it6505_write(it6505, REG_RESET_CTRL, reg05);

	return regbe != 0xFF;
}

/* Program the IEC958 channel-status bytes from the audio settings. */
static void it6505_setup_audio_channel_status(struct it6505 *it6505)
{
	enum it6505_audio_sample_rate sample_rate = it6505->audio.sample_rate;
	/* indexed by it6505->audio.word_length (WORD_LENGTH_16..24BIT) */
	u8 audio_word_length_map[] = { 0x02, 0x04, 0x03, 0x0B };

	/* Channel Status */
	it6505_write(it6505, REG_IEC958_STS0, it6505->audio.type << 1);
	it6505_write(it6505, REG_IEC958_STS1, 0x00);
	it6505_write(it6505, REG_IEC958_STS2, 0x00);
	it6505_write(it6505, REG_IEC958_STS3, sample_rate);
	it6505_write(it6505, REG_IEC958_STS4, (~sample_rate << 4) |
		     audio_word_length_map[it6505->audio.word_length]);
}

/* Configure the I2S/SPDIF input format and the audio FIFO mapping. */
static void it6505_setup_audio_format(struct it6505 *it6505)
{
	/* I2S MODE */
	it6505_write(it6505, REG_AUDIO_FMT,
		     (it6505->audio.word_length << 5) |
			     (it6505->audio.i2s_data_sequence << 4) |
			     (it6505->audio.i2s_ws_channel << 3) |
			     (it6505->audio.i2s_data_delay << 2) |
			     (it6505->audio.i2s_justified << 1) |
			     it6505->audio.i2s_input_format);

	if (it6505->audio.select == SPDIF) {
		it6505_write(it6505, REG_AUDIO_FIFO_SEL, 0x00);
		/* 0x30 = 128*FS */
		it6505_set_bits(it6505, REG_AUX_OPT, 0xF0, 0x30);
	} else {
		it6505_write(it6505, REG_AUDIO_FIFO_SEL, 0xE4);
	}

	it6505_write(it6505, REG_AUDIO_CTRL0, 0x20);
	it6505_write(it6505, REG_AUDIO_CTRL1, 0x00);
}

/*
 * Enable one I2S enable bit per stereo pair of channels; the audio source
 * select (I2S/SPDIF) goes into bit 4 of the same register.
 */
static void it6505_enable_audio_source(struct it6505 *it6505)
{
	unsigned int audio_source_count;

	audio_source_count = BIT(DIV_ROUND_UP(it6505->audio.channel_count, 2))
			     - 1;

	audio_source_count |= it6505->audio.select << 4;

	it6505_write(it6505, REG_AUDIO_SRC_CTRL, audio_source_count);
}

/*
 * Fill in and enable the audio infoframe. The channel-allocation byte is
 * taken from a lookup table indexed by (channel_count - 1).
 */
static void it6505_enable_audio_infoframe(struct it6505 *it6505)
{
	struct device *dev = it6505->dev;
	u8 audio_info_ca[] = { 0x00, 0x00, 0x01, 0x03, 0x07, 0x0B, 0x0F, 0x1F };

	DRM_DEV_DEBUG_DRIVER(dev, "infoframe channel_allocation:0x%02x",
			     audio_info_ca[it6505->audio.channel_count - 1]);
	it6505_write(it6505, REG_AUD_INFOFRAM_DB1,
		     it6505->audio.channel_count - 1);
	it6505_write(it6505, REG_AUD_INFOFRAM_DB2, 0x00);
	it6505_write(it6505, REG_AUD_INFOFRAM_DB3,
		     audio_info_ca[it6505->audio.channel_count - 1]);
	it6505_write(it6505, REG_AUD_INFOFRAM_DB4, 0x00);
	it6505_write(it6505, REG_AUD_INFOFRAM_SUM, 0x00);

	/* Enable Audio InfoFrame */
	it6505_set_bits(it6505, REG_INFOFRAME_CTRL, EN_AUD_CTRL_PKT,
			EN_AUD_CTRL_PKT);
}

/* Mute audio, stop the I2S inputs, drop the infoframe, hold audio reset. */
static void it6505_disable_audio(struct it6505 *it6505)
{
	it6505_set_bits(it6505, REG_DATA_MUTE_CTRL, EN_AUD_MUTE, EN_AUD_MUTE);
	it6505_set_bits(it6505, REG_AUDIO_SRC_CTRL, M_AUDIO_I2S_EN, 0x00);
	it6505_set_bits(it6505, REG_INFOFRAME_CTRL, EN_AUD_CTRL_PKT, 0x00);
	it6505_set_bits(it6505, REG_RESET_CTRL, AUDIO_RESET, AUDIO_RESET);
}

/*
 * Full audio bring-up: reset/disable first, then channel status, format,
 * source enables, infoframe, the fixed N value (0x8000 across the three
 * REG_AUDIO_N_* bytes), a FIFO reset pulse, and finally unmute.
 */
static void it6505_enable_audio(struct it6505 *it6505)
{
	struct device *dev = it6505->dev;
	int regbe;

	DRM_DEV_DEBUG_DRIVER(dev, "start");

	it6505_disable_audio(it6505);

	it6505_setup_audio_channel_status(it6505);
	it6505_setup_audio_format(it6505);
	it6505_enable_audio_source(it6505);
	it6505_enable_audio_infoframe(it6505);

	it6505_write(it6505, REG_AUDIO_N_0_7, 0x00);
	it6505_write(it6505, REG_AUDIO_N_8_15, 0x80);
	it6505_write(it6505, REG_AUDIO_N_16_23, 0x00);

	it6505_set_bits(it6505, REG_AUDIO_SRC_CTRL, AUDIO_FIFO_RESET, 
AUDIO_FIFO_RESET); it6505_set_bits(it6505, REG_AUDIO_SRC_CTRL, AUDIO_FIFO_RESET, 0x00); it6505_set_bits(it6505, REG_RESET_CTRL, AUDIO_RESET, 0x00); regbe = it6505_read(it6505, REG_AUDIO_INPUT_FREQ); DRM_DEV_DEBUG_DRIVER(dev, "regbe:0x%02x audio input fs: %d.%d kHz", regbe, 6750 / regbe, (6750 % regbe) * 10 / regbe); it6505_set_bits(it6505, REG_DATA_MUTE_CTRL, EN_AUD_MUTE, 0x00); } static bool it6505_use_step_train_check(struct it6505 *it6505) { if (it6505->link.revision >= 0x12) return it6505->dpcd[DP_TRAINING_AUX_RD_INTERVAL] >= 0x01; return true; } static void it6505_parse_link_capabilities(struct it6505 *it6505) { struct device *dev = it6505->dev; struct it6505_drm_dp_link *link = &it6505->link; int bcaps; if (it6505->dpcd[0] == 0) { dev_err(dev, "DPCD is not initialized"); return; } memset(link, 0, sizeof(*link)); link->revision = it6505->dpcd[0]; link->rate = drm_dp_bw_code_to_link_rate(it6505->dpcd[1]); link->num_lanes = it6505->dpcd[2] & DP_MAX_LANE_COUNT_MASK; if (it6505->dpcd[2] & DP_ENHANCED_FRAME_CAP) link->capabilities = DP_ENHANCED_FRAME_CAP; DRM_DEV_DEBUG_DRIVER(dev, "DPCD Rev.: %d.%d", link->revision >> 4, link->revision & 0x0F); DRM_DEV_DEBUG_DRIVER(dev, "Sink max link rate: %d.%02d Gbps per lane", link->rate / 100000, link->rate / 1000 % 100); it6505->link_rate_bw_code = drm_dp_link_rate_to_bw_code(link->rate); DRM_DEV_DEBUG_DRIVER(dev, "link rate bw code:0x%02x", it6505->link_rate_bw_code); it6505->link_rate_bw_code = min_t(int, it6505->link_rate_bw_code, MAX_LINK_RATE); it6505->lane_count = link->num_lanes; DRM_DEV_DEBUG_DRIVER(dev, "Sink support %d lanes training", it6505->lane_count); it6505->lane_count = min_t(int, it6505->lane_count, it6505->max_lane_count); it6505->branch_device = drm_dp_is_branch(it6505->dpcd); DRM_DEV_DEBUG_DRIVER(dev, "Sink %sbranch device", it6505->branch_device ? "" : "Not "); it6505->enable_enhanced_frame = link->capabilities; DRM_DEV_DEBUG_DRIVER(dev, "Sink %sSupport Enhanced Framing", it6505->enable_enhanced_frame ? 
"" : "Not "); it6505->enable_ssc = (it6505->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5); DRM_DEV_DEBUG_DRIVER(dev, "Maximum Down-Spread: %s, %ssupport SSC!", it6505->enable_ssc ? "0.5" : "0", it6505->enable_ssc ? "" : "Not "); it6505->step_train = it6505_use_step_train_check(it6505); if (it6505->step_train) DRM_DEV_DEBUG_DRIVER(dev, "auto train fail, will step train"); bcaps = it6505_dpcd_read(it6505, DP_AUX_HDCP_BCAPS); DRM_DEV_DEBUG_DRIVER(dev, "bcaps:0x%02x", bcaps); if (bcaps & DP_BCAPS_HDCP_CAPABLE) { it6505->is_repeater = (bcaps & DP_BCAPS_REPEATER_PRESENT); DRM_DEV_DEBUG_DRIVER(dev, "Support HDCP! Downstream is %s!", it6505->is_repeater ? "repeater" : "receiver"); } else { DRM_DEV_DEBUG_DRIVER(dev, "Sink not support HDCP!"); it6505->hdcp_desired = false; } DRM_DEV_DEBUG_DRIVER(dev, "HDCP %s", it6505->hdcp_desired ? "desired" : "undesired"); } static void it6505_setup_ssc(struct it6505 *it6505) { it6505_set_bits(it6505, REG_TRAIN_CTRL0, SPREAD_AMP_5, it6505->enable_ssc ? SPREAD_AMP_5 : 0x00); if (it6505->enable_ssc) { it6505_write(it6505, REG_SSC_CTRL0, 0x9E); it6505_write(it6505, REG_SSC_CTRL1, 0x1C); it6505_write(it6505, REG_SSC_CTRL2, 0x42); it6505_write(it6505, REG_SP_CTRL0, 0x07); it6505_write(it6505, REG_IP_CTRL1, 0x29); it6505_write(it6505, REG_IP_CTRL2, 0x03); /* Stamp Interrupt Step */ it6505_set_bits(it6505, REG_TIME_STMP_CTRL, M_STAMP_STEP, 0x10); it6505_dpcd_write(it6505, DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5); } else { it6505_dpcd_write(it6505, DP_DOWNSPREAD_CTRL, 0x00); it6505_set_bits(it6505, REG_TIME_STMP_CTRL, M_STAMP_STEP, 0x00); } } static inline void it6505_link_rate_setup(struct it6505 *it6505) { it6505_set_bits(it6505, REG_TRAIN_CTRL0, FORCE_LBR, (it6505->link_rate_bw_code == RBR) ? FORCE_LBR : 0x00); it6505_set_bits(it6505, REG_LINK_DRV, DRV_HS, (it6505->link_rate_bw_code == RBR) ? 
0x00 : DRV_HS); } static void it6505_lane_count_setup(struct it6505 *it6505) { it6505_get_extcon_property(it6505); it6505_set_bits(it6505, REG_TRAIN_CTRL0, LANE_SWAP, it6505->lane_swap ? LANE_SWAP : 0x00); it6505_set_bits(it6505, REG_TRAIN_CTRL0, LANE_COUNT_MASK, (it6505->lane_count - 1) << 1); } static void it6505_link_training_setup(struct it6505 *it6505) { struct device *dev = it6505->dev; if (it6505->enable_enhanced_frame) it6505_set_bits(it6505, REG_DATA_MUTE_CTRL, ENABLE_ENHANCED_FRAME, ENABLE_ENHANCED_FRAME); it6505_link_rate_setup(it6505); it6505_lane_count_setup(it6505); it6505_setup_ssc(it6505); DRM_DEV_DEBUG_DRIVER(dev, "%s, %d lanes, %sable ssc, %sable enhanced frame", it6505->link_rate_bw_code != RBR ? "HBR" : "RBR", it6505->lane_count, it6505->enable_ssc ? "en" : "dis", it6505->enable_enhanced_frame ? "en" : "dis"); } static bool it6505_link_start_auto_train(struct it6505 *it6505) { int timeout = 500, link_training_state; bool state = false; mutex_lock(&it6505->aux_lock); it6505_set_bits(it6505, REG_TRAIN_CTRL0, FORCE_CR_DONE | FORCE_EQ_DONE, 0x00); it6505_write(it6505, REG_TRAIN_CTRL1, FORCE_RETRAIN); it6505_write(it6505, REG_TRAIN_CTRL1, AUTO_TRAIN); while (timeout > 0) { usleep_range(1000, 2000); link_training_state = it6505_read(it6505, REG_LINK_TRAIN_STS); if (link_training_state > 0 && (link_training_state & LINK_STATE_NORP)) { state = true; goto unlock; } timeout--; } unlock: mutex_unlock(&it6505->aux_lock); return state; } static int it6505_drm_dp_link_configure(struct it6505 *it6505) { u8 values[2]; int err; struct drm_dp_aux *aux = &it6505->aux; values[0] = it6505->link_rate_bw_code; values[1] = it6505->lane_count; if (it6505->enable_enhanced_frame) values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values)); if (err < 0) return err; return 0; } static bool it6505_check_voltage_swing_max(u8 lane_voltage_swing_pre_emphasis) { return ((lane_voltage_swing_pre_emphasis & 0x03) == 
MAX_CR_LEVEL);
}

/* True when the 2-bit pre-emphasis field is already at its ceiling. */
static bool it6505_check_pre_emphasis_max(u8 lane_voltage_swing_pre_emphasis)
{
	return ((lane_voltage_swing_pre_emphasis & 0x03) == MAX_EQ_LEVEL);
}

/*
 * True if any active lane reports DP_TRAIN_MAX_SWING_REACHED, i.e. the
 * source cannot drive the lane any harder and CR training cannot make
 * further progress.
 */
static bool it6505_check_max_voltage_swing_reached(u8 *lane_voltage_swing,
						   u8 lane_count)
{
	u8 i;

	for (i = 0; i < lane_count; i++) {
		if (lane_voltage_swing[i] & DP_TRAIN_MAX_SWING_REACHED)
			return true;
	}

	return false;
}

/*
 * Build the DP_TRAINING_LANEx_SET byte for every active lane from the
 * requested voltage-swing/pre-emphasis pair, write it to the sink and
 * read it back.  Returns false on any read-back mismatch so the caller
 * can retry the training step.
 */
static bool
step_train_lane_voltage_para_set(struct it6505 *it6505,
				 struct it6505_step_train_para
				 *lane_voltage_pre_emphasis,
				 u8 *lane_voltage_pre_emphasis_set)
{
	u8 *voltage_swing = lane_voltage_pre_emphasis->voltage_swing;
	u8 *pre_emphasis = lane_voltage_pre_emphasis->pre_emphasis;
	u8 i;

	for (i = 0; i < it6505->lane_count; i++) {
		/* both DPCD fields are only 2 bits wide */
		voltage_swing[i] &= 0x03;
		lane_voltage_pre_emphasis_set[i] = voltage_swing[i];
		if (it6505_check_voltage_swing_max(voltage_swing[i]))
			lane_voltage_pre_emphasis_set[i] |=
				DP_TRAIN_MAX_SWING_REACHED;

		pre_emphasis[i] &= 0x03;
		lane_voltage_pre_emphasis_set[i] |=
			pre_emphasis[i] << DP_TRAIN_PRE_EMPHASIS_SHIFT;
		if (it6505_check_pre_emphasis_max(pre_emphasis[i]))
			lane_voltage_pre_emphasis_set[i] |=
				DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
		it6505_dpcd_write(it6505, DP_TRAINING_LANE0_SET + i,
				  lane_voltage_pre_emphasis_set[i]);

		if (lane_voltage_pre_emphasis_set[i] !=
		    it6505_dpcd_read(it6505, DP_TRAINING_LANE0_SET + i))
			return false;
	}

	return true;
}

/*
 * Clock-recovery phase of manual ("step") link training: select TPS1,
 * then loop adjusting swing/pre-emphasis from the sink's adjust
 * requests until CR is reported good, the sink keeps asking for the
 * same values (loop_count), or the retry budget (i < 10) is exhausted.
 */
static bool it6505_step_cr_train(struct it6505 *it6505,
				 struct it6505_step_train_para
				 *lane_voltage_pre_emphasis)
{
	u8 loop_count = 0, i = 0, j;
	u8 link_status[DP_LINK_STATUS_SIZE] = { 0 };
	u8 lane_level_config[MAX_LANE_COUNT] = { 0 };
	int pre_emphasis_adjust = -1, voltage_swing_adjust = -1;
	const struct drm_dp_aux *aux = &it6505->aux;

	it6505_dpcd_write(it6505, DP_DOWNSPREAD_CTRL,
			  it6505->enable_ssc ?
DP_SPREAD_AMP_0_5 : 0x00); it6505_dpcd_write(it6505, DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_1); while (loop_count < 5 && i < 10) { i++; if (!step_train_lane_voltage_para_set(it6505, lane_voltage_pre_emphasis, lane_level_config)) continue; drm_dp_link_train_clock_recovery_delay(aux, it6505->dpcd); drm_dp_dpcd_read_link_status(&it6505->aux, link_status); if (drm_dp_clock_recovery_ok(link_status, it6505->lane_count)) { it6505_set_bits(it6505, REG_TRAIN_CTRL0, FORCE_CR_DONE, FORCE_CR_DONE); return true; } DRM_DEV_DEBUG_DRIVER(it6505->dev, "cr not done"); if (it6505_check_max_voltage_swing_reached(lane_level_config, it6505->lane_count)) goto cr_train_fail; for (j = 0; j < it6505->lane_count; j++) { lane_voltage_pre_emphasis->voltage_swing[j] = drm_dp_get_adjust_request_voltage(link_status, j) >> DP_TRAIN_VOLTAGE_SWING_SHIFT; lane_voltage_pre_emphasis->pre_emphasis[j] = drm_dp_get_adjust_request_pre_emphasis(link_status, j) >> DP_TRAIN_PRE_EMPHASIS_SHIFT; if (voltage_swing_adjust == lane_voltage_pre_emphasis->voltage_swing[j] && pre_emphasis_adjust == lane_voltage_pre_emphasis->pre_emphasis[j]) { loop_count++; continue; } voltage_swing_adjust = lane_voltage_pre_emphasis->voltage_swing[j]; pre_emphasis_adjust = lane_voltage_pre_emphasis->pre_emphasis[j]; loop_count = 0; if (voltage_swing_adjust + pre_emphasis_adjust > MAX_EQ_LEVEL) lane_voltage_pre_emphasis->voltage_swing[j] = MAX_EQ_LEVEL - lane_voltage_pre_emphasis ->pre_emphasis[j]; } } cr_train_fail: it6505_dpcd_write(it6505, DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); return false; } static bool it6505_step_eq_train(struct it6505 *it6505, struct it6505_step_train_para *lane_voltage_pre_emphasis) { u8 loop_count = 0, i, link_status[DP_LINK_STATUS_SIZE] = { 0 }; u8 lane_level_config[MAX_LANE_COUNT] = { 0 }; const struct drm_dp_aux *aux = &it6505->aux; it6505_dpcd_write(it6505, DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_2); while (loop_count < 6) { loop_count++; if 
(!step_train_lane_voltage_para_set(it6505, lane_voltage_pre_emphasis, lane_level_config)) continue; drm_dp_link_train_channel_eq_delay(aux, it6505->dpcd); drm_dp_dpcd_read_link_status(&it6505->aux, link_status); if (!drm_dp_clock_recovery_ok(link_status, it6505->lane_count)) goto eq_train_fail; if (drm_dp_channel_eq_ok(link_status, it6505->lane_count)) { it6505_dpcd_write(it6505, DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); it6505_set_bits(it6505, REG_TRAIN_CTRL0, FORCE_EQ_DONE, FORCE_EQ_DONE); return true; } DRM_DEV_DEBUG_DRIVER(it6505->dev, "eq not done"); for (i = 0; i < it6505->lane_count; i++) { lane_voltage_pre_emphasis->voltage_swing[i] = drm_dp_get_adjust_request_voltage(link_status, i) >> DP_TRAIN_VOLTAGE_SWING_SHIFT; lane_voltage_pre_emphasis->pre_emphasis[i] = drm_dp_get_adjust_request_pre_emphasis(link_status, i) >> DP_TRAIN_PRE_EMPHASIS_SHIFT; if (lane_voltage_pre_emphasis->voltage_swing[i] + lane_voltage_pre_emphasis->pre_emphasis[i] > MAX_EQ_LEVEL) lane_voltage_pre_emphasis->voltage_swing[i] = 0x03 - lane_voltage_pre_emphasis ->pre_emphasis[i]; } } eq_train_fail: it6505_dpcd_write(it6505, DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); return false; } static bool it6505_link_start_step_train(struct it6505 *it6505) { int err; struct it6505_step_train_para lane_voltage_pre_emphasis = { .voltage_swing = { 0 }, .pre_emphasis = { 0 }, }; DRM_DEV_DEBUG_DRIVER(it6505->dev, "start"); err = it6505_drm_dp_link_configure(it6505); if (err < 0) return false; if (!it6505_step_cr_train(it6505, &lane_voltage_pre_emphasis)) return false; if (!it6505_step_eq_train(it6505, &lane_voltage_pre_emphasis)) return false; return true; } static bool it6505_get_video_status(struct it6505 *it6505) { int reg_0d; reg_0d = it6505_read(it6505, REG_SYSTEM_STS); if (reg_0d < 0) return false; return reg_0d & VIDEO_STB; } static void it6505_reset_hdcp(struct it6505 *it6505) { it6505->hdcp_status = HDCP_AUTH_IDLE; /* Disable CP_Desired */ it6505_set_bits(it6505, 
REG_HDCP_CTRL1, HDCP_CP_ENABLE, 0x00);
	it6505_set_bits(it6505, REG_RESET_CTRL, HDCP_RESET, HDCP_RESET);
}

/* Reset the HDCP engine and (re)schedule authentication after a delay. */
static void it6505_start_hdcp(struct it6505 *it6505)
{
	struct device *dev = it6505->dev;

	DRM_DEV_DEBUG_DRIVER(dev, "start");
	it6505_reset_hdcp(it6505);
	/* 2.4 s grace period before kicking off hdcp_work */
	queue_delayed_work(system_wq, &it6505->hdcp_work,
			   msecs_to_jiffies(2400));
}

/* Tear down HDCP state and cancel any pending authentication work. */
static void it6505_stop_hdcp(struct it6505 *it6505)
{
	it6505_reset_hdcp(it6505);
	cancel_delayed_work(&it6505->hdcp_work);
}

/* A well-formed HDCP KSV contains exactly twenty set bits. */
static bool it6505_hdcp_is_ksv_valid(u8 *ksv)
{
	int i, ones = 0;

	/* KSV has 20 1's and 20 0's */
	for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
		ones += hweight8(ksv[i]);
	if (ones != 20)
		return false;
	return true;
}

/*
 * HDCP first-part authentication: release the HDCP reset, let the
 * hardware generate An, then enable CP and trigger the authentication
 * state machine.  Sets hdcp_status to HDCP_AUTH_GOING; completion is
 * reported later via the hdcp done/fail interrupts.
 */
static void it6505_hdcp_part1_auth(struct it6505 *it6505)
{
	struct device *dev = it6505->dev;
	u8 hdcp_bcaps;

	it6505_set_bits(it6505, REG_RESET_CTRL, HDCP_RESET, 0x00);
	/* Disable CP_Desired */
	it6505_set_bits(it6505, REG_HDCP_CTRL1, HDCP_CP_ENABLE, 0x00);

	usleep_range(1000, 1500);
	hdcp_bcaps = it6505_dpcd_read(it6505, DP_AUX_HDCP_BCAPS);
	DRM_DEV_DEBUG_DRIVER(dev, "DPCD[0x68028]: 0x%02x", hdcp_bcaps);
	if (!hdcp_bcaps)
		return;

	/* clear the repeater List Chk Done and fail bit */
	it6505_set_bits(it6505, REG_HDCP_TRIGGER,
			HDCP_TRIGGER_KSV_DONE | HDCP_TRIGGER_KSV_FAIL,
			0x00);

	/* Enable An Generator */
	it6505_set_bits(it6505, REG_HDCP_CTRL2, HDCP_AN_GEN, HDCP_AN_GEN);
	/* delay1ms(10);*/
	usleep_range(10000, 15000);
	/* Stop An Generator */
	it6505_set_bits(it6505, REG_HDCP_CTRL2, HDCP_AN_GEN, 0x00);

	it6505_set_bits(it6505, REG_HDCP_CTRL1, HDCP_CP_ENABLE,
			HDCP_CP_ENABLE);
	it6505_set_bits(it6505, REG_HDCP_TRIGGER, HDCP_TRIGGER_START,
			HDCP_TRIGGER_START);

	it6505->hdcp_status = HDCP_AUTH_GOING;
}

/*
 * SHA-1 hash @size bytes of @sha1_input into @output_av using the
 * kernel crypto shash API (SHA-1 is what the HDCP 1.x repeater
 * protocol mandates).  Returns 0 on success or a negative errno.
 */
static int it6505_sha1_digest(struct it6505 *it6505, u8 *sha1_input,
			      unsigned int size, u8 *output_av)
{
	struct shash_desc *desc;
	struct crypto_shash *tfm;
	int err;
	struct device *dev = it6505->dev;

	tfm = crypto_alloc_shash("sha1", 0, 0);
	if (IS_ERR(tfm)) {
		dev_err(dev, "crypto_alloc_shash sha1 failed");
		return PTR_ERR(tfm);
	}
	desc =
kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL); if (!desc) { crypto_free_shash(tfm); return -ENOMEM; } desc->tfm = tfm; err = crypto_shash_digest(desc, sha1_input, size, output_av); if (err) dev_err(dev, "crypto_shash_digest sha1 failed"); crypto_free_shash(tfm); kfree(desc); return err; } static int it6505_setup_sha1_input(struct it6505 *it6505, u8 *sha1_input) { struct device *dev = it6505->dev; u8 binfo[2]; int down_stream_count, i, err, msg_count = 0; err = it6505_get_dpcd(it6505, DP_AUX_HDCP_BINFO, binfo, ARRAY_SIZE(binfo)); if (err < 0) { dev_err(dev, "Read binfo value Fail"); return err; } down_stream_count = binfo[0] & 0x7F; DRM_DEV_DEBUG_DRIVER(dev, "binfo:0x%*ph", (int)ARRAY_SIZE(binfo), binfo); if ((binfo[0] & BIT(7)) || (binfo[1] & BIT(3))) { dev_err(dev, "HDCP max cascade device exceed"); return 0; } if (!down_stream_count || down_stream_count > MAX_HDCP_DOWN_STREAM_COUNT) { dev_err(dev, "HDCP down stream count Error %d", down_stream_count); return 0; } for (i = 0; i < down_stream_count; i++) { err = it6505_get_dpcd(it6505, DP_AUX_HDCP_KSV_FIFO + (i % 3) * DRM_HDCP_KSV_LEN, sha1_input + msg_count, DRM_HDCP_KSV_LEN); if (err < 0) return err; msg_count += 5; } it6505->hdcp_down_stream_count = down_stream_count; sha1_input[msg_count++] = binfo[0]; sha1_input[msg_count++] = binfo[1]; it6505_set_bits(it6505, REG_HDCP_CTRL2, HDCP_EN_M0_READ, HDCP_EN_M0_READ); err = regmap_bulk_read(it6505->regmap, REG_M0_0_7, sha1_input + msg_count, 8); it6505_set_bits(it6505, REG_HDCP_CTRL2, HDCP_EN_M0_READ, 0x00); if (err < 0) { dev_err(dev, " Warning, Read M value Fail"); return err; } msg_count += 8; return msg_count; } static bool it6505_hdcp_part2_ksvlist_check(struct it6505 *it6505) { struct device *dev = it6505->dev; u8 av[5][4], bv[5][4]; int i, err; i = it6505_setup_sha1_input(it6505, it6505->sha1_input); if (i <= 0) { dev_err(dev, "SHA-1 Input length error %d", i); return false; } it6505_sha1_digest(it6505, it6505->sha1_input, i, (u8 *)av); err = 
it6505_get_dpcd(it6505, DP_AUX_HDCP_V_PRIME(0), (u8 *)bv, sizeof(bv)); if (err < 0) { dev_err(dev, "Read V' value Fail"); return false; } for (i = 0; i < 5; i++) if (bv[i][3] != av[i][0] || bv[i][2] != av[i][1] || bv[i][1] != av[i][2] || bv[i][0] != av[i][3]) return false; DRM_DEV_DEBUG_DRIVER(dev, "V' all match!!"); return true; } static void it6505_hdcp_wait_ksv_list(struct work_struct *work) { struct it6505 *it6505 = container_of(work, struct it6505, hdcp_wait_ksv_list); struct device *dev = it6505->dev; unsigned int timeout = 5000; u8 bstatus = 0; bool ksv_list_check; timeout /= 20; while (timeout > 0) { if (!it6505_get_sink_hpd_status(it6505)) return; bstatus = it6505_dpcd_read(it6505, DP_AUX_HDCP_BSTATUS); if (bstatus & DP_BSTATUS_READY) break; msleep(20); timeout--; } if (timeout == 0) { DRM_DEV_DEBUG_DRIVER(dev, "timeout and ksv list wait failed"); goto timeout; } ksv_list_check = it6505_hdcp_part2_ksvlist_check(it6505); DRM_DEV_DEBUG_DRIVER(dev, "ksv list ready, ksv list check %s", ksv_list_check ? 
"pass" : "fail"); if (ksv_list_check) { it6505_set_bits(it6505, REG_HDCP_TRIGGER, HDCP_TRIGGER_KSV_DONE, HDCP_TRIGGER_KSV_DONE); return; } timeout: it6505_set_bits(it6505, REG_HDCP_TRIGGER, HDCP_TRIGGER_KSV_DONE | HDCP_TRIGGER_KSV_FAIL, HDCP_TRIGGER_KSV_DONE | HDCP_TRIGGER_KSV_FAIL); } static void it6505_hdcp_work(struct work_struct *work) { struct it6505 *it6505 = container_of(work, struct it6505, hdcp_work.work); struct device *dev = it6505->dev; int ret; u8 link_status[DP_LINK_STATUS_SIZE] = { 0 }; DRM_DEV_DEBUG_DRIVER(dev, "start"); if (!it6505_get_sink_hpd_status(it6505)) return; ret = drm_dp_dpcd_read_link_status(&it6505->aux, link_status); DRM_DEV_DEBUG_DRIVER(dev, "ret: %d link_status: %*ph", ret, (int)sizeof(link_status), link_status); if (ret < 0 || !drm_dp_channel_eq_ok(link_status, it6505->lane_count) || !it6505_get_video_status(it6505)) { DRM_DEV_DEBUG_DRIVER(dev, "link train not done or no video"); return; } ret = it6505_get_dpcd(it6505, DP_AUX_HDCP_BKSV, it6505->bksvs, ARRAY_SIZE(it6505->bksvs)); if (ret < 0) { dev_err(dev, "fail to get bksv ret: %d", ret); it6505_set_bits(it6505, REG_HDCP_TRIGGER, HDCP_TRIGGER_KSV_FAIL, HDCP_TRIGGER_KSV_FAIL); } DRM_DEV_DEBUG_DRIVER(dev, "bksv = 0x%*ph", (int)ARRAY_SIZE(it6505->bksvs), it6505->bksvs); if (!it6505_hdcp_is_ksv_valid(it6505->bksvs)) { dev_err(dev, "Display Port bksv not valid"); it6505_set_bits(it6505, REG_HDCP_TRIGGER, HDCP_TRIGGER_KSV_FAIL, HDCP_TRIGGER_KSV_FAIL); } it6505_hdcp_part1_auth(it6505); } static void it6505_show_hdcp_info(struct it6505 *it6505) { struct device *dev = it6505->dev; int i; u8 *sha1 = it6505->sha1_input; DRM_DEV_DEBUG_DRIVER(dev, "hdcp_status: %d is_repeater: %d", it6505->hdcp_status, it6505->is_repeater); DRM_DEV_DEBUG_DRIVER(dev, "bksv = 0x%*ph", (int)ARRAY_SIZE(it6505->bksvs), it6505->bksvs); if (it6505->is_repeater) { DRM_DEV_DEBUG_DRIVER(dev, "hdcp_down_stream_count: %d", it6505->hdcp_down_stream_count); DRM_DEV_DEBUG_DRIVER(dev, "sha1_input: 0x%*ph", 
(int)ARRAY_SIZE(it6505->sha1_input),
				     it6505->sha1_input);
		for (i = 0; i < it6505->hdcp_down_stream_count; i++) {
			DRM_DEV_DEBUG_DRIVER(dev, "KSV_%d = 0x%*ph", i,
					     DRM_HDCP_KSV_LEN, sha1);
			sha1 += DRM_HDCP_KSV_LEN;
		}
		/* sha1 now points just past the KSV list: binfo then M0 */
		DRM_DEV_DEBUG_DRIVER(dev, "binfo: 0x%2ph M0: 0x%8ph",
				     sha1, sha1 + 2);
	}
}

/* Abort link training: go idle, cancel the worker, force a retrain latch. */
static void it6505_stop_link_train(struct it6505 *it6505)
{
	it6505->link_state = LINK_IDLE;
	cancel_work_sync(&it6505->link_works);
	it6505_write(it6505, REG_TRAIN_CTRL1, FORCE_RETRAIN);
}

/*
 * Post-training success path: unmute video, enable the AVI InfoFrame
 * packet, start audio if an input is present, and kick off HDCP when
 * the connector wants it.
 */
static void it6505_link_train_ok(struct it6505 *it6505)
{
	struct device *dev = it6505->dev;

	it6505->link_state = LINK_OK;
	/* disable mute, enable AVI InfoFrame */
	it6505_set_bits(it6505, REG_DATA_MUTE_CTRL, EN_VID_MUTE, 0x00);
	it6505_set_bits(it6505, REG_INFOFRAME_CTRL,
			EN_VID_CTRL_PKT, EN_VID_CTRL_PKT);

	if (it6505_audio_input(it6505)) {
		DRM_DEV_DEBUG_DRIVER(dev, "Enable audio!");
		it6505_enable_audio(it6505);
	}

	if (it6505->hdcp_desired)
		it6505_start_hdcp(it6505);
}

/*
 * Fallback manual training: retried up to step_retry times.  With no
 * sink attached (sink_count == 0) EQ done is simply forced; sinks that
 * don't support step training are skipped.
 */
static void it6505_link_step_train_process(struct it6505 *it6505)
{
	struct device *dev = it6505->dev;
	int ret, i, step_retry = 3;

	DRM_DEV_DEBUG_DRIVER(dev, "Start step train");

	if (it6505->sink_count == 0) {
		DRM_DEV_DEBUG_DRIVER(dev,
				     "it6505->sink_count:%d, force eq",
				     it6505->sink_count);
		it6505_set_bits(it6505, REG_TRAIN_CTRL0, FORCE_EQ_DONE,
				FORCE_EQ_DONE);
		return;
	}

	if (!it6505->step_train) {
		DRM_DEV_DEBUG_DRIVER(dev, "not support step train");
		return;
	}

	/* step training start here */
	for (i = 0; i < step_retry; i++) {
		it6505_link_reset_step_train(it6505);
		ret = it6505_link_start_step_train(it6505);
		DRM_DEV_DEBUG_DRIVER(dev, "step train %s, retry:%d times",
				     ret ?
"pass" : "failed", i + 1); if (ret) { it6505_link_train_ok(it6505); return; } } DRM_DEV_DEBUG_DRIVER(dev, "training fail"); it6505->link_state = LINK_IDLE; it6505_video_reset(it6505); } static void it6505_link_training_work(struct work_struct *work) { struct it6505 *it6505 = container_of(work, struct it6505, link_works); struct device *dev = it6505->dev; int ret; DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count: %d", it6505->sink_count); if (!it6505_get_sink_hpd_status(it6505)) return; it6505_link_training_setup(it6505); it6505_reset_hdcp(it6505); it6505_aux_reset(it6505); if (it6505->auto_train_retry < 1) { it6505_link_step_train_process(it6505); return; } ret = it6505_link_start_auto_train(it6505); DRM_DEV_DEBUG_DRIVER(dev, "auto train %s, auto_train_retry: %d", ret ? "pass" : "failed", it6505->auto_train_retry); it6505->auto_train_retry--; if (ret) { it6505_link_train_ok(it6505); return; } it6505_dump(it6505); } static void it6505_plugged_status_to_codec(struct it6505 *it6505) { enum drm_connector_status status = it6505->connector_status; if (it6505->plugged_cb && it6505->codec_dev) it6505->plugged_cb(it6505->codec_dev, status == connector_status_connected); } static void it6505_remove_edid(struct it6505 *it6505) { kfree(it6505->cached_edid); it6505->cached_edid = NULL; } static int it6505_process_hpd_irq(struct it6505 *it6505) { struct device *dev = it6505->dev; int ret, dpcd_sink_count, dp_irq_vector, bstatus; u8 link_status[DP_LINK_STATUS_SIZE]; if (!it6505_get_sink_hpd_status(it6505)) { DRM_DEV_DEBUG_DRIVER(dev, "HPD_IRQ HPD low"); it6505->sink_count = 0; return 0; } ret = it6505_dpcd_read(it6505, DP_SINK_COUNT); if (ret < 0) return ret; dpcd_sink_count = DP_GET_SINK_COUNT(ret); DRM_DEV_DEBUG_DRIVER(dev, "dpcd_sink_count: %d it6505->sink_count:%d", dpcd_sink_count, it6505->sink_count); if (it6505->branch_device && dpcd_sink_count != it6505->sink_count) { memset(it6505->dpcd, 0, sizeof(it6505->dpcd)); it6505->sink_count = dpcd_sink_count; 
it6505_reset_logic(it6505); it6505_int_mask_enable(it6505); it6505_init(it6505); it6505_remove_edid(it6505); return 0; } dp_irq_vector = it6505_dpcd_read(it6505, DP_DEVICE_SERVICE_IRQ_VECTOR); if (dp_irq_vector < 0) return dp_irq_vector; DRM_DEV_DEBUG_DRIVER(dev, "dp_irq_vector = 0x%02x", dp_irq_vector); if (dp_irq_vector & DP_CP_IRQ) { it6505_set_bits(it6505, REG_HDCP_TRIGGER, HDCP_TRIGGER_CPIRQ, HDCP_TRIGGER_CPIRQ); bstatus = it6505_dpcd_read(it6505, DP_AUX_HDCP_BSTATUS); if (bstatus < 0) return bstatus; DRM_DEV_DEBUG_DRIVER(dev, "Bstatus = 0x%02x", bstatus); } ret = drm_dp_dpcd_read_link_status(&it6505->aux, link_status); if (ret < 0) { dev_err(dev, "Fail to read link status ret: %d", ret); return ret; } DRM_DEV_DEBUG_DRIVER(dev, "link status = 0x%*ph", (int)ARRAY_SIZE(link_status), link_status); if (!drm_dp_channel_eq_ok(link_status, it6505->lane_count)) { it6505->auto_train_retry = AUTO_TRAIN_RETRY; it6505_video_reset(it6505); } return 0; } static void it6505_irq_hpd(struct it6505 *it6505) { struct device *dev = it6505->dev; int dp_sink_count; it6505->hpd_state = it6505_get_sink_hpd_status(it6505); DRM_DEV_DEBUG_DRIVER(dev, "hpd change interrupt, change to %s", it6505->hpd_state ? 
"high" : "low"); if (it6505->hpd_state) { wait_for_completion_timeout(&it6505->extcon_completion, msecs_to_jiffies(1000)); it6505_aux_on(it6505); if (it6505->dpcd[0] == 0) { it6505_get_dpcd(it6505, DP_DPCD_REV, it6505->dpcd, ARRAY_SIZE(it6505->dpcd)); it6505_variable_config(it6505); it6505_parse_link_capabilities(it6505); } it6505->auto_train_retry = AUTO_TRAIN_RETRY; it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link, DP_SET_POWER_D0); dp_sink_count = it6505_dpcd_read(it6505, DP_SINK_COUNT); it6505->sink_count = DP_GET_SINK_COUNT(dp_sink_count); DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count: %d", it6505->sink_count); it6505_lane_termination_on(it6505); it6505_lane_power_on(it6505); /* * for some dongle which issue HPD_irq * when sink count change from 0->1 * it6505 not able to receive HPD_IRQ * if HW never go into trainig done */ if (it6505->branch_device && it6505->sink_count == 0) schedule_work(&it6505->link_works); if (!it6505_get_video_status(it6505)) it6505_video_reset(it6505); } else { memset(it6505->dpcd, 0, sizeof(it6505->dpcd)); it6505_remove_edid(it6505); if (it6505->hdcp_desired) it6505_stop_hdcp(it6505); it6505_video_disable(it6505); it6505_disable_audio(it6505); it6505_stop_link_train(it6505); it6505_lane_off(it6505); it6505_link_reset_step_train(it6505); } if (it6505->bridge.dev) drm_helper_hpd_irq_event(it6505->bridge.dev); } static void it6505_irq_hpd_irq(struct it6505 *it6505) { struct device *dev = it6505->dev; DRM_DEV_DEBUG_DRIVER(dev, "hpd_irq interrupt"); if (it6505_process_hpd_irq(it6505) < 0) DRM_DEV_DEBUG_DRIVER(dev, "process hpd_irq fail!"); } static void it6505_irq_scdt(struct it6505 *it6505) { struct device *dev = it6505->dev; bool data; data = it6505_get_video_status(it6505); DRM_DEV_DEBUG_DRIVER(dev, "video stable change interrupt, %s", data ? 
"stable" : "unstable");
	it6505_calc_video_info(it6505);
	it6505_link_reset_step_train(it6505);
	/* video just became stable: retrain the link */
	if (data)
		schedule_work(&it6505->link_works);
}

/* HDCP authentication completed successfully. */
static void it6505_irq_hdcp_done(struct it6505 *it6505)
{
	struct device *dev = it6505->dev;

	DRM_DEV_DEBUG_DRIVER(dev, "hdcp done interrupt");
	it6505->hdcp_status = HDCP_AUTH_DONE;
	it6505_show_hdcp_info(it6505);
}

/* HDCP authentication failed: log state and restart the handshake. */
static void it6505_irq_hdcp_fail(struct it6505 *it6505)
{
	struct device *dev = it6505->dev;

	DRM_DEV_DEBUG_DRIVER(dev, "hdcp fail interrupt");
	it6505->hdcp_status = HDCP_AUTH_IDLE;
	it6505_show_hdcp_info(it6505);
	it6505_start_hdcp(it6505);
}

/* AUX PC request failure: log only, no recovery action. */
static void it6505_irq_aux_cmd_fail(struct it6505 *it6505)
{
	struct device *dev = it6505->dev;

	DRM_DEV_DEBUG_DRIVER(dev, "AUX PC Request Fail Interrupt");
}

/* Repeater KSV list is ready: hand off to the wait/verify worker. */
static void it6505_irq_hdcp_ksv_check(struct it6505 *it6505)
{
	struct device *dev = it6505->dev;

	DRM_DEV_DEBUG_DRIVER(dev, "HDCP event Interrupt");
	schedule_work(&it6505->hdcp_wait_ksv_list);
}

/* Audio FIFO error: restart audio if an input is still present. */
static void it6505_irq_audio_fifo_error(struct it6505 *it6505)
{
	struct device *dev = it6505->dev;

	DRM_DEV_DEBUG_DRIVER(dev, "audio fifo error Interrupt");

	if (it6505_audio_input(it6505))
		it6505_enable_audio(it6505);
}

/* Hardware reports link training failure: queue a retrain. */
static void it6505_irq_link_train_fail(struct it6505 *it6505)
{
	struct device *dev = it6505->dev;

	DRM_DEV_DEBUG_DRIVER(dev, "link training fail interrupt");
	schedule_work(&it6505->link_works);
}

/* Video FIFO overflow: reset video path and allow auto-train again. */
static void it6505_irq_video_fifo_error(struct it6505 *it6505)
{
	struct device *dev = it6505->dev;

	DRM_DEV_DEBUG_DRIVER(dev, "video fifo overflow interrupt");
	it6505->auto_train_retry = AUTO_TRAIN_RETRY;
	flush_work(&it6505->link_works);
	it6505_stop_hdcp(it6505);
	it6505_video_reset(it6505);
}

/* IO latch FIFO overflow: same recovery as a video FIFO error. */
static void it6505_irq_io_latch_fifo_overflow(struct it6505 *it6505)
{
	struct device *dev = it6505->dev;

	DRM_DEV_DEBUG_DRIVER(dev, "IO latch fifo overflow interrupt");
	it6505->auto_train_retry = AUTO_TRAIN_RETRY;
	flush_work(&it6505->link_works);
	it6505_stop_hdcp(it6505);
	it6505_video_reset(it6505);
}

/*
 * Test bit @bit in the int_status[] array where each element holds one
 * 8-bit interrupt register (bit numbering is register*8 + bit).
 */
static bool it6505_test_bit(unsigned int
bit, const unsigned int *addr) { return 1 & (addr[bit / BITS_PER_BYTE] >> (bit % BITS_PER_BYTE)); } static irqreturn_t it6505_int_threaded_handler(int unused, void *data) { struct it6505 *it6505 = data; struct device *dev = it6505->dev; static const struct { int bit; void (*handler)(struct it6505 *it6505); } irq_vec[] = { { BIT_INT_HPD, it6505_irq_hpd }, { BIT_INT_HPD_IRQ, it6505_irq_hpd_irq }, { BIT_INT_SCDT, it6505_irq_scdt }, { BIT_INT_HDCP_FAIL, it6505_irq_hdcp_fail }, { BIT_INT_HDCP_DONE, it6505_irq_hdcp_done }, { BIT_INT_AUX_CMD_FAIL, it6505_irq_aux_cmd_fail }, { BIT_INT_HDCP_KSV_CHECK, it6505_irq_hdcp_ksv_check }, { BIT_INT_AUDIO_FIFO_ERROR, it6505_irq_audio_fifo_error }, { BIT_INT_LINK_TRAIN_FAIL, it6505_irq_link_train_fail }, { BIT_INT_VID_FIFO_ERROR, it6505_irq_video_fifo_error }, { BIT_INT_IO_FIFO_OVERFLOW, it6505_irq_io_latch_fifo_overflow }, }; int int_status[3], i; if (it6505->enable_drv_hold || !it6505->powered) return IRQ_HANDLED; pm_runtime_get_sync(dev); int_status[0] = it6505_read(it6505, INT_STATUS_01); int_status[1] = it6505_read(it6505, INT_STATUS_02); int_status[2] = it6505_read(it6505, INT_STATUS_03); it6505_write(it6505, INT_STATUS_01, int_status[0]); it6505_write(it6505, INT_STATUS_02, int_status[1]); it6505_write(it6505, INT_STATUS_03, int_status[2]); DRM_DEV_DEBUG_DRIVER(dev, "reg06 = 0x%02x", int_status[0]); DRM_DEV_DEBUG_DRIVER(dev, "reg07 = 0x%02x", int_status[1]); DRM_DEV_DEBUG_DRIVER(dev, "reg08 = 0x%02x", int_status[2]); it6505_debug_print(it6505, REG_SYSTEM_STS, ""); if (it6505_test_bit(irq_vec[0].bit, (unsigned int *)int_status)) irq_vec[0].handler(it6505); if (it6505->hpd_state) { for (i = 1; i < ARRAY_SIZE(irq_vec); i++) { if (it6505_test_bit(irq_vec[i].bit, (unsigned int *)int_status)) irq_vec[i].handler(it6505); } } pm_runtime_put_sync(dev); return IRQ_HANDLED; } static int it6505_poweron(struct it6505 *it6505) { struct device *dev = it6505->dev; struct it6505_platform_data *pdata = &it6505->pdata; int err; 
DRM_DEV_DEBUG_DRIVER(dev, "it6505 start powered on"); if (it6505->powered) { DRM_DEV_DEBUG_DRIVER(dev, "it6505 already powered on"); return 0; } if (pdata->pwr18) { err = regulator_enable(pdata->pwr18); if (err) { DRM_DEV_DEBUG_DRIVER(dev, "Failed to enable VDD18: %d", err); return err; } } if (pdata->ovdd) { /* time interval between IVDD and OVDD at least be 1ms */ usleep_range(1000, 2000); err = regulator_enable(pdata->ovdd); if (err) { regulator_disable(pdata->pwr18); return err; } } /* time interval between OVDD and SYSRSTN at least be 10ms */ if (pdata->gpiod_reset) { usleep_range(10000, 20000); gpiod_set_value_cansleep(pdata->gpiod_reset, 0); usleep_range(1000, 2000); gpiod_set_value_cansleep(pdata->gpiod_reset, 1); usleep_range(10000, 20000); } it6505->powered = true; it6505_reset_logic(it6505); it6505_int_mask_enable(it6505); it6505_init(it6505); it6505_lane_off(it6505); return 0; } static int it6505_poweroff(struct it6505 *it6505) { struct device *dev = it6505->dev; struct it6505_platform_data *pdata = &it6505->pdata; int err; DRM_DEV_DEBUG_DRIVER(dev, "it6505 start power off"); if (!it6505->powered) { DRM_DEV_DEBUG_DRIVER(dev, "power had been already off"); return 0; } if (pdata->gpiod_reset) gpiod_set_value_cansleep(pdata->gpiod_reset, 0); if (pdata->pwr18) { err = regulator_disable(pdata->pwr18); if (err) return err; } if (pdata->ovdd) { err = regulator_disable(pdata->ovdd); if (err) return err; } it6505->powered = false; it6505->sink_count = 0; return 0; } static enum drm_connector_status it6505_detect(struct it6505 *it6505) { struct device *dev = it6505->dev; enum drm_connector_status status = connector_status_disconnected; int dp_sink_count; DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count:%d powered:%d", it6505->sink_count, it6505->powered); mutex_lock(&it6505->mode_lock); if (!it6505->powered) goto unlock; if (it6505->enable_drv_hold) { status = it6505->hpd_state ? 
connector_status_connected : connector_status_disconnected; goto unlock; } if (it6505->hpd_state) { it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link, DP_SET_POWER_D0); dp_sink_count = it6505_dpcd_read(it6505, DP_SINK_COUNT); it6505->sink_count = DP_GET_SINK_COUNT(dp_sink_count); DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count:%d branch:%d", it6505->sink_count, it6505->branch_device); if (it6505->branch_device) { status = (it6505->sink_count != 0) ? connector_status_connected : connector_status_disconnected; } else { status = connector_status_connected; } } else { it6505->sink_count = 0; memset(it6505->dpcd, 0, sizeof(it6505->dpcd)); } unlock: if (it6505->connector_status != status) { it6505->connector_status = status; it6505_plugged_status_to_codec(it6505); } mutex_unlock(&it6505->mode_lock); return status; } static int it6505_extcon_notifier(struct notifier_block *self, unsigned long event, void *ptr) { struct it6505 *it6505 = container_of(self, struct it6505, event_nb); schedule_work(&it6505->extcon_wq); return NOTIFY_DONE; } static void it6505_extcon_work(struct work_struct *work) { struct it6505 *it6505 = container_of(work, struct it6505, extcon_wq); struct device *dev = it6505->dev; int state, ret; if (it6505->enable_drv_hold) return; mutex_lock(&it6505->extcon_lock); state = extcon_get_state(it6505->extcon, EXTCON_DISP_DP); DRM_DEV_DEBUG_DRIVER(dev, "EXTCON_DISP_DP = 0x%02x", state); if (state == it6505->extcon_state || unlikely(state < 0)) goto unlock; it6505->extcon_state = state; if (state) { DRM_DEV_DEBUG_DRIVER(dev, "start to power on"); msleep(100); ret = pm_runtime_get_sync(dev); /* * On system resume, extcon_work can be triggered before * pm_runtime_force_resume re-enables runtime power management. * Handling the error here to make sure the bridge is powered on. 
*/ if (ret < 0) it6505_poweron(it6505); complete_all(&it6505->extcon_completion); } else { DRM_DEV_DEBUG_DRIVER(dev, "start to power off"); pm_runtime_put_sync(dev); reinit_completion(&it6505->extcon_completion); drm_helper_hpd_irq_event(it6505->bridge.dev); memset(it6505->dpcd, 0, sizeof(it6505->dpcd)); DRM_DEV_DEBUG_DRIVER(dev, "power off it6505 success!"); } unlock: mutex_unlock(&it6505->extcon_lock); } static int it6505_use_notifier_module(struct it6505 *it6505) { int ret; struct device *dev = it6505->dev; it6505->event_nb.notifier_call = it6505_extcon_notifier; INIT_WORK(&it6505->extcon_wq, it6505_extcon_work); ret = devm_extcon_register_notifier(it6505->dev, it6505->extcon, EXTCON_DISP_DP, &it6505->event_nb); if (ret) { dev_err(dev, "failed to register notifier for DP"); return ret; } schedule_work(&it6505->extcon_wq); return 0; } static void it6505_remove_notifier_module(struct it6505 *it6505) { if (it6505->extcon) { devm_extcon_unregister_notifier(it6505->dev, it6505->extcon, EXTCON_DISP_DP, &it6505->event_nb); flush_work(&it6505->extcon_wq); } } static void __maybe_unused it6505_delayed_audio(struct work_struct *work) { struct it6505 *it6505 = container_of(work, struct it6505, delayed_audio.work); DRM_DEV_DEBUG_DRIVER(it6505->dev, "start"); if (!it6505->powered) return; if (!it6505->enable_drv_hold) it6505_enable_audio(it6505); } static int __maybe_unused it6505_audio_setup_hw_params(struct it6505 *it6505, struct hdmi_codec_params *params) { struct device *dev = it6505->dev; int i = 0; DRM_DEV_DEBUG_DRIVER(dev, "%s %d Hz, %d bit, %d channels\n", __func__, params->sample_rate, params->sample_width, params->cea.channels); if (!it6505->bridge.encoder) return -ENODEV; if (params->cea.channels <= 1 || params->cea.channels > 8) { DRM_DEV_DEBUG_DRIVER(dev, "channel number: %d not support", it6505->audio.channel_count); return -EINVAL; } it6505->audio.channel_count = params->cea.channels; while (i < ARRAY_SIZE(audio_sample_rate_map) && params->sample_rate != 
audio_sample_rate_map[i].sample_rate_value) {
		i++;
	}
	if (i == ARRAY_SIZE(audio_sample_rate_map)) {
		DRM_DEV_DEBUG_DRIVER(dev, "sample rate: %d Hz not support",
				     params->sample_rate);
		return -EINVAL;
	}
	it6505->audio.sample_rate = audio_sample_rate_map[i].rate;

	switch (params->sample_width) {
	case 16:
		it6505->audio.word_length = WORD_LENGTH_16BIT;
		break;
	case 18:
		it6505->audio.word_length = WORD_LENGTH_18BIT;
		break;
	case 20:
		it6505->audio.word_length = WORD_LENGTH_20BIT;
		break;
	case 24:
	case 32:
		/* 32-bit input is truncated to the 24-bit hardware maximum */
		it6505->audio.word_length = WORD_LENGTH_24BIT;
		break;
	default:
		DRM_DEV_DEBUG_DRIVER(dev, "wordlength: %d bit not support",
				     params->sample_width);
		return -EINVAL;
	}

	return 0;
}

/* hdmi-codec shutdown hook: mute audio if the bridge is powered. */
static void __maybe_unused it6505_audio_shutdown(struct device *dev, void *data)
{
	struct it6505 *it6505 = dev_get_drvdata(dev);

	if (it6505->powered)
		it6505_disable_audio(it6505);
}

/*
 * hdmi-codec plugged-callback registration: remember the callback and
 * immediately report the current connector status to the codec.
 */
static int __maybe_unused it6505_audio_hook_plugged_cb(struct device *dev,
						       void *data,
						       hdmi_codec_plugged_cb fn,
						       struct device *codec_dev)
{
	struct it6505 *it6505 = data;

	it6505->plugged_cb = fn;
	it6505->codec_dev = codec_dev;
	it6505_plugged_status_to_codec(it6505);
	return 0;
}

static inline struct it6505 *bridge_to_it6505(struct drm_bridge *bridge)
{
	return container_of(bridge, struct it6505, bridge);
}

/*
 * drm_bridge attach: requires DRM_BRIDGE_ATTACH_NO_CONNECTOR, registers
 * the DP AUX channel and, when an extcon device exists, hooks up the
 * cable-detect notifier.
 */
static int it6505_bridge_attach(struct drm_bridge *bridge,
				enum drm_bridge_attach_flags flags)
{
	struct it6505 *it6505 = bridge_to_it6505(bridge);
	struct device *dev = it6505->dev;
	int ret;

	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
		DRM_ERROR("DRM_BRIDGE_ATTACH_NO_CONNECTOR must be supplied");
		return -EINVAL;
	}

	if (!bridge->encoder) {
		dev_err(dev, "Parent encoder object not found");
		return -ENODEV;
	}

	/* Register aux channel */
	it6505->aux.drm_dev = bridge->dev;

	ret = drm_dp_aux_register(&it6505->aux);

	if (ret < 0) {
		dev_err(dev, "Failed to register aux: %d", ret);
		return ret;
	}

	if (it6505->extcon) {
		ret = it6505_use_notifier_module(it6505);
		if (ret < 0) {
			dev_err(dev, "use notifier module failed");
			return ret;
		}
	}

	return
0; } static void it6505_bridge_detach(struct drm_bridge *bridge) { struct it6505 *it6505 = bridge_to_it6505(bridge); flush_work(&it6505->link_works); it6505_remove_notifier_module(it6505); } static enum drm_mode_status it6505_bridge_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode) { struct it6505 *it6505 = bridge_to_it6505(bridge); if (mode->flags & DRM_MODE_FLAG_INTERLACE) return MODE_NO_INTERLACE; if (mode->clock > it6505->max_dpi_pixel_clock) return MODE_CLOCK_HIGH; it6505->video_info.clock = mode->clock; return MODE_OK; } static void it6505_bridge_atomic_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_state) { struct it6505 *it6505 = bridge_to_it6505(bridge); struct device *dev = it6505->dev; struct drm_atomic_state *state = old_state->base.state; struct hdmi_avi_infoframe frame; struct drm_crtc_state *crtc_state; struct drm_connector_state *conn_state; struct drm_display_mode *mode; struct drm_connector *connector; int ret; DRM_DEV_DEBUG_DRIVER(dev, "start"); connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); if (WARN_ON(!connector)) return; conn_state = drm_atomic_get_new_connector_state(state, connector); if (WARN_ON(!conn_state)) return; crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc); if (WARN_ON(!crtc_state)) return; mode = &crtc_state->adjusted_mode; if (WARN_ON(!mode)) return; ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode); if (ret) dev_err(dev, "Failed to setup AVI infoframe: %d", ret); it6505_update_video_parameter(it6505, mode); ret = it6505_send_video_infoframe(it6505, &frame); if (ret) dev_err(dev, "Failed to send AVI infoframe: %d", ret); it6505_int_mask_enable(it6505); it6505_video_reset(it6505); it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link, DP_SET_POWER_D0); } static void it6505_bridge_atomic_disable(struct drm_bridge *bridge, struct drm_bridge_state *old_state) { struct it6505 *it6505 
= bridge_to_it6505(bridge); struct device *dev = it6505->dev; DRM_DEV_DEBUG_DRIVER(dev, "start"); if (it6505->powered) { it6505_drm_dp_link_set_power(&it6505->aux, &it6505->link, DP_SET_POWER_D3); it6505_video_disable(it6505); } } static void it6505_bridge_atomic_pre_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_state) { struct it6505 *it6505 = bridge_to_it6505(bridge); struct device *dev = it6505->dev; DRM_DEV_DEBUG_DRIVER(dev, "start"); pm_runtime_get_sync(dev); } static void it6505_bridge_atomic_post_disable(struct drm_bridge *bridge, struct drm_bridge_state *old_state) { struct it6505 *it6505 = bridge_to_it6505(bridge); struct device *dev = it6505->dev; DRM_DEV_DEBUG_DRIVER(dev, "start"); pm_runtime_put_sync(dev); } static enum drm_connector_status it6505_bridge_detect(struct drm_bridge *bridge) { struct it6505 *it6505 = bridge_to_it6505(bridge); return it6505_detect(it6505); } static struct edid *it6505_bridge_get_edid(struct drm_bridge *bridge, struct drm_connector *connector) { struct it6505 *it6505 = bridge_to_it6505(bridge); struct device *dev = it6505->dev; if (!it6505->cached_edid) { it6505->cached_edid = drm_do_get_edid(connector, it6505_get_edid_block, it6505); if (!it6505->cached_edid) { DRM_DEV_DEBUG_DRIVER(dev, "failed to get edid!"); return NULL; } } return drm_edid_duplicate(it6505->cached_edid); } static const struct drm_bridge_funcs it6505_bridge_funcs = { .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_reset = drm_atomic_helper_bridge_reset, .attach = it6505_bridge_attach, .detach = it6505_bridge_detach, .mode_valid = it6505_bridge_mode_valid, .atomic_enable = it6505_bridge_atomic_enable, .atomic_disable = it6505_bridge_atomic_disable, .atomic_pre_enable = it6505_bridge_atomic_pre_enable, .atomic_post_disable = it6505_bridge_atomic_post_disable, .detect = it6505_bridge_detect, .get_edid = it6505_bridge_get_edid, }; static __maybe_unused 
int it6505_bridge_resume(struct device *dev) { struct it6505 *it6505 = dev_get_drvdata(dev); return it6505_poweron(it6505); } static __maybe_unused int it6505_bridge_suspend(struct device *dev) { struct it6505 *it6505 = dev_get_drvdata(dev); return it6505_poweroff(it6505); } static const struct dev_pm_ops it6505_bridge_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) SET_RUNTIME_PM_OPS(it6505_bridge_suspend, it6505_bridge_resume, NULL) }; static int it6505_init_pdata(struct it6505 *it6505) { struct it6505_platform_data *pdata = &it6505->pdata; struct device *dev = it6505->dev; /* 1.0V digital core power regulator */ pdata->pwr18 = devm_regulator_get(dev, "pwr18"); if (IS_ERR(pdata->pwr18)) { dev_err(dev, "pwr18 regulator not found"); return PTR_ERR(pdata->pwr18); } pdata->ovdd = devm_regulator_get(dev, "ovdd"); if (IS_ERR(pdata->ovdd)) { dev_err(dev, "ovdd regulator not found"); return PTR_ERR(pdata->ovdd); } pdata->gpiod_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(pdata->gpiod_reset)) { dev_err(dev, "gpiod_reset gpio not found"); return PTR_ERR(pdata->gpiod_reset); } return 0; } static int it6505_get_data_lanes_count(const struct device_node *endpoint, const unsigned int min, const unsigned int max) { int ret; ret = of_property_count_u32_elems(endpoint, "data-lanes"); if (ret < 0) return ret; if (ret < min || ret > max) return -EINVAL; return ret; } static void it6505_parse_dt(struct it6505 *it6505) { struct device *dev = it6505->dev; struct device_node *np = dev->of_node, *ep = NULL; int len; u64 link_frequencies; u32 data_lanes[4]; u32 *afe_setting = &it6505->afe_setting; u32 *max_lane_count = &it6505->max_lane_count; u32 *max_dpi_pixel_clock = &it6505->max_dpi_pixel_clock; it6505->lane_swap_disabled = device_property_read_bool(dev, "no-laneswap"); if (it6505->lane_swap_disabled) it6505->lane_swap = false; if (device_property_read_u32(dev, "afe-setting", afe_setting) == 0) { if (*afe_setting >= 
ARRAY_SIZE(afe_setting_table)) { dev_err(dev, "afe setting error, use default"); *afe_setting = 0; } } else { *afe_setting = 0; } ep = of_graph_get_endpoint_by_regs(np, 1, 0); of_node_put(ep); if (ep) { len = it6505_get_data_lanes_count(ep, 1, 4); if (len > 0 && len != 3) { of_property_read_u32_array(ep, "data-lanes", data_lanes, len); *max_lane_count = len; } else { *max_lane_count = MAX_LANE_COUNT; dev_err(dev, "error data-lanes, use default"); } } else { *max_lane_count = MAX_LANE_COUNT; dev_err(dev, "error endpoint, use default"); } ep = of_graph_get_endpoint_by_regs(np, 0, 0); of_node_put(ep); if (ep) { len = of_property_read_variable_u64_array(ep, "link-frequencies", &link_frequencies, 0, 1); if (len >= 0) { do_div(link_frequencies, 1000); if (link_frequencies > 297000) { dev_err(dev, "max pixel clock error, use default"); *max_dpi_pixel_clock = DPI_PIXEL_CLK_MAX; } else { *max_dpi_pixel_clock = link_frequencies; } } else { dev_err(dev, "error link frequencies, use default"); *max_dpi_pixel_clock = DPI_PIXEL_CLK_MAX; } } else { dev_err(dev, "error endpoint, use default"); *max_dpi_pixel_clock = DPI_PIXEL_CLK_MAX; } DRM_DEV_DEBUG_DRIVER(dev, "using afe_setting: %u, max_lane_count: %u", it6505->afe_setting, it6505->max_lane_count); DRM_DEV_DEBUG_DRIVER(dev, "using max_dpi_pixel_clock: %u kHz", it6505->max_dpi_pixel_clock); } static ssize_t receive_timing_debugfs_show(struct file *file, char __user *buf, size_t len, loff_t *ppos) { struct it6505 *it6505 = file->private_data; struct drm_display_mode *vid; u8 read_buf[READ_BUFFER_SIZE]; u8 *str = read_buf, *end = read_buf + READ_BUFFER_SIZE; ssize_t ret, count; if (!it6505) return -ENODEV; it6505_calc_video_info(it6505); vid = &it6505->video_info; str += scnprintf(str, end - str, "---video timing---\n"); str += scnprintf(str, end - str, "PCLK:%d.%03dMHz\n", vid->clock / 1000, vid->clock % 1000); str += scnprintf(str, end - str, "HTotal:%d\n", vid->htotal); str += scnprintf(str, end - str, "HActive:%d\n", 
vid->hdisplay); str += scnprintf(str, end - str, "HFrontPorch:%d\n", vid->hsync_start - vid->hdisplay); str += scnprintf(str, end - str, "HSyncWidth:%d\n", vid->hsync_end - vid->hsync_start); str += scnprintf(str, end - str, "HBackPorch:%d\n", vid->htotal - vid->hsync_end); str += scnprintf(str, end - str, "VTotal:%d\n", vid->vtotal); str += scnprintf(str, end - str, "VActive:%d\n", vid->vdisplay); str += scnprintf(str, end - str, "VFrontPorch:%d\n", vid->vsync_start - vid->vdisplay); str += scnprintf(str, end - str, "VSyncWidth:%d\n", vid->vsync_end - vid->vsync_start); str += scnprintf(str, end - str, "VBackPorch:%d\n", vid->vtotal - vid->vsync_end); count = str - read_buf; ret = simple_read_from_buffer(buf, len, ppos, read_buf, count); return ret; } static int force_power_on_off_debugfs_write(void *data, u64 value) { struct it6505 *it6505 = data; if (!it6505) return -ENODEV; if (value) it6505_poweron(it6505); else it6505_poweroff(it6505); return 0; } static int enable_drv_hold_debugfs_show(void *data, u64 *buf) { struct it6505 *it6505 = data; if (!it6505) return -ENODEV; *buf = it6505->enable_drv_hold; return 0; } static int enable_drv_hold_debugfs_write(void *data, u64 drv_hold) { struct it6505 *it6505 = data; if (!it6505) return -ENODEV; it6505->enable_drv_hold = drv_hold; if (it6505->enable_drv_hold) { it6505_int_mask_disable(it6505); } else { it6505_clear_int(it6505); it6505_int_mask_enable(it6505); if (it6505->powered) { it6505->connector_status = it6505_get_sink_hpd_status(it6505) ? 
connector_status_connected : connector_status_disconnected; } else { it6505->connector_status = connector_status_disconnected; } } return 0; } static const struct file_operations receive_timing_fops = { .owner = THIS_MODULE, .open = simple_open, .read = receive_timing_debugfs_show, .llseek = default_llseek, }; DEFINE_DEBUGFS_ATTRIBUTE(fops_force_power, NULL, force_power_on_off_debugfs_write, "%llu\n"); DEFINE_DEBUGFS_ATTRIBUTE(fops_enable_drv_hold, enable_drv_hold_debugfs_show, enable_drv_hold_debugfs_write, "%llu\n"); static const struct debugfs_entries debugfs_entry[] = { { "receive_timing", &receive_timing_fops }, { "force_power_on_off", &fops_force_power }, { "enable_drv_hold", &fops_enable_drv_hold }, { NULL, NULL }, }; static void debugfs_create_files(struct it6505 *it6505) { int i = 0; while (debugfs_entry[i].name && debugfs_entry[i].fops) { debugfs_create_file(debugfs_entry[i].name, 0644, it6505->debugfs, it6505, debugfs_entry[i].fops); i++; } } static void debugfs_init(struct it6505 *it6505) { struct device *dev = it6505->dev; it6505->debugfs = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL); if (IS_ERR(it6505->debugfs)) { dev_err(dev, "failed to create debugfs root"); return; } debugfs_create_files(it6505); } static void it6505_debugfs_remove(struct it6505 *it6505) { debugfs_remove_recursive(it6505->debugfs); } static void it6505_shutdown(struct i2c_client *client) { struct it6505 *it6505 = dev_get_drvdata(&client->dev); if (it6505->powered) it6505_lane_off(it6505); } static int it6505_i2c_probe(struct i2c_client *client) { struct it6505 *it6505; struct device *dev = &client->dev; struct extcon_dev *extcon; int err, intp_irq; it6505 = devm_kzalloc(&client->dev, sizeof(*it6505), GFP_KERNEL); if (!it6505) return -ENOMEM; mutex_init(&it6505->extcon_lock); mutex_init(&it6505->mode_lock); mutex_init(&it6505->aux_lock); it6505->bridge.of_node = client->dev.of_node; it6505->connector_status = connector_status_disconnected; it6505->dev = &client->dev; 
i2c_set_clientdata(client, it6505); /* get extcon device from DTS */ extcon = extcon_get_edev_by_phandle(dev, 0); if (PTR_ERR(extcon) == -EPROBE_DEFER) return -EPROBE_DEFER; if (IS_ERR(extcon)) { dev_err(dev, "can not get extcon device!"); return PTR_ERR(extcon); } it6505->extcon = extcon; it6505->regmap = devm_regmap_init_i2c(client, &it6505_regmap_config); if (IS_ERR(it6505->regmap)) { dev_err(dev, "regmap i2c init failed"); err = PTR_ERR(it6505->regmap); return err; } err = it6505_init_pdata(it6505); if (err) { dev_err(dev, "Failed to initialize pdata: %d", err); return err; } it6505_parse_dt(it6505); intp_irq = client->irq; if (!intp_irq) { dev_err(dev, "Failed to get INTP IRQ"); err = -ENODEV; return err; } err = devm_request_threaded_irq(&client->dev, intp_irq, NULL, it6505_int_threaded_handler, IRQF_TRIGGER_LOW | IRQF_ONESHOT, "it6505-intp", it6505); if (err) { dev_err(dev, "Failed to request INTP threaded IRQ: %d", err); return err; } INIT_WORK(&it6505->link_works, it6505_link_training_work); INIT_WORK(&it6505->hdcp_wait_ksv_list, it6505_hdcp_wait_ksv_list); INIT_DELAYED_WORK(&it6505->hdcp_work, it6505_hdcp_work); init_completion(&it6505->extcon_completion); memset(it6505->dpcd, 0, sizeof(it6505->dpcd)); it6505->powered = false; it6505->enable_drv_hold = DEFAULT_DRV_HOLD; if (DEFAULT_PWR_ON) it6505_poweron(it6505); DRM_DEV_DEBUG_DRIVER(dev, "it6505 device name: %s", dev_name(dev)); debugfs_init(it6505); pm_runtime_enable(dev); it6505->aux.name = "DP-AUX"; it6505->aux.dev = dev; it6505->aux.transfer = it6505_aux_transfer; drm_dp_aux_init(&it6505->aux); it6505->bridge.funcs = &it6505_bridge_funcs; it6505->bridge.type = DRM_MODE_CONNECTOR_DisplayPort; it6505->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD; drm_bridge_add(&it6505->bridge); return 0; } static void it6505_i2c_remove(struct i2c_client *client) { struct it6505 *it6505 = i2c_get_clientdata(client); drm_bridge_remove(&it6505->bridge); drm_dp_aux_unregister(&it6505->aux); 
it6505_debugfs_remove(it6505); it6505_poweroff(it6505); it6505_remove_edid(it6505); } static const struct i2c_device_id it6505_id[] = { { "it6505", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, it6505_id); static const struct of_device_id it6505_of_match[] = { { .compatible = "ite,it6505" }, { } }; static struct i2c_driver it6505_i2c_driver = { .driver = { .name = "it6505", .of_match_table = it6505_of_match, .pm = &it6505_bridge_pm_ops, }, .probe = it6505_i2c_probe, .remove = it6505_i2c_remove, .shutdown = it6505_shutdown, .id_table = it6505_id, }; module_i2c_driver(it6505_i2c_driver); MODULE_AUTHOR("Allen Chen <[email protected]>"); MODULE_DESCRIPTION("IT6505 DisplayPort Transmitter driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/bridge/ite-it6505.c
// SPDX-License-Identifier: GPL-2.0 /* * TI SN65DSI83,84,85 driver * * Currently supported: * - SN65DSI83 * = 1x Single-link DSI ~ 1x Single-link LVDS * - Supported * - Single-link LVDS mode tested * - SN65DSI84 * = 1x Single-link DSI ~ 2x Single-link or 1x Dual-link LVDS * - Supported * - Dual-link LVDS mode tested * - 2x Single-link LVDS mode unsupported * (should be easy to add by someone who has the HW) * - SN65DSI85 * = 2x Single-link or 1x Dual-link DSI ~ 2x Single-link or 1x Dual-link LVDS * - Unsupported * (should be easy to add by someone who has the HW) * * Copyright (C) 2021 Marek Vasut <[email protected]> * * Based on previous work of: * Valentin Raevsky <[email protected]> * Philippe Schenker <[email protected]> */ #include <linux/bits.h> #include <linux/clk.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/media-bus-format.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_graph.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> /* ID registers */ #define REG_ID(n) (0x00 + (n)) /* Reset and clock registers */ #define REG_RC_RESET 0x09 #define REG_RC_RESET_SOFT_RESET BIT(0) #define REG_RC_LVDS_PLL 0x0a #define REG_RC_LVDS_PLL_PLL_EN_STAT BIT(7) #define REG_RC_LVDS_PLL_LVDS_CLK_RANGE(n) (((n) & 0x7) << 1) #define REG_RC_LVDS_PLL_HS_CLK_SRC_DPHY BIT(0) #define REG_RC_DSI_CLK 0x0b #define REG_RC_DSI_CLK_DSI_CLK_DIVIDER(n) (((n) & 0x1f) << 3) #define REG_RC_DSI_CLK_REFCLK_MULTIPLIER(n) ((n) & 0x3) #define REG_RC_PLL_EN 0x0d #define REG_RC_PLL_EN_PLL_EN BIT(0) /* DSI registers */ #define REG_DSI_LANE 0x10 #define REG_DSI_LANE_LEFT_RIGHT_PIXELS BIT(7) /* DSI85-only */ #define REG_DSI_LANE_DSI_CHANNEL_MODE_DUAL 0 /* DSI85-only */ #define REG_DSI_LANE_DSI_CHANNEL_MODE_2SINGLE BIT(6) /* DSI85-only */ 
#define REG_DSI_LANE_DSI_CHANNEL_MODE_SINGLE BIT(5) #define REG_DSI_LANE_CHA_DSI_LANES(n) (((n) & 0x3) << 3) #define REG_DSI_LANE_CHB_DSI_LANES(n) (((n) & 0x3) << 1) #define REG_DSI_LANE_SOT_ERR_TOL_DIS BIT(0) #define REG_DSI_EQ 0x11 #define REG_DSI_EQ_CHA_DSI_DATA_EQ(n) (((n) & 0x3) << 6) #define REG_DSI_EQ_CHA_DSI_CLK_EQ(n) (((n) & 0x3) << 2) #define REG_DSI_CLK 0x12 #define REG_DSI_CLK_CHA_DSI_CLK_RANGE(n) ((n) & 0xff) /* LVDS registers */ #define REG_LVDS_FMT 0x18 #define REG_LVDS_FMT_DE_NEG_POLARITY BIT(7) #define REG_LVDS_FMT_HS_NEG_POLARITY BIT(6) #define REG_LVDS_FMT_VS_NEG_POLARITY BIT(5) #define REG_LVDS_FMT_LVDS_LINK_CFG BIT(4) /* 0:AB 1:A-only */ #define REG_LVDS_FMT_CHA_24BPP_MODE BIT(3) #define REG_LVDS_FMT_CHB_24BPP_MODE BIT(2) #define REG_LVDS_FMT_CHA_24BPP_FORMAT1 BIT(1) #define REG_LVDS_FMT_CHB_24BPP_FORMAT1 BIT(0) #define REG_LVDS_VCOM 0x19 #define REG_LVDS_VCOM_CHA_LVDS_VOCM BIT(6) #define REG_LVDS_VCOM_CHB_LVDS_VOCM BIT(4) #define REG_LVDS_VCOM_CHA_LVDS_VOD_SWING(n) (((n) & 0x3) << 2) #define REG_LVDS_VCOM_CHB_LVDS_VOD_SWING(n) ((n) & 0x3) #define REG_LVDS_LANE 0x1a #define REG_LVDS_LANE_EVEN_ODD_SWAP BIT(6) #define REG_LVDS_LANE_CHA_REVERSE_LVDS BIT(5) #define REG_LVDS_LANE_CHB_REVERSE_LVDS BIT(4) #define REG_LVDS_LANE_CHA_LVDS_TERM BIT(1) #define REG_LVDS_LANE_CHB_LVDS_TERM BIT(0) #define REG_LVDS_CM 0x1b #define REG_LVDS_CM_CHA_LVDS_CM_ADJUST(n) (((n) & 0x3) << 4) #define REG_LVDS_CM_CHB_LVDS_CM_ADJUST(n) ((n) & 0x3) /* Video registers */ #define REG_VID_CHA_ACTIVE_LINE_LENGTH_LOW 0x20 #define REG_VID_CHA_ACTIVE_LINE_LENGTH_HIGH 0x21 #define REG_VID_CHA_VERTICAL_DISPLAY_SIZE_LOW 0x24 #define REG_VID_CHA_VERTICAL_DISPLAY_SIZE_HIGH 0x25 #define REG_VID_CHA_SYNC_DELAY_LOW 0x28 #define REG_VID_CHA_SYNC_DELAY_HIGH 0x29 #define REG_VID_CHA_HSYNC_PULSE_WIDTH_LOW 0x2c #define REG_VID_CHA_HSYNC_PULSE_WIDTH_HIGH 0x2d #define REG_VID_CHA_VSYNC_PULSE_WIDTH_LOW 0x30 #define REG_VID_CHA_VSYNC_PULSE_WIDTH_HIGH 0x31 #define REG_VID_CHA_HORIZONTAL_BACK_PORCH 
0x34 #define REG_VID_CHA_VERTICAL_BACK_PORCH 0x36 #define REG_VID_CHA_HORIZONTAL_FRONT_PORCH 0x38 #define REG_VID_CHA_VERTICAL_FRONT_PORCH 0x3a #define REG_VID_CHA_TEST_PATTERN 0x3c /* IRQ registers */ #define REG_IRQ_GLOBAL 0xe0 #define REG_IRQ_GLOBAL_IRQ_EN BIT(0) #define REG_IRQ_EN 0xe1 #define REG_IRQ_EN_CHA_SYNCH_ERR_EN BIT(7) #define REG_IRQ_EN_CHA_CRC_ERR_EN BIT(6) #define REG_IRQ_EN_CHA_UNC_ECC_ERR_EN BIT(5) #define REG_IRQ_EN_CHA_COR_ECC_ERR_EN BIT(4) #define REG_IRQ_EN_CHA_LLP_ERR_EN BIT(3) #define REG_IRQ_EN_CHA_SOT_BIT_ERR_EN BIT(2) #define REG_IRQ_EN_CHA_PLL_UNLOCK_EN BIT(0) #define REG_IRQ_STAT 0xe5 #define REG_IRQ_STAT_CHA_SYNCH_ERR BIT(7) #define REG_IRQ_STAT_CHA_CRC_ERR BIT(6) #define REG_IRQ_STAT_CHA_UNC_ECC_ERR BIT(5) #define REG_IRQ_STAT_CHA_COR_ECC_ERR BIT(4) #define REG_IRQ_STAT_CHA_LLP_ERR BIT(3) #define REG_IRQ_STAT_CHA_SOT_BIT_ERR BIT(2) #define REG_IRQ_STAT_CHA_PLL_UNLOCK BIT(0) enum sn65dsi83_model { MODEL_SN65DSI83, MODEL_SN65DSI84, }; struct sn65dsi83 { struct drm_bridge bridge; struct device *dev; struct regmap *regmap; struct mipi_dsi_device *dsi; struct drm_bridge *panel_bridge; struct gpio_desc *enable_gpio; struct regulator *vcc; bool lvds_dual_link; bool lvds_dual_link_even_odd_swap; }; static const struct regmap_range sn65dsi83_readable_ranges[] = { regmap_reg_range(REG_ID(0), REG_ID(8)), regmap_reg_range(REG_RC_LVDS_PLL, REG_RC_DSI_CLK), regmap_reg_range(REG_RC_PLL_EN, REG_RC_PLL_EN), regmap_reg_range(REG_DSI_LANE, REG_DSI_CLK), regmap_reg_range(REG_LVDS_FMT, REG_LVDS_CM), regmap_reg_range(REG_VID_CHA_ACTIVE_LINE_LENGTH_LOW, REG_VID_CHA_ACTIVE_LINE_LENGTH_HIGH), regmap_reg_range(REG_VID_CHA_VERTICAL_DISPLAY_SIZE_LOW, REG_VID_CHA_VERTICAL_DISPLAY_SIZE_HIGH), regmap_reg_range(REG_VID_CHA_SYNC_DELAY_LOW, REG_VID_CHA_SYNC_DELAY_HIGH), regmap_reg_range(REG_VID_CHA_HSYNC_PULSE_WIDTH_LOW, REG_VID_CHA_HSYNC_PULSE_WIDTH_HIGH), regmap_reg_range(REG_VID_CHA_VSYNC_PULSE_WIDTH_LOW, REG_VID_CHA_VSYNC_PULSE_WIDTH_HIGH), 
regmap_reg_range(REG_VID_CHA_HORIZONTAL_BACK_PORCH, REG_VID_CHA_HORIZONTAL_BACK_PORCH), regmap_reg_range(REG_VID_CHA_VERTICAL_BACK_PORCH, REG_VID_CHA_VERTICAL_BACK_PORCH), regmap_reg_range(REG_VID_CHA_HORIZONTAL_FRONT_PORCH, REG_VID_CHA_HORIZONTAL_FRONT_PORCH), regmap_reg_range(REG_VID_CHA_VERTICAL_FRONT_PORCH, REG_VID_CHA_VERTICAL_FRONT_PORCH), regmap_reg_range(REG_VID_CHA_TEST_PATTERN, REG_VID_CHA_TEST_PATTERN), regmap_reg_range(REG_IRQ_GLOBAL, REG_IRQ_EN), regmap_reg_range(REG_IRQ_STAT, REG_IRQ_STAT), }; static const struct regmap_access_table sn65dsi83_readable_table = { .yes_ranges = sn65dsi83_readable_ranges, .n_yes_ranges = ARRAY_SIZE(sn65dsi83_readable_ranges), }; static const struct regmap_range sn65dsi83_writeable_ranges[] = { regmap_reg_range(REG_RC_RESET, REG_RC_DSI_CLK), regmap_reg_range(REG_RC_PLL_EN, REG_RC_PLL_EN), regmap_reg_range(REG_DSI_LANE, REG_DSI_CLK), regmap_reg_range(REG_LVDS_FMT, REG_LVDS_CM), regmap_reg_range(REG_VID_CHA_ACTIVE_LINE_LENGTH_LOW, REG_VID_CHA_ACTIVE_LINE_LENGTH_HIGH), regmap_reg_range(REG_VID_CHA_VERTICAL_DISPLAY_SIZE_LOW, REG_VID_CHA_VERTICAL_DISPLAY_SIZE_HIGH), regmap_reg_range(REG_VID_CHA_SYNC_DELAY_LOW, REG_VID_CHA_SYNC_DELAY_HIGH), regmap_reg_range(REG_VID_CHA_HSYNC_PULSE_WIDTH_LOW, REG_VID_CHA_HSYNC_PULSE_WIDTH_HIGH), regmap_reg_range(REG_VID_CHA_VSYNC_PULSE_WIDTH_LOW, REG_VID_CHA_VSYNC_PULSE_WIDTH_HIGH), regmap_reg_range(REG_VID_CHA_HORIZONTAL_BACK_PORCH, REG_VID_CHA_HORIZONTAL_BACK_PORCH), regmap_reg_range(REG_VID_CHA_VERTICAL_BACK_PORCH, REG_VID_CHA_VERTICAL_BACK_PORCH), regmap_reg_range(REG_VID_CHA_HORIZONTAL_FRONT_PORCH, REG_VID_CHA_HORIZONTAL_FRONT_PORCH), regmap_reg_range(REG_VID_CHA_VERTICAL_FRONT_PORCH, REG_VID_CHA_VERTICAL_FRONT_PORCH), regmap_reg_range(REG_VID_CHA_TEST_PATTERN, REG_VID_CHA_TEST_PATTERN), regmap_reg_range(REG_IRQ_GLOBAL, REG_IRQ_EN), regmap_reg_range(REG_IRQ_STAT, REG_IRQ_STAT), }; static const struct regmap_access_table sn65dsi83_writeable_table = { .yes_ranges = sn65dsi83_writeable_ranges, 
.n_yes_ranges = ARRAY_SIZE(sn65dsi83_writeable_ranges), }; static const struct regmap_range sn65dsi83_volatile_ranges[] = { regmap_reg_range(REG_RC_RESET, REG_RC_RESET), regmap_reg_range(REG_RC_LVDS_PLL, REG_RC_LVDS_PLL), regmap_reg_range(REG_IRQ_STAT, REG_IRQ_STAT), }; static const struct regmap_access_table sn65dsi83_volatile_table = { .yes_ranges = sn65dsi83_volatile_ranges, .n_yes_ranges = ARRAY_SIZE(sn65dsi83_volatile_ranges), }; static const struct regmap_config sn65dsi83_regmap_config = { .reg_bits = 8, .val_bits = 8, .rd_table = &sn65dsi83_readable_table, .wr_table = &sn65dsi83_writeable_table, .volatile_table = &sn65dsi83_volatile_table, .cache_type = REGCACHE_RBTREE, .max_register = REG_IRQ_STAT, }; static struct sn65dsi83 *bridge_to_sn65dsi83(struct drm_bridge *bridge) { return container_of(bridge, struct sn65dsi83, bridge); } static int sn65dsi83_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge); return drm_bridge_attach(bridge->encoder, ctx->panel_bridge, &ctx->bridge, flags); } static void sn65dsi83_detach(struct drm_bridge *bridge) { struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge); if (!ctx->dsi) return; ctx->dsi = NULL; } static u8 sn65dsi83_get_lvds_range(struct sn65dsi83 *ctx, const struct drm_display_mode *mode) { /* * The encoding of the LVDS_CLK_RANGE is as follows: * 000 - 25 MHz <= LVDS_CLK < 37.5 MHz * 001 - 37.5 MHz <= LVDS_CLK < 62.5 MHz * 010 - 62.5 MHz <= LVDS_CLK < 87.5 MHz * 011 - 87.5 MHz <= LVDS_CLK < 112.5 MHz * 100 - 112.5 MHz <= LVDS_CLK < 137.5 MHz * 101 - 137.5 MHz <= LVDS_CLK <= 154 MHz * which is a range of 12.5MHz..162.5MHz in 50MHz steps, except that * the ends of the ranges are clamped to the supported range. 
Since * sn65dsi83_mode_valid() already filters the valid modes and limits * the clock to 25..154 MHz, the range calculation can be simplified * as follows: */ int mode_clock = mode->clock; if (ctx->lvds_dual_link) mode_clock /= 2; return (mode_clock - 12500) / 25000; } static u8 sn65dsi83_get_dsi_range(struct sn65dsi83 *ctx, const struct drm_display_mode *mode) { /* * The encoding of the CHA_DSI_CLK_RANGE is as follows: * 0x00 through 0x07 - Reserved * 0x08 - 40 <= DSI_CLK < 45 MHz * 0x09 - 45 <= DSI_CLK < 50 MHz * ... * 0x63 - 495 <= DSI_CLK < 500 MHz * 0x64 - 500 MHz * 0x65 through 0xFF - Reserved * which is DSI clock in 5 MHz steps, clamped to 40..500 MHz. * The DSI clock are calculated as: * DSI_CLK = mode clock * bpp / dsi_data_lanes / 2 * the 2 is there because the bus is DDR. */ return DIV_ROUND_UP(clamp((unsigned int)mode->clock * mipi_dsi_pixel_format_to_bpp(ctx->dsi->format) / ctx->dsi->lanes / 2, 40000U, 500000U), 5000U); } static u8 sn65dsi83_get_dsi_div(struct sn65dsi83 *ctx) { /* The divider is (DSI_CLK / LVDS_CLK) - 1, which really is: */ unsigned int dsi_div = mipi_dsi_pixel_format_to_bpp(ctx->dsi->format); dsi_div /= ctx->dsi->lanes; if (!ctx->lvds_dual_link) dsi_div /= 2; return dsi_div - 1; } static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge); struct drm_atomic_state *state = old_bridge_state->base.state; const struct drm_bridge_state *bridge_state; const struct drm_crtc_state *crtc_state; const struct drm_display_mode *mode; struct drm_connector *connector; struct drm_crtc *crtc; bool lvds_format_24bpp; bool lvds_format_jeida; unsigned int pval; __le16 le16val; u16 val; int ret; ret = regulator_enable(ctx->vcc); if (ret) { dev_err(ctx->dev, "Failed to enable vcc: %d\n", ret); return; } /* Deassert reset */ gpiod_set_value_cansleep(ctx->enable_gpio, 1); usleep_range(10000, 11000); /* Get the LVDS format from the bridge state. 
*/ bridge_state = drm_atomic_get_new_bridge_state(state, bridge); switch (bridge_state->output_bus_cfg.format) { case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG: lvds_format_24bpp = false; lvds_format_jeida = true; break; case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA: lvds_format_24bpp = true; lvds_format_jeida = true; break; case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG: lvds_format_24bpp = true; lvds_format_jeida = false; break; default: /* * Some bridges still don't set the correct * LVDS bus pixel format, use SPWG24 default * format until those are fixed. */ lvds_format_24bpp = true; lvds_format_jeida = false; dev_warn(ctx->dev, "Unsupported LVDS bus format 0x%04x, please check output bridge driver. Falling back to SPWG24.\n", bridge_state->output_bus_cfg.format); break; } /* * Retrieve the CRTC adjusted mode. This requires a little dance to go * from the bridge to the encoder, to the connector and to the CRTC. */ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); crtc = drm_atomic_get_new_connector_state(state, connector)->crtc; crtc_state = drm_atomic_get_new_crtc_state(state, crtc); mode = &crtc_state->adjusted_mode; /* Clear reset, disable PLL */ regmap_write(ctx->regmap, REG_RC_RESET, 0x00); regmap_write(ctx->regmap, REG_RC_PLL_EN, 0x00); /* Reference clock derived from DSI link clock. */ regmap_write(ctx->regmap, REG_RC_LVDS_PLL, REG_RC_LVDS_PLL_LVDS_CLK_RANGE(sn65dsi83_get_lvds_range(ctx, mode)) | REG_RC_LVDS_PLL_HS_CLK_SRC_DPHY); regmap_write(ctx->regmap, REG_DSI_CLK, REG_DSI_CLK_CHA_DSI_CLK_RANGE(sn65dsi83_get_dsi_range(ctx, mode))); regmap_write(ctx->regmap, REG_RC_DSI_CLK, REG_RC_DSI_CLK_DSI_CLK_DIVIDER(sn65dsi83_get_dsi_div(ctx))); /* Set number of DSI lanes and LVDS link config. */ regmap_write(ctx->regmap, REG_DSI_LANE, REG_DSI_LANE_DSI_CHANNEL_MODE_SINGLE | REG_DSI_LANE_CHA_DSI_LANES(~(ctx->dsi->lanes - 1)) | /* CHB is DSI85-only, set to default on DSI83/DSI84 */ REG_DSI_LANE_CHB_DSI_LANES(3)); /* No equalization. 
*/ regmap_write(ctx->regmap, REG_DSI_EQ, 0x00); /* Set up sync signal polarity. */ val = (mode->flags & DRM_MODE_FLAG_NHSYNC ? REG_LVDS_FMT_HS_NEG_POLARITY : 0) | (mode->flags & DRM_MODE_FLAG_NVSYNC ? REG_LVDS_FMT_VS_NEG_POLARITY : 0); /* Set up bits-per-pixel, 18bpp or 24bpp. */ if (lvds_format_24bpp) { val |= REG_LVDS_FMT_CHA_24BPP_MODE; if (ctx->lvds_dual_link) val |= REG_LVDS_FMT_CHB_24BPP_MODE; } /* Set up LVDS format, JEIDA/Format 1 or SPWG/Format 2 */ if (lvds_format_jeida) { val |= REG_LVDS_FMT_CHA_24BPP_FORMAT1; if (ctx->lvds_dual_link) val |= REG_LVDS_FMT_CHB_24BPP_FORMAT1; } /* Set up LVDS output config (DSI84,DSI85) */ if (!ctx->lvds_dual_link) val |= REG_LVDS_FMT_LVDS_LINK_CFG; regmap_write(ctx->regmap, REG_LVDS_FMT, val); regmap_write(ctx->regmap, REG_LVDS_VCOM, 0x05); regmap_write(ctx->regmap, REG_LVDS_LANE, (ctx->lvds_dual_link_even_odd_swap ? REG_LVDS_LANE_EVEN_ODD_SWAP : 0) | REG_LVDS_LANE_CHA_LVDS_TERM | REG_LVDS_LANE_CHB_LVDS_TERM); regmap_write(ctx->regmap, REG_LVDS_CM, 0x00); le16val = cpu_to_le16(mode->hdisplay); regmap_bulk_write(ctx->regmap, REG_VID_CHA_ACTIVE_LINE_LENGTH_LOW, &le16val, 2); le16val = cpu_to_le16(mode->vdisplay); regmap_bulk_write(ctx->regmap, REG_VID_CHA_VERTICAL_DISPLAY_SIZE_LOW, &le16val, 2); /* 32 + 1 pixel clock to ensure proper operation */ le16val = cpu_to_le16(32 + 1); regmap_bulk_write(ctx->regmap, REG_VID_CHA_SYNC_DELAY_LOW, &le16val, 2); le16val = cpu_to_le16(mode->hsync_end - mode->hsync_start); regmap_bulk_write(ctx->regmap, REG_VID_CHA_HSYNC_PULSE_WIDTH_LOW, &le16val, 2); le16val = cpu_to_le16(mode->vsync_end - mode->vsync_start); regmap_bulk_write(ctx->regmap, REG_VID_CHA_VSYNC_PULSE_WIDTH_LOW, &le16val, 2); regmap_write(ctx->regmap, REG_VID_CHA_HORIZONTAL_BACK_PORCH, mode->htotal - mode->hsync_end); regmap_write(ctx->regmap, REG_VID_CHA_VERTICAL_BACK_PORCH, mode->vtotal - mode->vsync_end); regmap_write(ctx->regmap, REG_VID_CHA_HORIZONTAL_FRONT_PORCH, mode->hsync_start - mode->hdisplay); 
regmap_write(ctx->regmap, REG_VID_CHA_VERTICAL_FRONT_PORCH, mode->vsync_start - mode->vdisplay); regmap_write(ctx->regmap, REG_VID_CHA_TEST_PATTERN, 0x00); /* Enable PLL */ regmap_write(ctx->regmap, REG_RC_PLL_EN, REG_RC_PLL_EN_PLL_EN); usleep_range(3000, 4000); ret = regmap_read_poll_timeout(ctx->regmap, REG_RC_LVDS_PLL, pval, pval & REG_RC_LVDS_PLL_PLL_EN_STAT, 1000, 100000); if (ret) { dev_err(ctx->dev, "failed to lock PLL, ret=%i\n", ret); /* On failure, disable PLL again and exit. */ regmap_write(ctx->regmap, REG_RC_PLL_EN, 0x00); regulator_disable(ctx->vcc); return; } /* Trigger reset after CSR register update. */ regmap_write(ctx->regmap, REG_RC_RESET, REG_RC_RESET_SOFT_RESET); /* Wait for 10ms after soft reset as specified in datasheet */ usleep_range(10000, 12000); } static void sn65dsi83_atomic_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge); unsigned int pval; /* Clear all errors that got asserted during initialization. */ regmap_read(ctx->regmap, REG_IRQ_STAT, &pval); regmap_write(ctx->regmap, REG_IRQ_STAT, pval); /* Wait for 1ms and check for errors in status register */ usleep_range(1000, 1100); regmap_read(ctx->regmap, REG_IRQ_STAT, &pval); if (pval) dev_err(ctx->dev, "Unexpected link status 0x%02x\n", pval); } static void sn65dsi83_atomic_disable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge); int ret; /* Put the chip in reset, pull EN line low, and assure 10ms reset low timing. 
*/ gpiod_set_value_cansleep(ctx->enable_gpio, 0); usleep_range(10000, 11000); ret = regulator_disable(ctx->vcc); if (ret) dev_err(ctx->dev, "Failed to disable vcc: %d\n", ret); regcache_mark_dirty(ctx->regmap); } static enum drm_mode_status sn65dsi83_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode) { /* LVDS output clock range 25..154 MHz */ if (mode->clock < 25000) return MODE_CLOCK_LOW; if (mode->clock > 154000) return MODE_CLOCK_HIGH; return MODE_OK; } #define MAX_INPUT_SEL_FORMATS 1 static u32 * sn65dsi83_atomic_get_input_bus_fmts(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state, u32 output_fmt, unsigned int *num_input_fmts) { u32 *input_fmts; *num_input_fmts = 0; input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts), GFP_KERNEL); if (!input_fmts) return NULL; /* This is the DSI-end bus format */ input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24; *num_input_fmts = 1; return input_fmts; } static const struct drm_bridge_funcs sn65dsi83_funcs = { .attach = sn65dsi83_attach, .detach = sn65dsi83_detach, .atomic_enable = sn65dsi83_atomic_enable, .atomic_pre_enable = sn65dsi83_atomic_pre_enable, .atomic_disable = sn65dsi83_atomic_disable, .mode_valid = sn65dsi83_mode_valid, .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_reset = drm_atomic_helper_bridge_reset, .atomic_get_input_bus_fmts = sn65dsi83_atomic_get_input_bus_fmts, }; static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model) { struct drm_bridge *panel_bridge; struct device *dev = ctx->dev; ctx->lvds_dual_link = false; ctx->lvds_dual_link_even_odd_swap = false; if (model != MODEL_SN65DSI83) { struct device_node *port2, *port3; int dual_link; port2 = of_graph_get_port_by_id(dev->of_node, 2); port3 = of_graph_get_port_by_id(dev->of_node, 3); 
dual_link = drm_of_lvds_get_dual_link_pixel_order(port2, port3); of_node_put(port2); of_node_put(port3); if (dual_link == DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS) { ctx->lvds_dual_link = true; /* Odd pixels to LVDS Channel A, even pixels to B */ ctx->lvds_dual_link_even_odd_swap = false; } else if (dual_link == DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS) { ctx->lvds_dual_link = true; /* Even pixels to LVDS Channel A, odd pixels to B */ ctx->lvds_dual_link_even_odd_swap = true; } } panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 2, 0); if (IS_ERR(panel_bridge)) return PTR_ERR(panel_bridge); ctx->panel_bridge = panel_bridge; ctx->vcc = devm_regulator_get(dev, "vcc"); if (IS_ERR(ctx->vcc)) return dev_err_probe(dev, PTR_ERR(ctx->vcc), "Failed to get supply 'vcc'\n"); return 0; } static int sn65dsi83_host_attach(struct sn65dsi83 *ctx) { struct device *dev = ctx->dev; struct device_node *host_node; struct device_node *endpoint; struct mipi_dsi_device *dsi; struct mipi_dsi_host *host; const struct mipi_dsi_device_info info = { .type = "sn65dsi83", .channel = 0, .node = NULL, }; int dsi_lanes, ret; endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1); dsi_lanes = drm_of_get_data_lanes_count(endpoint, 1, 4); host_node = of_graph_get_remote_port_parent(endpoint); host = of_find_mipi_dsi_host_by_node(host_node); of_node_put(host_node); of_node_put(endpoint); if (!host) return -EPROBE_DEFER; if (dsi_lanes < 0) return dsi_lanes; dsi = devm_mipi_dsi_device_register_full(dev, host, &info); if (IS_ERR(dsi)) return dev_err_probe(dev, PTR_ERR(dsi), "failed to create dsi device\n"); ctx->dsi = dsi; dsi->lanes = dsi_lanes; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_VIDEO_NO_HFP | MIPI_DSI_MODE_VIDEO_NO_HBP | MIPI_DSI_MODE_VIDEO_NO_HSA | MIPI_DSI_MODE_NO_EOT_PACKET; ret = devm_mipi_dsi_attach(dev, dsi); if (ret < 0) { dev_err(dev, "failed to attach dsi to host: %d\n", ret); return ret; } return 0; } static 
int sn65dsi83_probe(struct i2c_client *client) { const struct i2c_device_id *id = i2c_client_get_device_id(client); struct device *dev = &client->dev; enum sn65dsi83_model model; struct sn65dsi83 *ctx; int ret; ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->dev = dev; if (dev->of_node) { model = (enum sn65dsi83_model)(uintptr_t) of_device_get_match_data(dev); } else { model = id->driver_data; } /* Put the chip in reset, pull EN line low, and assure 10ms reset low timing. */ ctx->enable_gpio = devm_gpiod_get_optional(ctx->dev, "enable", GPIOD_OUT_LOW); if (IS_ERR(ctx->enable_gpio)) return dev_err_probe(dev, PTR_ERR(ctx->enable_gpio), "failed to get enable GPIO\n"); usleep_range(10000, 11000); ret = sn65dsi83_parse_dt(ctx, model); if (ret) return ret; ctx->regmap = devm_regmap_init_i2c(client, &sn65dsi83_regmap_config); if (IS_ERR(ctx->regmap)) return dev_err_probe(dev, PTR_ERR(ctx->regmap), "failed to get regmap\n"); dev_set_drvdata(dev, ctx); i2c_set_clientdata(client, ctx); ctx->bridge.funcs = &sn65dsi83_funcs; ctx->bridge.of_node = dev->of_node; ctx->bridge.pre_enable_prev_first = true; drm_bridge_add(&ctx->bridge); ret = sn65dsi83_host_attach(ctx); if (ret) { dev_err_probe(dev, ret, "failed to attach DSI host\n"); goto err_remove_bridge; } return 0; err_remove_bridge: drm_bridge_remove(&ctx->bridge); return ret; } static void sn65dsi83_remove(struct i2c_client *client) { struct sn65dsi83 *ctx = i2c_get_clientdata(client); drm_bridge_remove(&ctx->bridge); } static struct i2c_device_id sn65dsi83_id[] = { { "ti,sn65dsi83", MODEL_SN65DSI83 }, { "ti,sn65dsi84", MODEL_SN65DSI84 }, {}, }; MODULE_DEVICE_TABLE(i2c, sn65dsi83_id); static const struct of_device_id sn65dsi83_match_table[] = { { .compatible = "ti,sn65dsi83", .data = (void *)MODEL_SN65DSI83 }, { .compatible = "ti,sn65dsi84", .data = (void *)MODEL_SN65DSI84 }, {}, }; MODULE_DEVICE_TABLE(of, sn65dsi83_match_table); static struct i2c_driver sn65dsi83_driver = { .probe = 
sn65dsi83_probe, .remove = sn65dsi83_remove, .id_table = sn65dsi83_id, .driver = { .name = "sn65dsi83", .of_match_table = sn65dsi83_match_table, }, }; module_i2c_driver(sn65dsi83_driver); MODULE_AUTHOR("Marek Vasut <[email protected]>"); MODULE_DESCRIPTION("TI SN65DSI83 DSI to LVDS bridge driver"); MODULE_LICENSE("GPL v2");
/* --- end of linux-master: drivers/gpu/drm/bridge/ti-sn65dsi83.c --- */
// SPDX-License-Identifier: GPL-2.0-only /* * Samsung MIPI DSIM bridge driver. * * Copyright (C) 2021 Amarula Solutions(India) * Copyright (c) 2014 Samsung Electronics Co., Ltd * Author: Jagan Teki <[email protected]> * * Based on exynos_drm_dsi from * Tomasz Figa <[email protected]> */ #include <asm/unaligned.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/irq.h> #include <linux/media-bus-format.h> #include <linux/of.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <video/mipi_display.h> #include <drm/bridge/samsung-dsim.h> #include <drm/drm_panel.h> #include <drm/drm_print.h> /* returns true iff both arguments logically differs */ #define NEQV(a, b) (!(a) ^ !(b)) /* DSIM_STATUS */ #define DSIM_STOP_STATE_DAT(x) (((x) & 0xf) << 0) #define DSIM_STOP_STATE_CLK BIT(8) #define DSIM_TX_READY_HS_CLK BIT(10) #define DSIM_PLL_STABLE BIT(31) /* DSIM_SWRST */ #define DSIM_FUNCRST BIT(16) #define DSIM_SWRST BIT(0) /* DSIM_TIMEOUT */ #define DSIM_LPDR_TIMEOUT(x) ((x) << 0) #define DSIM_BTA_TIMEOUT(x) ((x) << 16) /* DSIM_CLKCTRL */ #define DSIM_ESC_PRESCALER(x) (((x) & 0xffff) << 0) #define DSIM_ESC_PRESCALER_MASK (0xffff << 0) #define DSIM_LANE_ESC_CLK_EN_CLK BIT(19) #define DSIM_LANE_ESC_CLK_EN_DATA(x) (((x) & 0xf) << 20) #define DSIM_LANE_ESC_CLK_EN_DATA_MASK (0xf << 20) #define DSIM_BYTE_CLKEN BIT(24) #define DSIM_BYTE_CLK_SRC(x) (((x) & 0x3) << 25) #define DSIM_BYTE_CLK_SRC_MASK (0x3 << 25) #define DSIM_PLL_BYPASS BIT(27) #define DSIM_ESC_CLKEN BIT(28) #define DSIM_TX_REQUEST_HSCLK BIT(31) /* DSIM_CONFIG */ #define DSIM_LANE_EN_CLK BIT(0) #define DSIM_LANE_EN(x) (((x) & 0xf) << 1) #define DSIM_NUM_OF_DATA_LANE(x) (((x) & 0x3) << 5) #define DSIM_SUB_PIX_FORMAT(x) (((x) & 0x7) << 8) #define DSIM_MAIN_PIX_FORMAT_MASK (0x7 << 12) #define DSIM_MAIN_PIX_FORMAT_RGB888 (0x7 << 12) #define DSIM_MAIN_PIX_FORMAT_RGB666 (0x6 << 12) #define DSIM_MAIN_PIX_FORMAT_RGB666_P (0x5 << 12) #define DSIM_MAIN_PIX_FORMAT_RGB565 (0x4 << 12) #define 
DSIM_SUB_VC (((x) & 0x3) << 16) #define DSIM_MAIN_VC (((x) & 0x3) << 18) #define DSIM_HSA_DISABLE_MODE BIT(20) #define DSIM_HBP_DISABLE_MODE BIT(21) #define DSIM_HFP_DISABLE_MODE BIT(22) /* * The i.MX 8M Mini Applications Processor Reference Manual, * Rev. 3, 11/2020 Page 4091 * The i.MX 8M Nano Applications Processor Reference Manual, * Rev. 2, 07/2022 Page 3058 * The i.MX 8M Plus Applications Processor Reference Manual, * Rev. 1, 06/2021 Page 5436 * all claims this bit is 'HseDisableMode' with the definition * 0 = Disables transfer * 1 = Enables transfer * * This clearly states that HSE is not a disabled bit. * * The naming convention follows as per the manual and the * driver logic is based on the MIPI_DSI_MODE_VIDEO_HSE flag. */ #define DSIM_HSE_DISABLE_MODE BIT(23) #define DSIM_AUTO_MODE BIT(24) #define DSIM_VIDEO_MODE BIT(25) #define DSIM_BURST_MODE BIT(26) #define DSIM_SYNC_INFORM BIT(27) #define DSIM_EOT_DISABLE BIT(28) #define DSIM_MFLUSH_VS BIT(29) /* This flag is valid only for exynos3250/3472/5260/5430 */ #define DSIM_CLKLANE_STOP BIT(30) /* DSIM_ESCMODE */ #define DSIM_TX_TRIGGER_RST BIT(4) #define DSIM_TX_LPDT_LP BIT(6) #define DSIM_CMD_LPDT_LP BIT(7) #define DSIM_FORCE_BTA BIT(16) #define DSIM_FORCE_STOP_STATE BIT(20) #define DSIM_STOP_STATE_CNT(x) (((x) & 0x7ff) << 21) #define DSIM_STOP_STATE_CNT_MASK (0x7ff << 21) /* DSIM_MDRESOL */ #define DSIM_MAIN_STAND_BY BIT(31) #define DSIM_MAIN_VRESOL(x, num_bits) (((x) & ((1 << (num_bits)) - 1)) << 16) #define DSIM_MAIN_HRESOL(x, num_bits) (((x) & ((1 << (num_bits)) - 1)) << 0) /* DSIM_MVPORCH */ #define DSIM_CMD_ALLOW(x) ((x) << 28) #define DSIM_STABLE_VFP(x) ((x) << 16) #define DSIM_MAIN_VBP(x) ((x) << 0) #define DSIM_CMD_ALLOW_MASK (0xf << 28) #define DSIM_STABLE_VFP_MASK (0x7ff << 16) #define DSIM_MAIN_VBP_MASK (0x7ff << 0) /* DSIM_MHPORCH */ #define DSIM_MAIN_HFP(x) ((x) << 16) #define DSIM_MAIN_HBP(x) ((x) << 0) #define DSIM_MAIN_HFP_MASK ((0xffff) << 16) #define DSIM_MAIN_HBP_MASK ((0xffff) << 0) /* 
DSIM_MSYNC */ #define DSIM_MAIN_VSA(x) ((x) << 22) #define DSIM_MAIN_HSA(x) ((x) << 0) #define DSIM_MAIN_VSA_MASK ((0x3ff) << 22) #define DSIM_MAIN_HSA_MASK ((0xffff) << 0) /* DSIM_SDRESOL */ #define DSIM_SUB_STANDY(x) ((x) << 31) #define DSIM_SUB_VRESOL(x) ((x) << 16) #define DSIM_SUB_HRESOL(x) ((x) << 0) #define DSIM_SUB_STANDY_MASK ((0x1) << 31) #define DSIM_SUB_VRESOL_MASK ((0x7ff) << 16) #define DSIM_SUB_HRESOL_MASK ((0x7ff) << 0) /* DSIM_INTSRC */ #define DSIM_INT_PLL_STABLE BIT(31) #define DSIM_INT_SW_RST_RELEASE BIT(30) #define DSIM_INT_SFR_FIFO_EMPTY BIT(29) #define DSIM_INT_SFR_HDR_FIFO_EMPTY BIT(28) #define DSIM_INT_BTA BIT(25) #define DSIM_INT_FRAME_DONE BIT(24) #define DSIM_INT_RX_TIMEOUT BIT(21) #define DSIM_INT_BTA_TIMEOUT BIT(20) #define DSIM_INT_RX_DONE BIT(18) #define DSIM_INT_RX_TE BIT(17) #define DSIM_INT_RX_ACK BIT(16) #define DSIM_INT_RX_ECC_ERR BIT(15) #define DSIM_INT_RX_CRC_ERR BIT(14) /* DSIM_FIFOCTRL */ #define DSIM_RX_DATA_FULL BIT(25) #define DSIM_RX_DATA_EMPTY BIT(24) #define DSIM_SFR_HEADER_FULL BIT(23) #define DSIM_SFR_HEADER_EMPTY BIT(22) #define DSIM_SFR_PAYLOAD_FULL BIT(21) #define DSIM_SFR_PAYLOAD_EMPTY BIT(20) #define DSIM_I80_HEADER_FULL BIT(19) #define DSIM_I80_HEADER_EMPTY BIT(18) #define DSIM_I80_PAYLOAD_FULL BIT(17) #define DSIM_I80_PAYLOAD_EMPTY BIT(16) #define DSIM_SD_HEADER_FULL BIT(15) #define DSIM_SD_HEADER_EMPTY BIT(14) #define DSIM_SD_PAYLOAD_FULL BIT(13) #define DSIM_SD_PAYLOAD_EMPTY BIT(12) #define DSIM_MD_HEADER_FULL BIT(11) #define DSIM_MD_HEADER_EMPTY BIT(10) #define DSIM_MD_PAYLOAD_FULL BIT(9) #define DSIM_MD_PAYLOAD_EMPTY BIT(8) #define DSIM_RX_FIFO BIT(4) #define DSIM_SFR_FIFO BIT(3) #define DSIM_I80_FIFO BIT(2) #define DSIM_SD_FIFO BIT(1) #define DSIM_MD_FIFO BIT(0) /* DSIM_PHYACCHR */ #define DSIM_AFC_EN BIT(14) #define DSIM_AFC_CTL(x) (((x) & 0x7) << 5) /* DSIM_PLLCTRL */ #define DSIM_PLL_DPDNSWAP_CLK (1 << 25) #define DSIM_PLL_DPDNSWAP_DAT (1 << 24) #define DSIM_FREQ_BAND(x) ((x) << 24) #define 
DSIM_PLL_EN BIT(23) #define DSIM_PLL_P(x, offset) ((x) << (offset)) #define DSIM_PLL_M(x) ((x) << 4) #define DSIM_PLL_S(x) ((x) << 1) /* DSIM_PHYCTRL */ #define DSIM_PHYCTRL_ULPS_EXIT(x) (((x) & 0x1ff) << 0) #define DSIM_PHYCTRL_B_DPHYCTL_VREG_LP BIT(30) #define DSIM_PHYCTRL_B_DPHYCTL_SLEW_UP BIT(14) /* DSIM_PHYTIMING */ #define DSIM_PHYTIMING_LPX(x) ((x) << 8) #define DSIM_PHYTIMING_HS_EXIT(x) ((x) << 0) /* DSIM_PHYTIMING1 */ #define DSIM_PHYTIMING1_CLK_PREPARE(x) ((x) << 24) #define DSIM_PHYTIMING1_CLK_ZERO(x) ((x) << 16) #define DSIM_PHYTIMING1_CLK_POST(x) ((x) << 8) #define DSIM_PHYTIMING1_CLK_TRAIL(x) ((x) << 0) /* DSIM_PHYTIMING2 */ #define DSIM_PHYTIMING2_HS_PREPARE(x) ((x) << 16) #define DSIM_PHYTIMING2_HS_ZERO(x) ((x) << 8) #define DSIM_PHYTIMING2_HS_TRAIL(x) ((x) << 0) #define DSI_MAX_BUS_WIDTH 4 #define DSI_NUM_VIRTUAL_CHANNELS 4 #define DSI_TX_FIFO_SIZE 2048 #define DSI_RX_FIFO_SIZE 256 #define DSI_XFER_TIMEOUT_MS 100 #define DSI_RX_FIFO_EMPTY 0x30800002 #define OLD_SCLK_MIPI_CLK_NAME "pll_clk" #define PS_TO_CYCLE(ps, hz) DIV64_U64_ROUND_CLOSEST(((ps) * (hz)), 1000000000000ULL) static const char *const clk_names[5] = { "bus_clk", "sclk_mipi", "phyclk_mipidphy0_bitclkdiv8", "phyclk_mipidphy0_rxclkesc0", "sclk_rgb_vclk_to_dsim0" }; enum samsung_dsim_transfer_type { EXYNOS_DSI_TX, EXYNOS_DSI_RX, }; enum reg_idx { DSIM_STATUS_REG, /* Status register */ DSIM_SWRST_REG, /* Software reset register */ DSIM_CLKCTRL_REG, /* Clock control register */ DSIM_TIMEOUT_REG, /* Time out register */ DSIM_CONFIG_REG, /* Configuration register */ DSIM_ESCMODE_REG, /* Escape mode register */ DSIM_MDRESOL_REG, DSIM_MVPORCH_REG, /* Main display Vporch register */ DSIM_MHPORCH_REG, /* Main display Hporch register */ DSIM_MSYNC_REG, /* Main display sync area register */ DSIM_INTSRC_REG, /* Interrupt source register */ DSIM_INTMSK_REG, /* Interrupt mask register */ DSIM_PKTHDR_REG, /* Packet Header FIFO register */ DSIM_PAYLOAD_REG, /* Payload FIFO register */ DSIM_RXFIFO_REG, /* 
Read FIFO register */ DSIM_FIFOCTRL_REG, /* FIFO status and control register */ DSIM_PLLCTRL_REG, /* PLL control register */ DSIM_PHYCTRL_REG, DSIM_PHYTIMING_REG, DSIM_PHYTIMING1_REG, DSIM_PHYTIMING2_REG, NUM_REGS }; static const unsigned int exynos_reg_ofs[] = { [DSIM_STATUS_REG] = 0x00, [DSIM_SWRST_REG] = 0x04, [DSIM_CLKCTRL_REG] = 0x08, [DSIM_TIMEOUT_REG] = 0x0c, [DSIM_CONFIG_REG] = 0x10, [DSIM_ESCMODE_REG] = 0x14, [DSIM_MDRESOL_REG] = 0x18, [DSIM_MVPORCH_REG] = 0x1c, [DSIM_MHPORCH_REG] = 0x20, [DSIM_MSYNC_REG] = 0x24, [DSIM_INTSRC_REG] = 0x2c, [DSIM_INTMSK_REG] = 0x30, [DSIM_PKTHDR_REG] = 0x34, [DSIM_PAYLOAD_REG] = 0x38, [DSIM_RXFIFO_REG] = 0x3c, [DSIM_FIFOCTRL_REG] = 0x44, [DSIM_PLLCTRL_REG] = 0x4c, [DSIM_PHYCTRL_REG] = 0x5c, [DSIM_PHYTIMING_REG] = 0x64, [DSIM_PHYTIMING1_REG] = 0x68, [DSIM_PHYTIMING2_REG] = 0x6c, }; static const unsigned int exynos5433_reg_ofs[] = { [DSIM_STATUS_REG] = 0x04, [DSIM_SWRST_REG] = 0x0C, [DSIM_CLKCTRL_REG] = 0x10, [DSIM_TIMEOUT_REG] = 0x14, [DSIM_CONFIG_REG] = 0x18, [DSIM_ESCMODE_REG] = 0x1C, [DSIM_MDRESOL_REG] = 0x20, [DSIM_MVPORCH_REG] = 0x24, [DSIM_MHPORCH_REG] = 0x28, [DSIM_MSYNC_REG] = 0x2C, [DSIM_INTSRC_REG] = 0x34, [DSIM_INTMSK_REG] = 0x38, [DSIM_PKTHDR_REG] = 0x3C, [DSIM_PAYLOAD_REG] = 0x40, [DSIM_RXFIFO_REG] = 0x44, [DSIM_FIFOCTRL_REG] = 0x4C, [DSIM_PLLCTRL_REG] = 0x94, [DSIM_PHYCTRL_REG] = 0xA4, [DSIM_PHYTIMING_REG] = 0xB4, [DSIM_PHYTIMING1_REG] = 0xB8, [DSIM_PHYTIMING2_REG] = 0xBC, }; enum reg_value_idx { RESET_TYPE, PLL_TIMER, STOP_STATE_CNT, PHYCTRL_ULPS_EXIT, PHYCTRL_VREG_LP, PHYCTRL_SLEW_UP, PHYTIMING_LPX, PHYTIMING_HS_EXIT, PHYTIMING_CLK_PREPARE, PHYTIMING_CLK_ZERO, PHYTIMING_CLK_POST, PHYTIMING_CLK_TRAIL, PHYTIMING_HS_PREPARE, PHYTIMING_HS_ZERO, PHYTIMING_HS_TRAIL }; static const unsigned int reg_values[] = { [RESET_TYPE] = DSIM_SWRST, [PLL_TIMER] = 500, [STOP_STATE_CNT] = 0xf, [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0x0af), [PHYCTRL_VREG_LP] = 0, [PHYCTRL_SLEW_UP] = 0, [PHYTIMING_LPX] = 
DSIM_PHYTIMING_LPX(0x06), [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0b), [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x07), [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x27), [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0d), [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x08), [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x09), [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x0d), [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0b), }; static const unsigned int exynos5422_reg_values[] = { [RESET_TYPE] = DSIM_SWRST, [PLL_TIMER] = 500, [STOP_STATE_CNT] = 0xf, [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0xaf), [PHYCTRL_VREG_LP] = 0, [PHYCTRL_SLEW_UP] = 0, [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x08), [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0d), [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x09), [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x30), [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0e), [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x0a), [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x0c), [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x11), [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0d), }; static const unsigned int exynos5433_reg_values[] = { [RESET_TYPE] = DSIM_FUNCRST, [PLL_TIMER] = 22200, [STOP_STATE_CNT] = 0xa, [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0x190), [PHYCTRL_VREG_LP] = DSIM_PHYCTRL_B_DPHYCTL_VREG_LP, [PHYCTRL_SLEW_UP] = DSIM_PHYCTRL_B_DPHYCTL_SLEW_UP, [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x07), [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0c), [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x09), [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x2d), [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0e), [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x09), [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x0b), [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x10), [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0c), }; static const unsigned int 
imx8mm_dsim_reg_values[] = { [RESET_TYPE] = DSIM_SWRST, [PLL_TIMER] = 500, [STOP_STATE_CNT] = 0xf, [PHYCTRL_ULPS_EXIT] = 0, [PHYCTRL_VREG_LP] = 0, [PHYCTRL_SLEW_UP] = 0, [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x06), [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0b), [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x07), [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x26), [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0d), [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x08), [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x08), [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x0d), [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0b), }; static const struct samsung_dsim_driver_data exynos3_dsi_driver_data = { .reg_ofs = exynos_reg_ofs, .plltmr_reg = 0x50, .has_freqband = 1, .has_clklane_stop = 1, .num_clks = 2, .max_freq = 1000, .wait_for_reset = 1, .num_bits_resol = 11, .pll_p_offset = 13, .reg_values = reg_values, .m_min = 41, .m_max = 125, .min_freq = 500, }; static const struct samsung_dsim_driver_data exynos4_dsi_driver_data = { .reg_ofs = exynos_reg_ofs, .plltmr_reg = 0x50, .has_freqband = 1, .has_clklane_stop = 1, .num_clks = 2, .max_freq = 1000, .wait_for_reset = 1, .num_bits_resol = 11, .pll_p_offset = 13, .reg_values = reg_values, .m_min = 41, .m_max = 125, .min_freq = 500, }; static const struct samsung_dsim_driver_data exynos5_dsi_driver_data = { .reg_ofs = exynos_reg_ofs, .plltmr_reg = 0x58, .num_clks = 2, .max_freq = 1000, .wait_for_reset = 1, .num_bits_resol = 11, .pll_p_offset = 13, .reg_values = reg_values, .m_min = 41, .m_max = 125, .min_freq = 500, }; static const struct samsung_dsim_driver_data exynos5433_dsi_driver_data = { .reg_ofs = exynos5433_reg_ofs, .plltmr_reg = 0xa0, .has_clklane_stop = 1, .num_clks = 5, .max_freq = 1500, .wait_for_reset = 0, .num_bits_resol = 12, .pll_p_offset = 13, .reg_values = exynos5433_reg_values, .m_min = 41, .m_max = 125, .min_freq = 500, }; static const struct samsung_dsim_driver_data 
exynos5422_dsi_driver_data = { .reg_ofs = exynos5433_reg_ofs, .plltmr_reg = 0xa0, .has_clklane_stop = 1, .num_clks = 2, .max_freq = 1500, .wait_for_reset = 1, .num_bits_resol = 12, .pll_p_offset = 13, .reg_values = exynos5422_reg_values, .m_min = 41, .m_max = 125, .min_freq = 500, }; static const struct samsung_dsim_driver_data imx8mm_dsi_driver_data = { .reg_ofs = exynos5433_reg_ofs, .plltmr_reg = 0xa0, .has_clklane_stop = 1, .num_clks = 2, .max_freq = 2100, .wait_for_reset = 0, .num_bits_resol = 12, /* * Unlike Exynos, PLL_P(PMS_P) offset 14 is used in i.MX8M Mini/Nano/Plus * downstream driver - drivers/gpu/drm/bridge/sec-dsim.c */ .pll_p_offset = 14, .reg_values = imx8mm_dsim_reg_values, .m_min = 64, .m_max = 1023, .min_freq = 1050, }; static const struct samsung_dsim_driver_data * samsung_dsim_types[DSIM_TYPE_COUNT] = { [DSIM_TYPE_EXYNOS3250] = &exynos3_dsi_driver_data, [DSIM_TYPE_EXYNOS4210] = &exynos4_dsi_driver_data, [DSIM_TYPE_EXYNOS5410] = &exynos5_dsi_driver_data, [DSIM_TYPE_EXYNOS5422] = &exynos5422_dsi_driver_data, [DSIM_TYPE_EXYNOS5433] = &exynos5433_dsi_driver_data, [DSIM_TYPE_IMX8MM] = &imx8mm_dsi_driver_data, [DSIM_TYPE_IMX8MP] = &imx8mm_dsi_driver_data, }; static inline struct samsung_dsim *host_to_dsi(struct mipi_dsi_host *h) { return container_of(h, struct samsung_dsim, dsi_host); } static inline struct samsung_dsim *bridge_to_dsi(struct drm_bridge *b) { return container_of(b, struct samsung_dsim, bridge); } static inline void samsung_dsim_write(struct samsung_dsim *dsi, enum reg_idx idx, u32 val) { writel(val, dsi->reg_base + dsi->driver_data->reg_ofs[idx]); } static inline u32 samsung_dsim_read(struct samsung_dsim *dsi, enum reg_idx idx) { return readl(dsi->reg_base + dsi->driver_data->reg_ofs[idx]); } static void samsung_dsim_wait_for_reset(struct samsung_dsim *dsi) { if (wait_for_completion_timeout(&dsi->completed, msecs_to_jiffies(300))) return; dev_err(dsi->dev, "timeout waiting for reset\n"); } static void samsung_dsim_reset(struct 
samsung_dsim *dsi)
{
	/* Reset type differs per SoC: full SWRST vs. function-only reset */
	u32 reset_val = dsi->driver_data->reg_values[RESET_TYPE];

	reinit_completion(&dsi->completed);
	samsung_dsim_write(dsi, DSIM_SWRST_REG, reset_val);
}

#ifndef MHZ
#define MHZ	(1000 * 1000)
#endif

/*
 * Exhaustively search the PLL P/M/S divider space for the output
 * frequency closest to @fout, honouring the per-SoC M range and
 * VCO frequency limits.  On success the chosen dividers are stored
 * through @p/@m/@s and the achievable frequency is returned;
 * returns 0 when no valid combination exists.
 */
static unsigned long samsung_dsim_pll_find_pms(struct samsung_dsim *dsi,
					       unsigned long fin,
					       unsigned long fout,
					       u8 *p, u16 *m, u8 *s)
{
	const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
	unsigned long best_freq = 0;
	u32 min_delta = 0xffffffff;
	u8 p_min, p_max;
	u8 _p, best_p;
	u16 _m, best_m;
	u8 _s, best_s;

	/* Keep the reference clock fin/p within 6..12 MHz */
	p_min = DIV_ROUND_UP(fin, (12 * MHZ));
	p_max = fin / (6 * MHZ);

	for (_p = p_min; _p <= p_max; ++_p) {
		for (_s = 0; _s <= 5; ++_s) {
			u64 tmp;
			u32 delta;

			tmp = (u64)fout * (_p << _s);
			do_div(tmp, fin);
			_m = tmp;
			if (_m < driver_data->m_min || _m > driver_data->m_max)
				continue;

			/* VCO frequency (before the S post-divider) limits */
			tmp = (u64)_m * fin;
			do_div(tmp, _p);
			if (tmp < driver_data->min_freq  * MHZ ||
			    tmp > driver_data->max_freq * MHZ)
				continue;

			tmp = (u64)_m * fin;
			do_div(tmp, _p << _s);

			delta = abs(fout - tmp);
			if (delta < min_delta) {
				best_p = _p;
				best_m = _m;
				best_s = _s;
				min_delta = delta;
				best_freq = tmp;
			}
		}
	}

	/* best_p/m/s are only valid when a candidate was found */
	if (best_freq) {
		*p = best_p;
		*m = best_m;
		*s = best_s;
	}

	return best_freq;
}

/*
 * Program the DSI PLL for (approximately) @freq Hz and busy-wait for
 * lock.  Returns the actual HS clock achieved, or 0 on failure
 * (no valid PMS or PLL failed to stabilize).
 */
static unsigned long samsung_dsim_set_pll(struct samsung_dsim *dsi,
					  unsigned long freq)
{
	const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
	unsigned long fin, fout;
	int timeout;
	u8 p, s;
	u16 m;
	u32 reg;

	fin = dsi->pll_clk_rate;
	fout = samsung_dsim_pll_find_pms(dsi, fin, freq, &p, &m, &s);
	if (!fout) {
		dev_err(dsi->dev,
			"failed to find PLL PMS for requested frequency\n");
		return 0;
	}
	dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d)\n", fout, p, m, s);

	writel(driver_data->reg_values[PLL_TIMER],
	       dsi->reg_base + driver_data->plltmr_reg);

	reg = DSIM_PLL_EN | DSIM_PLL_P(p, driver_data->pll_p_offset) |
	      DSIM_PLL_M(m) | DSIM_PLL_S(s);

	if (driver_data->has_freqband) {
		/* Older SoCs need a frequency-band index instead of D-PHY timings */
		static const unsigned long freq_bands[] = {
			100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ,
			270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ,
			510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ,
			770 * MHZ, 870 * MHZ, 950 * MHZ,
		};
		int band;

		for (band = 0; band < ARRAY_SIZE(freq_bands); ++band)
			if (fout < freq_bands[band])
				break;

		dev_dbg(dsi->dev, "band %d\n", band);

		reg |= DSIM_FREQ_BAND(band);
	}

	if (dsi->swap_dn_dp_clk)
		reg |= DSIM_PLL_DPDNSWAP_CLK;
	if (dsi->swap_dn_dp_data)
		reg |= DSIM_PLL_DPDNSWAP_DAT;

	samsung_dsim_write(dsi, DSIM_PLLCTRL_REG, reg);

	/* Poll for PLL lock; bail out after 1000 iterations */
	timeout = 1000;
	do {
		if (timeout-- == 0) {
			dev_err(dsi->dev, "PLL failed to stabilize\n");
			return 0;
		}
		reg = samsung_dsim_read(dsi, DSIM_STATUS_REG);
	} while ((reg & DSIM_PLL_STABLE) == 0);

	dsi->hs_clock = fout;

	return fout;
}

/*
 * Derive and enable the HS, byte and escape clocks from the mode's
 * pixel clock (or the fixed burst clock when one is configured).
 * Returns 0 on success, -EFAULT if the PLL could not be set up.
 */
static int samsung_dsim_enable_clock(struct samsung_dsim *dsi)
{
	unsigned long hs_clk, byte_clk, esc_clk, pix_clk;
	unsigned long esc_div;
	u32 reg;
	struct drm_display_mode *m = &dsi->mode;
	int bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);

	/* m->clock is in KHz */
	pix_clk = m->clock * 1000;

	/* Use burst_clk_rate if available, otherwise use the pix_clk */
	if (dsi->burst_clk_rate)
		hs_clk = samsung_dsim_set_pll(dsi, dsi->burst_clk_rate);
	else
		hs_clk = samsung_dsim_set_pll(dsi, DIV_ROUND_UP(pix_clk * bpp,
								dsi->lanes));

	if (!hs_clk) {
		dev_err(dsi->dev, "failed to configure DSI PLL\n");
		return -EFAULT;
	}

	byte_clk = hs_clk / 8;
	esc_div = DIV_ROUND_UP(byte_clk, dsi->esc_clk_rate);
	esc_clk = byte_clk / esc_div;

	/* The escape clock must not exceed the D-PHY 20 MHz limit */
	if (esc_clk > 20 * MHZ) {
		++esc_div;
		esc_clk = byte_clk / esc_div;
	}

	dev_dbg(dsi->dev, "hs_clk = %lu, byte_clk = %lu, esc_clk = %lu\n",
		hs_clk, byte_clk, esc_clk);

	reg = samsung_dsim_read(dsi, DSIM_CLKCTRL_REG);
	reg &= ~(DSIM_ESC_PRESCALER_MASK | DSIM_LANE_ESC_CLK_EN_CLK
			| DSIM_LANE_ESC_CLK_EN_DATA_MASK | DSIM_PLL_BYPASS
			| DSIM_BYTE_CLK_SRC_MASK);
	reg |= DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN
			| DSIM_ESC_PRESCALER(esc_div)
			| DSIM_LANE_ESC_CLK_EN_CLK
			| DSIM_LANE_ESC_CLK_EN_DATA(BIT(dsi->lanes) - 1)
			| DSIM_BYTE_CLK_SRC(0)
			| DSIM_TX_REQUEST_HSCLK;
	samsung_dsim_write(dsi, DSIM_CLKCTRL_REG, reg);

	return 0;
}

static void
samsung_dsim_set_phy_ctrl(struct samsung_dsim *dsi)
{
	const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
	const unsigned int *reg_values = driver_data->reg_values;
	u32 reg;
	struct phy_configure_opts_mipi_dphy cfg;
	int clk_prepare, lpx, clk_zero, clk_post, clk_trail;
	int hs_exit, hs_prepare, hs_zero, hs_trail;
	unsigned long long byte_clock = dsi->hs_clock / 8;

	/* SoCs with a frequency band field have no programmable PHY timings */
	if (driver_data->has_freqband)
		return;

	phy_mipi_dphy_get_default_config_for_hsclk(dsi->hs_clock,
						   dsi->lanes, &cfg);

	/*
	 * TODO:
	 * The tech Applications Processor manuals for i.MX8M Mini, Nano,
	 * and Plus don't state what the definition of the PHYTIMING
	 * bits are beyond their address and bit position.
	 * After reviewing NXP's downstream code, it appears
	 * that the various PHYTIMING registers take the number
	 * of cycles and use various dividers on them.  This
	 * calculation does not result in an exact match to the
	 * downstream code, but it is very close to the values
	 * generated by their lookup table, and it appears
	 * to sync at a variety of resolutions. If someone
	 * can get a more accurate mathematical equation needed
	 * for these registers, this should be updated.
	 */
	lpx = PS_TO_CYCLE(cfg.lpx, byte_clock);
	hs_exit = PS_TO_CYCLE(cfg.hs_exit, byte_clock);
	clk_prepare = PS_TO_CYCLE(cfg.clk_prepare, byte_clock);
	clk_zero = PS_TO_CYCLE(cfg.clk_zero, byte_clock);
	clk_post = PS_TO_CYCLE(cfg.clk_post, byte_clock);
	clk_trail = PS_TO_CYCLE(cfg.clk_trail, byte_clock);
	hs_prepare = PS_TO_CYCLE(cfg.hs_prepare, byte_clock);
	hs_zero = PS_TO_CYCLE(cfg.hs_zero, byte_clock);
	hs_trail = PS_TO_CYCLE(cfg.hs_trail, byte_clock);

	/* B D-PHY: D-PHY Master & Slave Analog Block control */
	reg = reg_values[PHYCTRL_ULPS_EXIT] | reg_values[PHYCTRL_VREG_LP] |
		reg_values[PHYCTRL_SLEW_UP];
	samsung_dsim_write(dsi, DSIM_PHYCTRL_REG, reg);

	/*
	 * T LPX: Transmitted length of any Low-Power state period
	 * T HS-EXIT: Time that the transmitter drives LP-11 following a HS
	 *	burst
	 */
	reg = DSIM_PHYTIMING_LPX(lpx) | DSIM_PHYTIMING_HS_EXIT(hs_exit);
	samsung_dsim_write(dsi, DSIM_PHYTIMING_REG, reg);

	/*
	 * T CLK-PREPARE: Time that the transmitter drives the Clock Lane LP-00
	 *	Line state immediately before the HS-0 Line state starting the
	 *	HS transmission
	 * T CLK-ZERO: Time that the transmitter drives the HS-0 state prior to
	 *	transmitting the Clock.
	 * T CLK_POST: Time that the transmitter continues to send HS clock
	 *	after the last associated Data Lane has transitioned to LP Mode
	 *	Interval is defined as the period from the end of T HS-TRAIL to
	 *	the beginning of T CLK-TRAIL
	 * T CLK-TRAIL: Time that the transmitter drives the HS-0 state after
	 *	the last payload clock bit of a HS transmission burst
	 */
	reg = DSIM_PHYTIMING1_CLK_PREPARE(clk_prepare) |
	      DSIM_PHYTIMING1_CLK_ZERO(clk_zero) |
	      DSIM_PHYTIMING1_CLK_POST(clk_post) |
	      DSIM_PHYTIMING1_CLK_TRAIL(clk_trail);
	samsung_dsim_write(dsi, DSIM_PHYTIMING1_REG, reg);

	/*
	 * T HS-PREPARE: Time that the transmitter drives the Data Lane LP-00
	 *	Line state immediately before the HS-0 Line state starting the
	 *	HS transmission
	 * T HS-ZERO: Time that the transmitter drives the HS-0 state prior to
	 *	transmitting the Sync sequence.
	 * T HS-TRAIL: Time that the transmitter drives the flipped differential
	 *	state after last payload data bit of a HS transmission burst
	 */
	reg = DSIM_PHYTIMING2_HS_PREPARE(hs_prepare) |
	      DSIM_PHYTIMING2_HS_ZERO(hs_zero) |
	      DSIM_PHYTIMING2_HS_TRAIL(hs_trail);
	samsung_dsim_write(dsi, DSIM_PHYTIMING2_REG, reg);
}

/* Gate the escape/byte clocks and power down the PLL. */
static void samsung_dsim_disable_clock(struct samsung_dsim *dsi)
{
	u32 reg;

	reg = samsung_dsim_read(dsi, DSIM_CLKCTRL_REG);
	reg &= ~(DSIM_LANE_ESC_CLK_EN_CLK | DSIM_LANE_ESC_CLK_EN_DATA_MASK
			| DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN);
	samsung_dsim_write(dsi, DSIM_CLKCTRL_REG, reg);

	reg = samsung_dsim_read(dsi, DSIM_PLLCTRL_REG);
	reg &= ~DSIM_PLL_EN;
	samsung_dsim_write(dsi, DSIM_PLLCTRL_REG, reg);
}

/* Enable the clock lane plus the data lanes given by the @lane bitmask. */
static void samsung_dsim_enable_lane(struct samsung_dsim *dsi, u32 lane)
{
	u32 reg = samsung_dsim_read(dsi, DSIM_CONFIG_REG);

	reg |= (DSIM_NUM_OF_DATA_LANE(dsi->lanes - 1) | DSIM_LANE_EN_CLK |
			DSIM_LANE_EN(lane));
	samsung_dsim_write(dsi, DSIM_CONFIG_REG, reg);
}

/*
 * Bring up the DSI link: reset the FIFOs, translate the peripheral's
 * mode flags and pixel format into DSIM_CONFIG, enable the lanes and
 * wait for them to reach stop state, then program stop-state counters
 * and BTA/LPDR timeouts.  Returns 0 or a negative errno.
 */
static int samsung_dsim_init_link(struct samsung_dsim *dsi)
{
	const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
	int timeout;
	u32 reg;
	u32 lanes_mask;

	/* Initialize FIFO pointers */
	reg = samsung_dsim_read(dsi, DSIM_FIFOCTRL_REG);
	reg &= ~0x1f;
	samsung_dsim_write(dsi, DSIM_FIFOCTRL_REG, reg);
	usleep_range(9000, 11000);

	reg |= 0x1f;
	samsung_dsim_write(dsi, DSIM_FIFOCTRL_REG, reg);
	usleep_range(9000, 11000);

	/* DSI configuration */
	reg = 0;

	/*
	 * The first bit of mode_flags specifies display configuration.
	 * If this bit is set[= MIPI_DSI_MODE_VIDEO], dsi will support video
	 * mode, otherwise it will support command mode.
	 */
	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
		reg |= DSIM_VIDEO_MODE;

		/*
		 * The user manual describes that following bits are ignored in
		 * command mode.
		 */
		if (!(dsi->mode_flags & MIPI_DSI_MODE_VSYNC_FLUSH))
			reg |= DSIM_MFLUSH_VS;
		if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
			reg |= DSIM_SYNC_INFORM;
		if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
			reg |= DSIM_BURST_MODE;
		if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_AUTO_VERT)
			reg |= DSIM_AUTO_MODE;
		if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HSE)
			reg |= DSIM_HSE_DISABLE_MODE;
		if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HFP)
			reg |= DSIM_HFP_DISABLE_MODE;
		if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HBP)
			reg |= DSIM_HBP_DISABLE_MODE;
		if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HSA)
			reg |= DSIM_HSA_DISABLE_MODE;
	}

	if (dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
		reg |= DSIM_EOT_DISABLE;

	switch (dsi->format) {
	case MIPI_DSI_FMT_RGB888:
		reg |= DSIM_MAIN_PIX_FORMAT_RGB888;
		break;
	case MIPI_DSI_FMT_RGB666:
		reg |= DSIM_MAIN_PIX_FORMAT_RGB666;
		break;
	case MIPI_DSI_FMT_RGB666_PACKED:
		reg |= DSIM_MAIN_PIX_FORMAT_RGB666_P;
		break;
	case MIPI_DSI_FMT_RGB565:
		reg |= DSIM_MAIN_PIX_FORMAT_RGB565;
		break;
	default:
		dev_err(dsi->dev, "invalid pixel format\n");
		return -EINVAL;
	}

	/*
	 * Use non-continuous clock mode if the peripheral wants and
	 * host controller supports
	 *
	 * In non-continuous clock mode, host controller will turn off
	 * the HS clock between high-speed transmissions to reduce
	 * power consumption.
	 */
	if (driver_data->has_clklane_stop &&
	    dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
		reg |= DSIM_CLKLANE_STOP;
	samsung_dsim_write(dsi, DSIM_CONFIG_REG, reg);

	lanes_mask = BIT(dsi->lanes) - 1;
	samsung_dsim_enable_lane(dsi, lanes_mask);

	/* Check clock and data lane state are stop state */
	timeout = 100;
	do {
		if (timeout-- == 0) {
			dev_err(dsi->dev, "waiting for bus lanes timed out\n");
			return -EFAULT;
		}

		reg = samsung_dsim_read(dsi, DSIM_STATUS_REG);
		if ((reg & DSIM_STOP_STATE_DAT(lanes_mask))
		    != DSIM_STOP_STATE_DAT(lanes_mask))
			continue;
	} while (!(reg & (DSIM_STOP_STATE_CLK | DSIM_TX_READY_HS_CLK)));

	reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
	reg &= ~DSIM_STOP_STATE_CNT_MASK;
	reg |= DSIM_STOP_STATE_CNT(driver_data->reg_values[STOP_STATE_CNT]);

	/* Non-Exynos (i.MX8M) integrations keep the lanes forced to stop here */
	if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type))
		reg |= DSIM_FORCE_STOP_STATE;

	samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);

	reg = DSIM_BTA_TIMEOUT(0xff) | DSIM_LPDR_TIMEOUT(0xffff);
	samsung_dsim_write(dsi, DSIM_TIMEOUT_REG, reg);

	return 0;
}

/*
 * Program the display timings (porches, sync widths, resolution) into
 * the controller.  Horizontal values are converted from pixel-clock to
 * byte-clock units for video mode.
 */
static void samsung_dsim_set_display_mode(struct samsung_dsim *dsi)
{
	struct drm_display_mode *m = &dsi->mode;
	unsigned int num_bits_resol = dsi->driver_data->num_bits_resol;
	u32 reg;

	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
		int byte_clk_khz = dsi->hs_clock / 1000 / 8;
		int hfp = (m->hsync_start - m->hdisplay) * byte_clk_khz / m->clock;
		int hbp = (m->htotal - m->hsync_end) * byte_clk_khz / m->clock;
		int hsa = (m->hsync_end - m->hsync_start) * byte_clk_khz / m->clock;

		/* remove packet overhead when possible */
		hfp = max(hfp - 6, 0);
		hbp = max(hbp - 6, 0);
		hsa = max(hsa - 6, 0);

		dev_dbg(dsi->dev, "calculated hfp: %u, hbp: %u, hsa: %u",
			hfp, hbp, hsa);

		reg = DSIM_CMD_ALLOW(0xf)
			| DSIM_STABLE_VFP(m->vsync_start - m->vdisplay)
			| DSIM_MAIN_VBP(m->vtotal - m->vsync_end);
		samsung_dsim_write(dsi, DSIM_MVPORCH_REG, reg);

		reg = DSIM_MAIN_HFP(hfp) | DSIM_MAIN_HBP(hbp);
		samsung_dsim_write(dsi, DSIM_MHPORCH_REG, reg);

		reg = DSIM_MAIN_VSA(m->vsync_end - m->vsync_start) |
DSIM_MAIN_HSA(hsa); samsung_dsim_write(dsi, DSIM_MSYNC_REG, reg); } reg = DSIM_MAIN_HRESOL(m->hdisplay, num_bits_resol) | DSIM_MAIN_VRESOL(m->vdisplay, num_bits_resol); samsung_dsim_write(dsi, DSIM_MDRESOL_REG, reg); dev_dbg(dsi->dev, "LCD size = %dx%d\n", m->hdisplay, m->vdisplay); } static void samsung_dsim_set_display_enable(struct samsung_dsim *dsi, bool enable) { u32 reg; reg = samsung_dsim_read(dsi, DSIM_MDRESOL_REG); if (enable) reg |= DSIM_MAIN_STAND_BY; else reg &= ~DSIM_MAIN_STAND_BY; samsung_dsim_write(dsi, DSIM_MDRESOL_REG, reg); } static int samsung_dsim_wait_for_hdr_fifo(struct samsung_dsim *dsi) { int timeout = 2000; do { u32 reg = samsung_dsim_read(dsi, DSIM_FIFOCTRL_REG); if (reg & DSIM_SFR_HEADER_EMPTY) return 0; if (!cond_resched()) usleep_range(950, 1050); } while (--timeout); return -ETIMEDOUT; } static void samsung_dsim_set_cmd_lpm(struct samsung_dsim *dsi, bool lpm) { u32 v = samsung_dsim_read(dsi, DSIM_ESCMODE_REG); if (lpm) v |= DSIM_CMD_LPDT_LP; else v &= ~DSIM_CMD_LPDT_LP; samsung_dsim_write(dsi, DSIM_ESCMODE_REG, v); } static void samsung_dsim_force_bta(struct samsung_dsim *dsi) { u32 v = samsung_dsim_read(dsi, DSIM_ESCMODE_REG); v |= DSIM_FORCE_BTA; samsung_dsim_write(dsi, DSIM_ESCMODE_REG, v); } static void samsung_dsim_send_to_fifo(struct samsung_dsim *dsi, struct samsung_dsim_transfer *xfer) { struct device *dev = dsi->dev; struct mipi_dsi_packet *pkt = &xfer->packet; const u8 *payload = pkt->payload + xfer->tx_done; u16 length = pkt->payload_length - xfer->tx_done; bool first = !xfer->tx_done; u32 reg; dev_dbg(dev, "< xfer %pK: tx len %u, done %u, rx len %u, done %u\n", xfer, length, xfer->tx_done, xfer->rx_len, xfer->rx_done); if (length > DSI_TX_FIFO_SIZE) length = DSI_TX_FIFO_SIZE; xfer->tx_done += length; /* Send payload */ while (length >= 4) { reg = get_unaligned_le32(payload); samsung_dsim_write(dsi, DSIM_PAYLOAD_REG, reg); payload += 4; length -= 4; } reg = 0; switch (length) { case 3: reg |= payload[2] << 16; fallthrough; 
case 2: reg |= payload[1] << 8; fallthrough; case 1: reg |= payload[0]; samsung_dsim_write(dsi, DSIM_PAYLOAD_REG, reg); break; } /* Send packet header */ if (!first) return; reg = get_unaligned_le32(pkt->header); if (samsung_dsim_wait_for_hdr_fifo(dsi)) { dev_err(dev, "waiting for header FIFO timed out\n"); return; } if (NEQV(xfer->flags & MIPI_DSI_MSG_USE_LPM, dsi->state & DSIM_STATE_CMD_LPM)) { samsung_dsim_set_cmd_lpm(dsi, xfer->flags & MIPI_DSI_MSG_USE_LPM); dsi->state ^= DSIM_STATE_CMD_LPM; } samsung_dsim_write(dsi, DSIM_PKTHDR_REG, reg); if (xfer->flags & MIPI_DSI_MSG_REQ_ACK) samsung_dsim_force_bta(dsi); } static void samsung_dsim_read_from_fifo(struct samsung_dsim *dsi, struct samsung_dsim_transfer *xfer) { u8 *payload = xfer->rx_payload + xfer->rx_done; bool first = !xfer->rx_done; struct device *dev = dsi->dev; u16 length; u32 reg; if (first) { reg = samsung_dsim_read(dsi, DSIM_RXFIFO_REG); switch (reg & 0x3f) { case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE: case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE: if (xfer->rx_len >= 2) { payload[1] = reg >> 16; ++xfer->rx_done; } fallthrough; case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE: case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE: payload[0] = reg >> 8; ++xfer->rx_done; xfer->rx_len = xfer->rx_done; xfer->result = 0; goto clear_fifo; case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT: dev_err(dev, "DSI Error Report: 0x%04x\n", (reg >> 8) & 0xffff); xfer->result = 0; goto clear_fifo; } length = (reg >> 8) & 0xffff; if (length > xfer->rx_len) { dev_err(dev, "response too long (%u > %u bytes), stripping\n", xfer->rx_len, length); length = xfer->rx_len; } else if (length < xfer->rx_len) { xfer->rx_len = length; } } length = xfer->rx_len - xfer->rx_done; xfer->rx_done += length; /* Receive payload */ while (length >= 4) { reg = samsung_dsim_read(dsi, DSIM_RXFIFO_REG); payload[0] = (reg >> 0) & 0xff; payload[1] = (reg >> 8) & 0xff; payload[2] = (reg >> 16) & 0xff; payload[3] = (reg >> 24) & 0xff; payload += 
4; length -= 4; }

	/* Receive the 1-3 trailing payload bytes from one last FIFO word. */
	if (length) {
		reg = samsung_dsim_read(dsi, DSIM_RXFIFO_REG);
		switch (length) {
		case 3:
			payload[2] = (reg >> 16) & 0xff;
			fallthrough;
		case 2:
			payload[1] = (reg >> 8) & 0xff;
			fallthrough;
		case 1:
			payload[0] = reg & 0xff;
		}
	}

	if (xfer->rx_done == xfer->rx_len)
		xfer->result = 0;

clear_fifo:
	/* Drain anything left in the RX FIFO, bounded by its word capacity. */
	length = DSI_RX_FIFO_SIZE / 4;
	do {
		reg = samsung_dsim_read(dsi, DSIM_RXFIFO_REG);
		if (reg == DSI_RX_FIFO_EMPTY)
			break;
	} while (--length);
}

/*
 * Feed queued transfers to the hardware.  TX-only transfers that complete
 * here are popped and the loop continues with the next one; a transfer that
 * expects an RX response stays at the head until the IRQ path finishes it.
 */
static void samsung_dsim_transfer_start(struct samsung_dsim *dsi)
{
	unsigned long flags;
	struct samsung_dsim_transfer *xfer;
	bool start = false;

again:
	spin_lock_irqsave(&dsi->transfer_lock, flags);

	if (list_empty(&dsi->transfer_list)) {
		spin_unlock_irqrestore(&dsi->transfer_lock, flags);
		return;
	}

	xfer = list_first_entry(&dsi->transfer_list,
				struct samsung_dsim_transfer, list);

	spin_unlock_irqrestore(&dsi->transfer_lock, flags);

	if (xfer->packet.payload_length &&
	    xfer->tx_done == xfer->packet.payload_length)
		/* waiting for RX */
		return;

	samsung_dsim_send_to_fifo(dsi, xfer);

	if (xfer->packet.payload_length || xfer->rx_len)
		return;

	xfer->result = 0;
	complete(&xfer->completed);

	spin_lock_irqsave(&dsi->transfer_lock, flags);

	list_del_init(&xfer->list);
	start = !list_empty(&dsi->transfer_list);

	spin_unlock_irqrestore(&dsi->transfer_lock, flags);

	if (start)
		goto again;
}

/*
 * IRQ-path completion: finish the transfer at the head of the queue once
 * both its TX and RX sides are done.  Returns true when another queued
 * transfer should be started by the caller.
 */
static bool samsung_dsim_transfer_finish(struct samsung_dsim *dsi)
{
	struct samsung_dsim_transfer *xfer;
	unsigned long flags;
	bool start = true;

	spin_lock_irqsave(&dsi->transfer_lock, flags);

	if (list_empty(&dsi->transfer_list)) {
		spin_unlock_irqrestore(&dsi->transfer_lock, flags);
		return false;
	}

	xfer = list_first_entry(&dsi->transfer_list,
				struct samsung_dsim_transfer, list);

	spin_unlock_irqrestore(&dsi->transfer_lock, flags);

	dev_dbg(dsi->dev,
		"> xfer %pK, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n",
		xfer, xfer->packet.payload_length, xfer->tx_done, xfer->rx_len,
		xfer->rx_done);

	if (xfer->tx_done != xfer->packet.payload_length)
		return true;

	if
(xfer->rx_done != xfer->rx_len) samsung_dsim_read_from_fifo(dsi, xfer); if (xfer->rx_done != xfer->rx_len) return true; spin_lock_irqsave(&dsi->transfer_lock, flags); list_del_init(&xfer->list); start = !list_empty(&dsi->transfer_list); spin_unlock_irqrestore(&dsi->transfer_lock, flags); if (!xfer->rx_len) xfer->result = 0; complete(&xfer->completed); return start; } static void samsung_dsim_remove_transfer(struct samsung_dsim *dsi, struct samsung_dsim_transfer *xfer) { unsigned long flags; bool start; spin_lock_irqsave(&dsi->transfer_lock, flags); if (!list_empty(&dsi->transfer_list) && xfer == list_first_entry(&dsi->transfer_list, struct samsung_dsim_transfer, list)) { list_del_init(&xfer->list); start = !list_empty(&dsi->transfer_list); spin_unlock_irqrestore(&dsi->transfer_lock, flags); if (start) samsung_dsim_transfer_start(dsi); return; } list_del_init(&xfer->list); spin_unlock_irqrestore(&dsi->transfer_lock, flags); } static int samsung_dsim_transfer(struct samsung_dsim *dsi, struct samsung_dsim_transfer *xfer) { unsigned long flags; bool stopped; xfer->tx_done = 0; xfer->rx_done = 0; xfer->result = -ETIMEDOUT; init_completion(&xfer->completed); spin_lock_irqsave(&dsi->transfer_lock, flags); stopped = list_empty(&dsi->transfer_list); list_add_tail(&xfer->list, &dsi->transfer_list); spin_unlock_irqrestore(&dsi->transfer_lock, flags); if (stopped) samsung_dsim_transfer_start(dsi); wait_for_completion_timeout(&xfer->completed, msecs_to_jiffies(DSI_XFER_TIMEOUT_MS)); if (xfer->result == -ETIMEDOUT) { struct mipi_dsi_packet *pkt = &xfer->packet; samsung_dsim_remove_transfer(dsi, xfer); dev_err(dsi->dev, "xfer timed out: %*ph %*ph\n", 4, pkt->header, (int)pkt->payload_length, pkt->payload); return -ETIMEDOUT; } /* Also covers hardware timeout condition */ return xfer->result; } static irqreturn_t samsung_dsim_irq(int irq, void *dev_id) { struct samsung_dsim *dsi = dev_id; u32 status; status = samsung_dsim_read(dsi, DSIM_INTSRC_REG); if (!status) { static unsigned 
long j;

		if (printk_timed_ratelimit(&j, 500))
			dev_warn(dsi->dev, "spurious interrupt\n");
		return IRQ_HANDLED;
	}
	samsung_dsim_write(dsi, DSIM_INTSRC_REG, status);

	if (status & DSIM_INT_SW_RST_RELEASE) {
		/* Software reset released: unmask the interrupts we use. */
		unsigned long mask = ~(DSIM_INT_RX_DONE |
				       DSIM_INT_SFR_FIFO_EMPTY |
				       DSIM_INT_SFR_HDR_FIFO_EMPTY |
				       DSIM_INT_RX_ECC_ERR |
				       DSIM_INT_SW_RST_RELEASE);
		samsung_dsim_write(dsi, DSIM_INTMSK_REG, mask);
		complete(&dsi->completed);
		return IRQ_HANDLED;
	}

	if (!(status & (DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY |
			DSIM_INT_PLL_STABLE)))
		return IRQ_HANDLED;

	/* Advance the transfer queue; kick the next transfer if one is ready. */
	if (samsung_dsim_transfer_finish(dsi))
		samsung_dsim_transfer_start(dsi);

	return IRQ_HANDLED;
}

/* Enable the controller IRQ and, when present, the panel TE GPIO IRQ. */
static void samsung_dsim_enable_irq(struct samsung_dsim *dsi)
{
	enable_irq(dsi->irq);

	if (dsi->te_gpio)
		enable_irq(gpiod_to_irq(dsi->te_gpio));
}

/* Disable IRQs in the reverse order of samsung_dsim_enable_irq(). */
static void samsung_dsim_disable_irq(struct samsung_dsim *dsi)
{
	if (dsi->te_gpio)
		disable_irq(gpiod_to_irq(dsi->te_gpio));

	disable_irq(dsi->irq);
}

/* Force (or release) stop state on the DSI lanes via the ESCMODE register. */
static void samsung_dsim_set_stop_state(struct samsung_dsim *dsi, bool enable)
{
	u32 reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);

	if (enable)
		reg |= DSIM_FORCE_STOP_STATE;
	else
		reg &= ~DSIM_FORCE_STOP_STATE;

	samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
}

/*
 * One-time link bring-up: reset, IRQs, lanes, clocks, PHY timings and link
 * configuration.  Idempotent - returns immediately once
 * DSIM_STATE_INITIALIZED is set.
 */
static int samsung_dsim_init(struct samsung_dsim *dsi)
{
	const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;

	if (dsi->state & DSIM_STATE_INITIALIZED)
		return 0;

	samsung_dsim_reset(dsi);
	samsung_dsim_enable_irq(dsi);

	if (driver_data->reg_values[RESET_TYPE] == DSIM_FUNCRST)
		samsung_dsim_enable_lane(dsi, BIT(dsi->lanes) - 1);

	samsung_dsim_enable_clock(dsi);
	if (driver_data->wait_for_reset)
		samsung_dsim_wait_for_reset(dsi);
	samsung_dsim_set_phy_ctrl(dsi);
	samsung_dsim_init_link(dsi);

	dsi->state |= DSIM_STATE_INITIALIZED;

	return 0;
}

static void samsung_dsim_atomic_pre_enable(struct drm_bridge *bridge,
					   struct drm_bridge_state *old_bridge_state)
{
	struct samsung_dsim *dsi = bridge_to_dsi(bridge);
	int ret;

	if (dsi->state & DSIM_STATE_ENABLED)
		return;

	ret =
pm_runtime_resume_and_get(dsi->dev); if (ret < 0) { dev_err(dsi->dev, "failed to enable DSI device.\n"); return; } dsi->state |= DSIM_STATE_ENABLED; /* * For Exynos-DSIM the downstream bridge, or panel are expecting * the host initialization during DSI transfer. */ if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) { ret = samsung_dsim_init(dsi); if (ret) return; samsung_dsim_set_display_mode(dsi); samsung_dsim_set_display_enable(dsi, true); } } static void samsung_dsim_atomic_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct samsung_dsim *dsi = bridge_to_dsi(bridge); if (samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) { samsung_dsim_set_display_mode(dsi); samsung_dsim_set_display_enable(dsi, true); } else { samsung_dsim_set_stop_state(dsi, false); } dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE; } static void samsung_dsim_atomic_disable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct samsung_dsim *dsi = bridge_to_dsi(bridge); if (!(dsi->state & DSIM_STATE_ENABLED)) return; if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) samsung_dsim_set_stop_state(dsi, true); dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE; } static void samsung_dsim_atomic_post_disable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct samsung_dsim *dsi = bridge_to_dsi(bridge); samsung_dsim_set_display_enable(dsi, false); dsi->state &= ~DSIM_STATE_ENABLED; pm_runtime_put_sync(dsi->dev); } /* * This pixel output formats list referenced from, * AN13573 i.MX 8/RT MIPI DSI/CSI-2, Rev. 0, 21 March 2022 * 3.7.4 Pixel formats * Table 14. 
DSI pixel packing formats */ static const u32 samsung_dsim_pixel_output_fmts[] = { MEDIA_BUS_FMT_YUYV10_1X20, MEDIA_BUS_FMT_YUYV12_1X24, MEDIA_BUS_FMT_UYVY8_1X16, MEDIA_BUS_FMT_RGB101010_1X30, MEDIA_BUS_FMT_RGB121212_1X36, MEDIA_BUS_FMT_RGB565_1X16, MEDIA_BUS_FMT_RGB666_1X18, MEDIA_BUS_FMT_RGB888_1X24, }; static bool samsung_dsim_pixel_output_fmt_supported(u32 fmt) { int i; if (fmt == MEDIA_BUS_FMT_FIXED) return false; for (i = 0; i < ARRAY_SIZE(samsung_dsim_pixel_output_fmts); i++) { if (samsung_dsim_pixel_output_fmts[i] == fmt) return true; } return false; } static u32 * samsung_dsim_atomic_get_input_bus_fmts(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state, u32 output_fmt, unsigned int *num_input_fmts) { u32 *input_fmts; input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL); if (!input_fmts) return NULL; if (!samsung_dsim_pixel_output_fmt_supported(output_fmt)) /* * Some bridge/display drivers are still not able to pass the * correct format, so handle those pipelines by falling back * to the default format till the supported formats finalized. */ output_fmt = MEDIA_BUS_FMT_RGB888_1X24; input_fmts[0] = output_fmt; *num_input_fmts = 1; return input_fmts; } static int samsung_dsim_atomic_check(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct samsung_dsim *dsi = bridge_to_dsi(bridge); struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; /* * The i.MX8M Mini/Nano glue logic between LCDIF and DSIM * inverts HS/VS/DE sync signals polarity, therefore, while * i.MX 8M Mini Applications Processor Reference Manual Rev. 3, 11/2020 * 13.6.3.5.2 RGB interface * i.MX 8M Nano Applications Processor Reference Manual Rev. 
2, 07/2022 * 13.6.2.7.2 RGB interface * both claim "Vsync, Hsync, and VDEN are active high signals.", the * LCDIF must generate inverted HS/VS/DE signals, i.e. active LOW. * * The i.MX8M Plus glue logic between LCDIFv3 and DSIM does not * implement the same behavior, therefore LCDIFv3 must generate * HS/VS/DE signals active HIGH. */ if (dsi->plat_data->hw_type == DSIM_TYPE_IMX8MM) { adjusted_mode->flags |= (DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC); adjusted_mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC); } else if (dsi->plat_data->hw_type == DSIM_TYPE_IMX8MP) { adjusted_mode->flags &= ~(DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC); adjusted_mode->flags |= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC); } return 0; } static void samsung_dsim_mode_set(struct drm_bridge *bridge, const struct drm_display_mode *mode, const struct drm_display_mode *adjusted_mode) { struct samsung_dsim *dsi = bridge_to_dsi(bridge); drm_mode_copy(&dsi->mode, adjusted_mode); } static int samsung_dsim_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct samsung_dsim *dsi = bridge_to_dsi(bridge); return drm_bridge_attach(bridge->encoder, dsi->out_bridge, bridge, flags); } static const struct drm_bridge_funcs samsung_dsim_bridge_funcs = { .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_reset = drm_atomic_helper_bridge_reset, .atomic_get_input_bus_fmts = samsung_dsim_atomic_get_input_bus_fmts, .atomic_check = samsung_dsim_atomic_check, .atomic_pre_enable = samsung_dsim_atomic_pre_enable, .atomic_enable = samsung_dsim_atomic_enable, .atomic_disable = samsung_dsim_atomic_disable, .atomic_post_disable = samsung_dsim_atomic_post_disable, .mode_set = samsung_dsim_mode_set, .attach = samsung_dsim_attach, }; static irqreturn_t samsung_dsim_te_irq_handler(int irq, void *dev_id) { struct samsung_dsim *dsi = (struct samsung_dsim *)dev_id; const struct 
samsung_dsim_plat_data *pdata = dsi->plat_data; if (pdata->host_ops && pdata->host_ops->te_irq_handler) return pdata->host_ops->te_irq_handler(dsi); return IRQ_HANDLED; } static int samsung_dsim_register_te_irq(struct samsung_dsim *dsi, struct device *dev) { int te_gpio_irq; int ret; dsi->te_gpio = devm_gpiod_get_optional(dev, "te", GPIOD_IN); if (!dsi->te_gpio) return 0; else if (IS_ERR(dsi->te_gpio)) return dev_err_probe(dev, PTR_ERR(dsi->te_gpio), "failed to get te GPIO\n"); te_gpio_irq = gpiod_to_irq(dsi->te_gpio); ret = request_threaded_irq(te_gpio_irq, samsung_dsim_te_irq_handler, NULL, IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN, "TE", dsi); if (ret) { dev_err(dsi->dev, "request interrupt failed with %d\n", ret); gpiod_put(dsi->te_gpio); return ret; } return 0; } static int samsung_dsim_host_attach(struct mipi_dsi_host *host, struct mipi_dsi_device *device) { struct samsung_dsim *dsi = host_to_dsi(host); const struct samsung_dsim_plat_data *pdata = dsi->plat_data; struct device *dev = dsi->dev; struct device_node *np = dev->of_node; struct device_node *remote; struct drm_panel *panel; int ret; /* * Devices can also be child nodes when we also control that device * through the upstream device (ie, MIPI-DCS for a MIPI-DSI device). * * Lookup for a child node of the given parent that isn't either port * or ports. */ for_each_available_child_of_node(np, remote) { if (of_node_name_eq(remote, "port") || of_node_name_eq(remote, "ports")) continue; goto of_find_panel_or_bridge; } /* * of_graph_get_remote_node() produces a noisy error message if port * node isn't found and the absence of the port is a legit case here, * so at first we silently check whether graph presents in the * device-tree node. 
*/ if (!of_graph_is_present(np)) return -ENODEV; remote = of_graph_get_remote_node(np, 1, 0); of_find_panel_or_bridge: if (!remote) return -ENODEV; panel = of_drm_find_panel(remote); if (!IS_ERR(panel)) { dsi->out_bridge = devm_drm_panel_bridge_add(dev, panel); } else { dsi->out_bridge = of_drm_find_bridge(remote); if (!dsi->out_bridge) dsi->out_bridge = ERR_PTR(-EINVAL); } of_node_put(remote); if (IS_ERR(dsi->out_bridge)) { ret = PTR_ERR(dsi->out_bridge); DRM_DEV_ERROR(dev, "failed to find the bridge: %d\n", ret); return ret; } DRM_DEV_INFO(dev, "Attached %s device\n", device->name); drm_bridge_add(&dsi->bridge); /* * This is a temporary solution and should be made by more generic way. * * If attached panel device is for command mode one, dsi should register * TE interrupt handler. */ if (!(device->mode_flags & MIPI_DSI_MODE_VIDEO)) { ret = samsung_dsim_register_te_irq(dsi, &device->dev); if (ret) return ret; } if (pdata->host_ops && pdata->host_ops->attach) { ret = pdata->host_ops->attach(dsi, device); if (ret) return ret; } dsi->lanes = device->lanes; dsi->format = device->format; dsi->mode_flags = device->mode_flags; return 0; } static void samsung_dsim_unregister_te_irq(struct samsung_dsim *dsi) { if (dsi->te_gpio) { free_irq(gpiod_to_irq(dsi->te_gpio), dsi); gpiod_put(dsi->te_gpio); } } static int samsung_dsim_host_detach(struct mipi_dsi_host *host, struct mipi_dsi_device *device) { struct samsung_dsim *dsi = host_to_dsi(host); const struct samsung_dsim_plat_data *pdata = dsi->plat_data; dsi->out_bridge = NULL; if (pdata->host_ops && pdata->host_ops->detach) pdata->host_ops->detach(dsi, device); samsung_dsim_unregister_te_irq(dsi); drm_bridge_remove(&dsi->bridge); return 0; } static ssize_t samsung_dsim_host_transfer(struct mipi_dsi_host *host, const struct mipi_dsi_msg *msg) { struct samsung_dsim *dsi = host_to_dsi(host); struct samsung_dsim_transfer xfer; int ret; if (!(dsi->state & DSIM_STATE_ENABLED)) return -EINVAL; ret = samsung_dsim_init(dsi); if (ret) 
return ret; samsung_dsim_set_stop_state(dsi, false); ret = mipi_dsi_create_packet(&xfer.packet, msg); if (ret < 0) return ret; xfer.rx_len = msg->rx_len; xfer.rx_payload = msg->rx_buf; xfer.flags = msg->flags; ret = samsung_dsim_transfer(dsi, &xfer); return (ret < 0) ? ret : xfer.rx_done; } static const struct mipi_dsi_host_ops samsung_dsim_ops = { .attach = samsung_dsim_host_attach, .detach = samsung_dsim_host_detach, .transfer = samsung_dsim_host_transfer, }; static int samsung_dsim_of_read_u32(const struct device_node *np, const char *propname, u32 *out_value, bool optional) { int ret = of_property_read_u32(np, propname, out_value); if (ret < 0 && !optional) pr_err("%pOF: failed to get '%s' property\n", np, propname); return ret; } static int samsung_dsim_parse_dt(struct samsung_dsim *dsi) { struct device *dev = dsi->dev; struct device_node *node = dev->of_node; u32 lane_polarities[5] = { 0 }; struct device_node *endpoint; int i, nr_lanes, ret; struct clk *pll_clk; ret = samsung_dsim_of_read_u32(node, "samsung,pll-clock-frequency", &dsi->pll_clk_rate, 1); /* If it doesn't exist, read it from the clock instead of failing */ if (ret < 0) { dev_dbg(dev, "Using sclk_mipi for pll clock frequency\n"); pll_clk = devm_clk_get(dev, "sclk_mipi"); if (!IS_ERR(pll_clk)) dsi->pll_clk_rate = clk_get_rate(pll_clk); else return PTR_ERR(pll_clk); } /* If it doesn't exist, use pixel clock instead of failing */ ret = samsung_dsim_of_read_u32(node, "samsung,burst-clock-frequency", &dsi->burst_clk_rate, 1); if (ret < 0) { dev_dbg(dev, "Using pixel clock for HS clock frequency\n"); dsi->burst_clk_rate = 0; } ret = samsung_dsim_of_read_u32(node, "samsung,esc-clock-frequency", &dsi->esc_clk_rate, 0); if (ret < 0) return ret; endpoint = of_graph_get_endpoint_by_regs(node, 1, -1); nr_lanes = of_property_count_u32_elems(endpoint, "data-lanes"); if (nr_lanes > 0 && nr_lanes <= 4) { /* Polarity 0 is clock lane, 1..4 are data lanes. 
*/ of_property_read_u32_array(endpoint, "lane-polarities", lane_polarities, nr_lanes + 1); for (i = 1; i <= nr_lanes; i++) { if (lane_polarities[1] != lane_polarities[i]) DRM_DEV_ERROR(dsi->dev, "Data lanes polarities do not match"); } if (lane_polarities[0]) dsi->swap_dn_dp_clk = true; if (lane_polarities[1]) dsi->swap_dn_dp_data = true; } return 0; } static int generic_dsim_register_host(struct samsung_dsim *dsi) { return mipi_dsi_host_register(&dsi->dsi_host); } static void generic_dsim_unregister_host(struct samsung_dsim *dsi) { mipi_dsi_host_unregister(&dsi->dsi_host); } static const struct samsung_dsim_host_ops generic_dsim_host_ops = { .register_host = generic_dsim_register_host, .unregister_host = generic_dsim_unregister_host, }; static const struct drm_bridge_timings samsung_dsim_bridge_timings_de_high = { .input_bus_flags = DRM_BUS_FLAG_DE_HIGH, }; static const struct drm_bridge_timings samsung_dsim_bridge_timings_de_low = { .input_bus_flags = DRM_BUS_FLAG_DE_LOW, }; int samsung_dsim_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct samsung_dsim *dsi; int ret, i; dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL); if (!dsi) return -ENOMEM; init_completion(&dsi->completed); spin_lock_init(&dsi->transfer_lock); INIT_LIST_HEAD(&dsi->transfer_list); dsi->dsi_host.ops = &samsung_dsim_ops; dsi->dsi_host.dev = dev; dsi->dev = dev; dsi->plat_data = of_device_get_match_data(dev); dsi->driver_data = samsung_dsim_types[dsi->plat_data->hw_type]; dsi->supplies[0].supply = "vddcore"; dsi->supplies[1].supply = "vddio"; ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies), dsi->supplies); if (ret) return dev_err_probe(dev, ret, "failed to get regulators\n"); dsi->clks = devm_kcalloc(dev, dsi->driver_data->num_clks, sizeof(*dsi->clks), GFP_KERNEL); if (!dsi->clks) return -ENOMEM; for (i = 0; i < dsi->driver_data->num_clks; i++) { dsi->clks[i] = devm_clk_get(dev, clk_names[i]); if (IS_ERR(dsi->clks[i])) { if (strcmp(clk_names[i], 
"sclk_mipi") == 0) { dsi->clks[i] = devm_clk_get(dev, OLD_SCLK_MIPI_CLK_NAME); if (!IS_ERR(dsi->clks[i])) continue; } dev_info(dev, "failed to get the clock: %s\n", clk_names[i]); return PTR_ERR(dsi->clks[i]); } } dsi->reg_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dsi->reg_base)) return PTR_ERR(dsi->reg_base); dsi->phy = devm_phy_optional_get(dev, "dsim"); if (IS_ERR(dsi->phy)) { dev_info(dev, "failed to get dsim phy\n"); return PTR_ERR(dsi->phy); } dsi->irq = platform_get_irq(pdev, 0); if (dsi->irq < 0) return dsi->irq; ret = devm_request_threaded_irq(dev, dsi->irq, NULL, samsung_dsim_irq, IRQF_ONESHOT | IRQF_NO_AUTOEN, dev_name(dev), dsi); if (ret) { dev_err(dev, "failed to request dsi irq\n"); return ret; } ret = samsung_dsim_parse_dt(dsi); if (ret) return ret; platform_set_drvdata(pdev, dsi); pm_runtime_enable(dev); dsi->bridge.funcs = &samsung_dsim_bridge_funcs; dsi->bridge.of_node = dev->of_node; dsi->bridge.type = DRM_MODE_CONNECTOR_DSI; /* DE_LOW: i.MX8M Mini/Nano LCDIF-DSIM glue logic inverts HS/VS/DE */ if (dsi->plat_data->hw_type == DSIM_TYPE_IMX8MM) dsi->bridge.timings = &samsung_dsim_bridge_timings_de_low; else dsi->bridge.timings = &samsung_dsim_bridge_timings_de_high; if (dsi->plat_data->host_ops && dsi->plat_data->host_ops->register_host) ret = dsi->plat_data->host_ops->register_host(dsi); if (ret) goto err_disable_runtime; return 0; err_disable_runtime: pm_runtime_disable(dev); return ret; } EXPORT_SYMBOL_GPL(samsung_dsim_probe); int samsung_dsim_remove(struct platform_device *pdev) { struct samsung_dsim *dsi = platform_get_drvdata(pdev); pm_runtime_disable(&pdev->dev); if (dsi->plat_data->host_ops && dsi->plat_data->host_ops->unregister_host) dsi->plat_data->host_ops->unregister_host(dsi); return 0; } EXPORT_SYMBOL_GPL(samsung_dsim_remove); static int __maybe_unused samsung_dsim_suspend(struct device *dev) { struct samsung_dsim *dsi = dev_get_drvdata(dev); const struct samsung_dsim_driver_data *driver_data = dsi->driver_data; int 
ret, i; usleep_range(10000, 20000); if (dsi->state & DSIM_STATE_INITIALIZED) { dsi->state &= ~DSIM_STATE_INITIALIZED; samsung_dsim_disable_clock(dsi); samsung_dsim_disable_irq(dsi); } dsi->state &= ~DSIM_STATE_CMD_LPM; phy_power_off(dsi->phy); for (i = driver_data->num_clks - 1; i > -1; i--) clk_disable_unprepare(dsi->clks[i]); ret = regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies); if (ret < 0) dev_err(dsi->dev, "cannot disable regulators %d\n", ret); return 0; } static int __maybe_unused samsung_dsim_resume(struct device *dev) { struct samsung_dsim *dsi = dev_get_drvdata(dev); const struct samsung_dsim_driver_data *driver_data = dsi->driver_data; int ret, i; ret = regulator_bulk_enable(ARRAY_SIZE(dsi->supplies), dsi->supplies); if (ret < 0) { dev_err(dsi->dev, "cannot enable regulators %d\n", ret); return ret; } for (i = 0; i < driver_data->num_clks; i++) { ret = clk_prepare_enable(dsi->clks[i]); if (ret < 0) goto err_clk; } ret = phy_power_on(dsi->phy); if (ret < 0) { dev_err(dsi->dev, "cannot enable phy %d\n", ret); goto err_clk; } return 0; err_clk: while (--i > -1) clk_disable_unprepare(dsi->clks[i]); regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies); return ret; } const struct dev_pm_ops samsung_dsim_pm_ops = { SET_RUNTIME_PM_OPS(samsung_dsim_suspend, samsung_dsim_resume, NULL) SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; EXPORT_SYMBOL_GPL(samsung_dsim_pm_ops); static const struct samsung_dsim_plat_data samsung_dsim_imx8mm_pdata = { .hw_type = DSIM_TYPE_IMX8MM, .host_ops = &generic_dsim_host_ops, }; static const struct samsung_dsim_plat_data samsung_dsim_imx8mp_pdata = { .hw_type = DSIM_TYPE_IMX8MP, .host_ops = &generic_dsim_host_ops, }; static const struct of_device_id samsung_dsim_of_match[] = { { .compatible = "fsl,imx8mm-mipi-dsim", .data = &samsung_dsim_imx8mm_pdata, }, { .compatible = "fsl,imx8mp-mipi-dsim", .data = &samsung_dsim_imx8mp_pdata, }, { /* sentinel. 
*/ } }; MODULE_DEVICE_TABLE(of, samsung_dsim_of_match); static struct platform_driver samsung_dsim_driver = { .probe = samsung_dsim_probe, .remove = samsung_dsim_remove, .driver = { .name = "samsung-dsim", .pm = &samsung_dsim_pm_ops, .of_match_table = samsung_dsim_of_match, }, }; module_platform_driver(samsung_dsim_driver); MODULE_AUTHOR("Jagan Teki <[email protected]>"); MODULE_DESCRIPTION("Samsung MIPI DSIM controller bridge"); MODULE_LICENSE("GPL");
/* linux-master: drivers/gpu/drm/bridge/samsung-dsim.c ends above; the Lontium LT8912B driver follows. */
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2018, The Linux Foundation. All rights reserved. */ #include <linux/device.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/media-bus-format.h> #include <linux/regmap.h> #include <drm/drm_probe_helper.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_edid.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_of.h> #include <video/videomode.h> #define I2C_MAIN 0 #define I2C_ADDR_MAIN 0x48 #define I2C_CEC_DSI 1 #define I2C_ADDR_CEC_DSI 0x49 #define I2C_MAX_IDX 2 struct lt8912 { struct device *dev; struct drm_bridge bridge; struct drm_connector connector; struct i2c_client *i2c_client[I2C_MAX_IDX]; struct regmap *regmap[I2C_MAX_IDX]; struct device_node *host_node; struct drm_bridge *hdmi_port; struct mipi_dsi_device *dsi; struct gpio_desc *gp_reset; struct videomode mode; u8 data_lanes; bool is_power_on; bool is_attached; }; static int lt8912_write_init_config(struct lt8912 *lt) { const struct reg_sequence seq[] = { /* Digital clock en*/ {0x08, 0xff}, {0x09, 0xff}, {0x0a, 0xff}, {0x0b, 0x7c}, {0x0c, 0xff}, {0x42, 0x04}, /*Tx Analog*/ {0x31, 0xb1}, {0x32, 0xb1}, {0x33, 0x0e}, {0x37, 0x00}, {0x38, 0x22}, {0x60, 0x82}, /*Cbus Analog*/ {0x39, 0x45}, {0x3a, 0x00}, {0x3b, 0x00}, /*HDMI Pll Analog*/ {0x44, 0x31}, {0x55, 0x44}, {0x57, 0x01}, {0x5a, 0x02}, /*MIPI Analog*/ {0x3e, 0xd6}, {0x3f, 0xd4}, {0x41, 0x3c}, {0xB2, 0x00}, }; return regmap_multi_reg_write(lt->regmap[I2C_MAIN], seq, ARRAY_SIZE(seq)); } static int lt8912_write_mipi_basic_config(struct lt8912 *lt) { const struct reg_sequence seq[] = { {0x12, 0x04}, {0x14, 0x00}, {0x15, 0x00}, {0x1a, 0x03}, {0x1b, 0x03}, }; return regmap_multi_reg_write(lt->regmap[I2C_CEC_DSI], seq, ARRAY_SIZE(seq)); }; static int lt8912_write_dds_config(struct lt8912 *lt) { const struct reg_sequence seq[] = { {0x4e, 0xff}, {0x4f, 0x56}, {0x50, 0x69}, {0x51, 0x80}, {0x1f, 0x5e}, {0x20, 0x01}, {0x21, 0x2c}, {0x22, 0x01}, {0x23, 0xfa}, 
{0x24, 0x00}, {0x25, 0xc8}, {0x26, 0x00}, {0x27, 0x5e}, {0x28, 0x01}, {0x29, 0x2c}, {0x2a, 0x01}, {0x2b, 0xfa}, {0x2c, 0x00}, {0x2d, 0xc8}, {0x2e, 0x00}, {0x42, 0x64}, {0x43, 0x00}, {0x44, 0x04}, {0x45, 0x00}, {0x46, 0x59}, {0x47, 0x00}, {0x48, 0xf2}, {0x49, 0x06}, {0x4a, 0x00}, {0x4b, 0x72}, {0x4c, 0x45}, {0x4d, 0x00}, {0x52, 0x08}, {0x53, 0x00}, {0x54, 0xb2}, {0x55, 0x00}, {0x56, 0xe4}, {0x57, 0x0d}, {0x58, 0x00}, {0x59, 0xe4}, {0x5a, 0x8a}, {0x5b, 0x00}, {0x5c, 0x34}, {0x1e, 0x4f}, {0x51, 0x00}, }; return regmap_multi_reg_write(lt->regmap[I2C_CEC_DSI], seq, ARRAY_SIZE(seq)); } static int lt8912_write_rxlogicres_config(struct lt8912 *lt) { int ret; ret = regmap_write(lt->regmap[I2C_MAIN], 0x03, 0x7f); usleep_range(10000, 20000); ret |= regmap_write(lt->regmap[I2C_MAIN], 0x03, 0xff); return ret; }; /* enable LVDS output with some hardcoded configuration, not required for the HDMI output */ static int lt8912_write_lvds_config(struct lt8912 *lt) { const struct reg_sequence seq[] = { // lvds power up {0x44, 0x30}, {0x51, 0x05}, // core pll bypass {0x50, 0x24}, // cp=50uA {0x51, 0x2d}, // Pix_clk as reference, second order passive LPF PLL {0x52, 0x04}, // loopdiv=0, use second-order PLL {0x69, 0x0e}, // CP_PRESET_DIV_RATIO {0x69, 0x8e}, {0x6a, 0x00}, {0x6c, 0xb8}, // RGD_CP_SOFT_K_EN,RGD_CP_SOFT_K[13:8] {0x6b, 0x51}, {0x04, 0xfb}, // core pll reset {0x04, 0xff}, // scaler bypass {0x7f, 0x00}, // disable scaler {0xa8, 0x13}, // 0x13: JEIDA, 0x33: VESA {0x02, 0xf7}, // lvds pll reset {0x02, 0xff}, {0x03, 0xcf}, {0x03, 0xff}, }; return regmap_multi_reg_write(lt->regmap[I2C_MAIN], seq, ARRAY_SIZE(seq)); }; static inline struct lt8912 *bridge_to_lt8912(struct drm_bridge *b) { return container_of(b, struct lt8912, bridge); } static inline struct lt8912 *connector_to_lt8912(struct drm_connector *c) { return container_of(c, struct lt8912, connector); } static const struct regmap_config lt8912_regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = 0xff, }; static int 
lt8912_init_i2c(struct lt8912 *lt, struct i2c_client *client) { unsigned int i; /* * At this time we only initialize 2 chips, but the lt8912 provides * a third interface for the audio over HDMI configuration. */ struct i2c_board_info info[] = { { I2C_BOARD_INFO("lt8912p0", I2C_ADDR_MAIN), }, { I2C_BOARD_INFO("lt8912p1", I2C_ADDR_CEC_DSI), }, }; if (!lt) return -ENODEV; for (i = 0; i < ARRAY_SIZE(info); i++) { if (i > 0) { lt->i2c_client[i] = i2c_new_dummy_device(client->adapter, info[i].addr); if (IS_ERR(lt->i2c_client[i])) return PTR_ERR(lt->i2c_client[i]); } lt->regmap[i] = devm_regmap_init_i2c(lt->i2c_client[i], &lt8912_regmap_config); if (IS_ERR(lt->regmap[i])) return PTR_ERR(lt->regmap[i]); } return 0; } static int lt8912_free_i2c(struct lt8912 *lt) { unsigned int i; for (i = 1; i < I2C_MAX_IDX; i++) i2c_unregister_device(lt->i2c_client[i]); return 0; } static int lt8912_hard_power_on(struct lt8912 *lt) { gpiod_set_value_cansleep(lt->gp_reset, 0); msleep(20); return 0; } static void lt8912_hard_power_off(struct lt8912 *lt) { gpiod_set_value_cansleep(lt->gp_reset, 1); msleep(20); lt->is_power_on = false; } static int lt8912_video_setup(struct lt8912 *lt) { u32 hactive, h_total, hpw, hfp, hbp; u32 vactive, v_total, vpw, vfp, vbp; u8 settle = 0x08; int ret, hsync_activehigh, vsync_activehigh; if (!lt) return -EINVAL; hactive = lt->mode.hactive; hfp = lt->mode.hfront_porch; hpw = lt->mode.hsync_len; hbp = lt->mode.hback_porch; h_total = hactive + hfp + hpw + hbp; hsync_activehigh = lt->mode.flags & DISPLAY_FLAGS_HSYNC_HIGH; vactive = lt->mode.vactive; vfp = lt->mode.vfront_porch; vpw = lt->mode.vsync_len; vbp = lt->mode.vback_porch; v_total = vactive + vfp + vpw + vbp; vsync_activehigh = lt->mode.flags & DISPLAY_FLAGS_VSYNC_HIGH; if (vactive <= 600) settle = 0x04; else if (vactive == 1080) settle = 0x0a; ret = regmap_write(lt->regmap[I2C_CEC_DSI], 0x10, 0x01); ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x11, settle); ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 
0x18, hpw); ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x19, vpw); ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x1c, hactive & 0xff); ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x1d, hactive >> 8); ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x2f, 0x0c); ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x34, h_total & 0xff); ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x35, h_total >> 8); ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x36, v_total & 0xff); ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x37, v_total >> 8); ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x38, vbp & 0xff); ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x39, vbp >> 8); ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x3a, vfp & 0xff); ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x3b, vfp >> 8); ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x3c, hbp & 0xff); ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x3d, hbp >> 8); ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x3e, hfp & 0xff); ret |= regmap_write(lt->regmap[I2C_CEC_DSI], 0x3f, hfp >> 8); ret |= regmap_update_bits(lt->regmap[I2C_MAIN], 0xab, BIT(0), vsync_activehigh ? BIT(0) : 0); ret |= regmap_update_bits(lt->regmap[I2C_MAIN], 0xab, BIT(1), hsync_activehigh ? BIT(1) : 0); ret |= regmap_update_bits(lt->regmap[I2C_MAIN], 0xb2, BIT(0), lt->connector.display_info.is_hdmi ? 
BIT(0) : 0); return ret; } static int lt8912_soft_power_on(struct lt8912 *lt) { if (!lt->is_power_on) { u32 lanes = lt->data_lanes; lt8912_write_init_config(lt); regmap_write(lt->regmap[I2C_CEC_DSI], 0x13, lanes & 3); lt8912_write_mipi_basic_config(lt); lt->is_power_on = true; } return 0; } static int lt8912_video_on(struct lt8912 *lt) { int ret; ret = lt8912_video_setup(lt); if (ret < 0) goto end; ret = lt8912_write_dds_config(lt); if (ret < 0) goto end; ret = lt8912_write_rxlogicres_config(lt); if (ret < 0) goto end; ret = lt8912_write_lvds_config(lt); if (ret < 0) goto end; end: return ret; } static enum drm_connector_status lt8912_check_cable_status(struct lt8912 *lt) { int ret; unsigned int reg_val; ret = regmap_read(lt->regmap[I2C_MAIN], 0xC1, &reg_val); if (ret) return connector_status_unknown; if (reg_val & BIT(7)) return connector_status_connected; return connector_status_disconnected; } static enum drm_connector_status lt8912_connector_detect(struct drm_connector *connector, bool force) { struct lt8912 *lt = connector_to_lt8912(connector); if (lt->hdmi_port->ops & DRM_BRIDGE_OP_DETECT) return drm_bridge_detect(lt->hdmi_port); return lt8912_check_cable_status(lt); } static const struct drm_connector_funcs lt8912_connector_funcs = { .detect = lt8912_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = drm_connector_cleanup, .reset = drm_atomic_helper_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; static enum drm_mode_status lt8912_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { if (mode->clock > 150000) return MODE_CLOCK_HIGH; if (mode->hdisplay > 1920) return MODE_BAD_HVALUE; if (mode->vdisplay > 1080) return MODE_BAD_VVALUE; return MODE_OK; } static int lt8912_connector_get_modes(struct drm_connector *connector) { struct edid *edid; int ret = -1; int num = 0; struct lt8912 *lt 
= connector_to_lt8912(connector); u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24; edid = drm_bridge_get_edid(lt->hdmi_port, connector); if (edid) { drm_connector_update_edid_property(connector, edid); num = drm_add_edid_modes(connector, edid); } else { return ret; } ret = drm_display_info_set_bus_formats(&connector->display_info, &bus_format, 1); if (ret) num = ret; kfree(edid); return num; } static const struct drm_connector_helper_funcs lt8912_connector_helper_funcs = { .get_modes = lt8912_connector_get_modes, .mode_valid = lt8912_connector_mode_valid, }; static void lt8912_bridge_mode_set(struct drm_bridge *bridge, const struct drm_display_mode *mode, const struct drm_display_mode *adj) { struct lt8912 *lt = bridge_to_lt8912(bridge); drm_display_mode_to_videomode(adj, &lt->mode); } static void lt8912_bridge_enable(struct drm_bridge *bridge) { struct lt8912 *lt = bridge_to_lt8912(bridge); lt8912_video_on(lt); } static int lt8912_attach_dsi(struct lt8912 *lt) { struct device *dev = lt->dev; struct mipi_dsi_host *host; struct mipi_dsi_device *dsi; int ret = -1; const struct mipi_dsi_device_info info = { .type = "lt8912", .channel = 0, .node = NULL, }; host = of_find_mipi_dsi_host_by_node(lt->host_node); if (!host) { dev_err(dev, "failed to find dsi host\n"); return -EPROBE_DEFER; } dsi = devm_mipi_dsi_device_register_full(dev, host, &info); if (IS_ERR(dsi)) { ret = PTR_ERR(dsi); dev_err(dev, "failed to create dsi device (%d)\n", ret); return ret; } lt->dsi = dsi; dsi->lanes = lt->data_lanes; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET; ret = devm_mipi_dsi_attach(dev, dsi); if (ret < 0) { dev_err(dev, "failed to attach dsi to host\n"); return ret; } return 0; } static void lt8912_bridge_hpd_cb(void *data, enum drm_connector_status status) { struct lt8912 *lt = data; if (lt->bridge.dev) drm_helper_hpd_irq_event(lt->bridge.dev); } static int lt8912_bridge_connector_init(struct drm_bridge 
*bridge) { int ret; struct lt8912 *lt = bridge_to_lt8912(bridge); struct drm_connector *connector = &lt->connector; if (lt->hdmi_port->ops & DRM_BRIDGE_OP_HPD) { drm_bridge_hpd_enable(lt->hdmi_port, lt8912_bridge_hpd_cb, lt); connector->polled = DRM_CONNECTOR_POLL_HPD; } else { connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; } ret = drm_connector_init(bridge->dev, connector, &lt8912_connector_funcs, lt->hdmi_port->type); if (ret) goto exit; drm_connector_helper_add(connector, &lt8912_connector_helper_funcs); connector->dpms = DRM_MODE_DPMS_OFF; drm_connector_attach_encoder(connector, bridge->encoder); exit: return ret; } static int lt8912_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct lt8912 *lt = bridge_to_lt8912(bridge); int ret; if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) { ret = lt8912_bridge_connector_init(bridge); if (ret) { dev_err(lt->dev, "Failed to init bridge ! (%d)\n", ret); return ret; } } ret = lt8912_hard_power_on(lt); if (ret) return ret; ret = lt8912_soft_power_on(lt); if (ret) goto error; lt->is_attached = true; return 0; error: lt8912_hard_power_off(lt); return ret; } static void lt8912_bridge_detach(struct drm_bridge *bridge) { struct lt8912 *lt = bridge_to_lt8912(bridge); if (lt->is_attached) { lt8912_hard_power_off(lt); if (lt->hdmi_port->ops & DRM_BRIDGE_OP_HPD) drm_bridge_hpd_disable(lt->hdmi_port); drm_connector_unregister(&lt->connector); drm_connector_cleanup(&lt->connector); } } static enum drm_connector_status lt8912_bridge_detect(struct drm_bridge *bridge) { struct lt8912 *lt = bridge_to_lt8912(bridge); if (lt->hdmi_port->ops & DRM_BRIDGE_OP_DETECT) return drm_bridge_detect(lt->hdmi_port); return lt8912_check_cable_status(lt); } static struct edid *lt8912_bridge_get_edid(struct drm_bridge *bridge, struct drm_connector *connector) { struct lt8912 *lt = bridge_to_lt8912(bridge); /* * edid must be read through the ddc bus but it must be * given to the hdmi connector 
node. */ if (lt->hdmi_port->ops & DRM_BRIDGE_OP_EDID) return drm_bridge_get_edid(lt->hdmi_port, connector); dev_warn(lt->dev, "The connected bridge does not supports DRM_BRIDGE_OP_EDID\n"); return NULL; } static const struct drm_bridge_funcs lt8912_bridge_funcs = { .attach = lt8912_bridge_attach, .detach = lt8912_bridge_detach, .mode_set = lt8912_bridge_mode_set, .enable = lt8912_bridge_enable, .detect = lt8912_bridge_detect, .get_edid = lt8912_bridge_get_edid, }; static int lt8912_parse_dt(struct lt8912 *lt) { struct gpio_desc *gp_reset; struct device *dev = lt->dev; int ret; int data_lanes; struct device_node *port_node; gp_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(gp_reset)) { ret = PTR_ERR(gp_reset); if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to get reset gpio: %d\n", ret); return ret; } lt->gp_reset = gp_reset; data_lanes = drm_of_get_data_lanes_count_ep(dev->of_node, 0, -1, 1, 4); if (data_lanes < 0) { dev_err(lt->dev, "%s: Bad data-lanes property\n", __func__); return data_lanes; } lt->data_lanes = data_lanes; lt->host_node = of_graph_get_remote_node(dev->of_node, 0, -1); if (!lt->host_node) { dev_err(lt->dev, "%s: Failed to get remote port\n", __func__); return -ENODEV; } port_node = of_graph_get_remote_node(dev->of_node, 1, -1); if (!port_node) { dev_err(lt->dev, "%s: Failed to get connector port\n", __func__); ret = -ENODEV; goto err_free_host_node; } lt->hdmi_port = of_drm_find_bridge(port_node); if (!lt->hdmi_port) { ret = -EPROBE_DEFER; dev_err_probe(lt->dev, ret, "%s: Failed to get hdmi port\n", __func__); goto err_free_host_node; } if (!of_device_is_compatible(port_node, "hdmi-connector")) { dev_err(lt->dev, "%s: Failed to get hdmi port\n", __func__); ret = -EINVAL; goto err_free_host_node; } of_node_put(port_node); return 0; err_free_host_node: of_node_put(port_node); of_node_put(lt->host_node); return ret; } static int lt8912_put_dt(struct lt8912 *lt) { of_node_put(lt->host_node); return 0; } static int 
lt8912_probe(struct i2c_client *client) { static struct lt8912 *lt; int ret = 0; struct device *dev = &client->dev; lt = devm_kzalloc(dev, sizeof(struct lt8912), GFP_KERNEL); if (!lt) return -ENOMEM; lt->dev = dev; lt->i2c_client[0] = client; ret = lt8912_parse_dt(lt); if (ret) goto err_dt_parse; ret = lt8912_init_i2c(lt, client); if (ret) goto err_i2c; i2c_set_clientdata(client, lt); lt->bridge.funcs = &lt8912_bridge_funcs; lt->bridge.of_node = dev->of_node; lt->bridge.ops = (DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT); drm_bridge_add(&lt->bridge); ret = lt8912_attach_dsi(lt); if (ret) goto err_attach; return 0; err_attach: drm_bridge_remove(&lt->bridge); lt8912_free_i2c(lt); err_i2c: lt8912_put_dt(lt); err_dt_parse: return ret; } static void lt8912_remove(struct i2c_client *client) { struct lt8912 *lt = i2c_get_clientdata(client); lt8912_bridge_detach(&lt->bridge); drm_bridge_remove(&lt->bridge); lt8912_free_i2c(lt); lt8912_put_dt(lt); } static const struct of_device_id lt8912_dt_match[] = { {.compatible = "lontium,lt8912b"}, {} }; MODULE_DEVICE_TABLE(of, lt8912_dt_match); static const struct i2c_device_id lt8912_id[] = { {"lt8912", 0}, {}, }; MODULE_DEVICE_TABLE(i2c, lt8912_id); static struct i2c_driver lt8912_i2c_driver = { .driver = { .name = "lt8912", .of_match_table = lt8912_dt_match, }, .probe = lt8912_probe, .remove = lt8912_remove, .id_table = lt8912_id, }; module_i2c_driver(lt8912_i2c_driver); MODULE_AUTHOR("Adrien Grassein <[email protected]>"); MODULE_DESCRIPTION("lt8912 drm driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/bridge/lontium-lt8912b.c
// SPDX-License-Identifier: GPL-2.0 /* * TC358775 DSI to LVDS bridge driver * * Copyright (C) 2020 SMART Wireless Computing * Author: Vinay Simha BN <[email protected]> * */ /* #define DEBUG */ #include <linux/bitfield.h> #include <linux/clk.h> #include <linux/device.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/kernel.h> #include <linux/media-bus-format.h> #include <linux/module.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> #include <asm/unaligned.h> #include <drm/display/drm_dp_helper.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> #include <drm/drm_probe_helper.h> #define FLD_VAL(val, start, end) FIELD_PREP(GENMASK(start, end), val) /* Registers */ /* DSI D-PHY Layer Registers */ #define D0W_DPHYCONTTX 0x0004 /* Data Lane 0 DPHY Tx Control */ #define CLW_DPHYCONTRX 0x0020 /* Clock Lane DPHY Rx Control */ #define D0W_DPHYCONTRX 0x0024 /* Data Lane 0 DPHY Rx Control */ #define D1W_DPHYCONTRX 0x0028 /* Data Lane 1 DPHY Rx Control */ #define D2W_DPHYCONTRX 0x002C /* Data Lane 2 DPHY Rx Control */ #define D3W_DPHYCONTRX 0x0030 /* Data Lane 3 DPHY Rx Control */ #define COM_DPHYCONTRX 0x0038 /* DPHY Rx Common Control */ #define CLW_CNTRL 0x0040 /* Clock Lane Control */ #define D0W_CNTRL 0x0044 /* Data Lane 0 Control */ #define D1W_CNTRL 0x0048 /* Data Lane 1 Control */ #define D2W_CNTRL 0x004C /* Data Lane 2 Control */ #define D3W_CNTRL 0x0050 /* Data Lane 3 Control */ #define DFTMODE_CNTRL 0x0054 /* DFT Mode Control */ /* DSI PPI Layer Registers */ #define PPI_STARTPPI 0x0104 /* START control bit of PPI-TX function. */ #define PPI_START_FUNCTION 1 #define PPI_BUSYPPI 0x0108 #define PPI_LINEINITCNT 0x0110 /* Line Initialization Wait Counter */ #define PPI_LPTXTIMECNT 0x0114 #define PPI_LANEENABLE 0x0134 /* Enables each lane at the PPI layer. 
*/ #define PPI_TX_RX_TA 0x013C /* DSI Bus Turn Around timing parameters */ /* Analog timer function enable */ #define PPI_CLS_ATMR 0x0140 /* Delay for Clock Lane in LPRX */ #define PPI_D0S_ATMR 0x0144 /* Delay for Data Lane 0 in LPRX */ #define PPI_D1S_ATMR 0x0148 /* Delay for Data Lane 1 in LPRX */ #define PPI_D2S_ATMR 0x014C /* Delay for Data Lane 2 in LPRX */ #define PPI_D3S_ATMR 0x0150 /* Delay for Data Lane 3 in LPRX */ #define PPI_D0S_CLRSIPOCOUNT 0x0164 /* For lane 0 */ #define PPI_D1S_CLRSIPOCOUNT 0x0168 /* For lane 1 */ #define PPI_D2S_CLRSIPOCOUNT 0x016C /* For lane 2 */ #define PPI_D3S_CLRSIPOCOUNT 0x0170 /* For lane 3 */ #define CLS_PRE 0x0180 /* Digital Counter inside of PHY IO */ #define D0S_PRE 0x0184 /* Digital Counter inside of PHY IO */ #define D1S_PRE 0x0188 /* Digital Counter inside of PHY IO */ #define D2S_PRE 0x018C /* Digital Counter inside of PHY IO */ #define D3S_PRE 0x0190 /* Digital Counter inside of PHY IO */ #define CLS_PREP 0x01A0 /* Digital Counter inside of PHY IO */ #define D0S_PREP 0x01A4 /* Digital Counter inside of PHY IO */ #define D1S_PREP 0x01A8 /* Digital Counter inside of PHY IO */ #define D2S_PREP 0x01AC /* Digital Counter inside of PHY IO */ #define D3S_PREP 0x01B0 /* Digital Counter inside of PHY IO */ #define CLS_ZERO 0x01C0 /* Digital Counter inside of PHY IO */ #define D0S_ZERO 0x01C4 /* Digital Counter inside of PHY IO */ #define D1S_ZERO 0x01C8 /* Digital Counter inside of PHY IO */ #define D2S_ZERO 0x01CC /* Digital Counter inside of PHY IO */ #define D3S_ZERO 0x01D0 /* Digital Counter inside of PHY IO */ #define PPI_CLRFLG 0x01E0 /* PRE Counters has reached set values */ #define PPI_CLRSIPO 0x01E4 /* Clear SIPO values, Slave mode use only. 
*/ #define HSTIMEOUT 0x01F0 /* HS Rx Time Out Counter */ #define HSTIMEOUTENABLE 0x01F4 /* Enable HS Rx Time Out Counter */ #define DSI_STARTDSI 0x0204 /* START control bit of DSI-TX function */ #define DSI_RX_START 1 #define DSI_BUSYDSI 0x0208 #define DSI_LANEENABLE 0x0210 /* Enables each lane at the Protocol layer. */ #define DSI_LANESTATUS0 0x0214 /* Displays lane is in HS RX mode. */ #define DSI_LANESTATUS1 0x0218 /* Displays lane is in ULPS or STOP state */ #define DSI_INTSTATUS 0x0220 /* Interrupt Status */ #define DSI_INTMASK 0x0224 /* Interrupt Mask */ #define DSI_INTCLR 0x0228 /* Interrupt Clear */ #define DSI_LPTXTO 0x0230 /* Low Power Tx Time Out Counter */ #define DSIERRCNT 0x0300 /* DSI Error Count */ #define APLCTRL 0x0400 /* Application Layer Control */ #define RDPKTLN 0x0404 /* Command Read Packet Length */ #define VPCTRL 0x0450 /* Video Path Control */ #define HTIM1 0x0454 /* Horizontal Timing Control 1 */ #define HTIM2 0x0458 /* Horizontal Timing Control 2 */ #define VTIM1 0x045C /* Vertical Timing Control 1 */ #define VTIM2 0x0460 /* Vertical Timing Control 2 */ #define VFUEN 0x0464 /* Video Frame Timing Update Enable */ #define VFUEN_EN BIT(0) /* Upload Enable */ /* Mux Input Select for LVDS LINK Input */ #define LV_MX0003 0x0480 /* Bit 0 to 3 */ #define LV_MX0407 0x0484 /* Bit 4 to 7 */ #define LV_MX0811 0x0488 /* Bit 8 to 11 */ #define LV_MX1215 0x048C /* Bit 12 to 15 */ #define LV_MX1619 0x0490 /* Bit 16 to 19 */ #define LV_MX2023 0x0494 /* Bit 20 to 23 */ #define LV_MX2427 0x0498 /* Bit 24 to 27 */ #define LV_MX(b0, b1, b2, b3) (FLD_VAL(b0, 4, 0) | FLD_VAL(b1, 12, 8) | \ FLD_VAL(b2, 20, 16) | FLD_VAL(b3, 28, 24)) /* Input bit numbers used in mux registers */ enum { LVI_R0, LVI_R1, LVI_R2, LVI_R3, LVI_R4, LVI_R5, LVI_R6, LVI_R7, LVI_G0, LVI_G1, LVI_G2, LVI_G3, LVI_G4, LVI_G5, LVI_G6, LVI_G7, LVI_B0, LVI_B1, LVI_B2, LVI_B3, LVI_B4, LVI_B5, LVI_B6, LVI_B7, LVI_HS, LVI_VS, LVI_DE, LVI_L0 }; #define LVCFG 0x049C /* LVDS Configuration */ #define 
LVPHY0 0x04A0 /* LVDS PHY 0 */ #define LV_PHY0_RST(v) FLD_VAL(v, 22, 22) /* PHY reset */ #define LV_PHY0_IS(v) FLD_VAL(v, 15, 14) #define LV_PHY0_ND(v) FLD_VAL(v, 4, 0) /* Frequency range select */ #define LV_PHY0_PRBS_ON(v) FLD_VAL(v, 20, 16) /* Clock/Data Flag pins */ #define LVPHY1 0x04A4 /* LVDS PHY 1 */ #define SYSSTAT 0x0500 /* System Status */ #define SYSRST 0x0504 /* System Reset */ #define SYS_RST_I2CS BIT(0) /* Reset I2C-Slave controller */ #define SYS_RST_I2CM BIT(1) /* Reset I2C-Master controller */ #define SYS_RST_LCD BIT(2) /* Reset LCD controller */ #define SYS_RST_BM BIT(3) /* Reset Bus Management controller */ #define SYS_RST_DSIRX BIT(4) /* Reset DSI-RX and App controller */ #define SYS_RST_REG BIT(5) /* Reset Register module */ /* GPIO Registers */ #define GPIOC 0x0520 /* GPIO Control */ #define GPIOO 0x0524 /* GPIO Output */ #define GPIOI 0x0528 /* GPIO Input */ /* I2C Registers */ #define I2CTIMCTRL 0x0540 /* I2C IF Timing and Enable Control */ #define I2CMADDR 0x0544 /* I2C Master Addressing */ #define WDATAQ 0x0548 /* Write Data Queue */ #define RDATAQ 0x054C /* Read Data Queue */ /* Chip ID and Revision ID Register */ #define IDREG 0x0580 #define LPX_PERIOD 4 #define TTA_GET 0x40000 #define TTA_SURE 6 #define SINGLE_LINK 1 #define DUAL_LINK 2 #define TC358775XBG_ID 0x00007500 /* Debug Registers */ #define DEBUG00 0x05A0 /* Debug */ #define DEBUG01 0x05A4 /* LVDS Data */ #define DSI_CLEN_BIT BIT(0) #define DIVIDE_BY_3 3 /* PCLK=DCLK/3 */ #define DIVIDE_BY_6 6 /* PCLK=DCLK/6 */ #define LVCFG_LVEN_BIT BIT(0) #define L0EN BIT(1) #define TC358775_VPCTRL_VSDELAY__MASK 0x3FF00000 #define TC358775_VPCTRL_VSDELAY__SHIFT 20 static inline u32 TC358775_VPCTRL_VSDELAY(uint32_t val) { return ((val) << TC358775_VPCTRL_VSDELAY__SHIFT) & TC358775_VPCTRL_VSDELAY__MASK; } #define TC358775_VPCTRL_OPXLFMT__MASK 0x00000100 #define TC358775_VPCTRL_OPXLFMT__SHIFT 8 static inline u32 TC358775_VPCTRL_OPXLFMT(uint32_t val) { return ((val) << 
TC358775_VPCTRL_OPXLFMT__SHIFT) & TC358775_VPCTRL_OPXLFMT__MASK; } #define TC358775_VPCTRL_MSF__MASK 0x00000001 #define TC358775_VPCTRL_MSF__SHIFT 0 static inline u32 TC358775_VPCTRL_MSF(uint32_t val) { return ((val) << TC358775_VPCTRL_MSF__SHIFT) & TC358775_VPCTRL_MSF__MASK; } #define TC358775_LVCFG_PCLKDIV__MASK 0x000000f0 #define TC358775_LVCFG_PCLKDIV__SHIFT 4 static inline u32 TC358775_LVCFG_PCLKDIV(uint32_t val) { return ((val) << TC358775_LVCFG_PCLKDIV__SHIFT) & TC358775_LVCFG_PCLKDIV__MASK; } #define TC358775_LVCFG_LVDLINK__MASK 0x00000002 #define TC358775_LVCFG_LVDLINK__SHIFT 1 static inline u32 TC358775_LVCFG_LVDLINK(uint32_t val) { return ((val) << TC358775_LVCFG_LVDLINK__SHIFT) & TC358775_LVCFG_LVDLINK__MASK; } enum tc358775_ports { TC358775_DSI_IN, TC358775_LVDS_OUT0, TC358775_LVDS_OUT1, }; struct tc_data { struct i2c_client *i2c; struct device *dev; struct drm_bridge bridge; struct drm_bridge *panel_bridge; struct device_node *host_node; struct mipi_dsi_device *dsi; u8 num_dsi_lanes; struct regulator *vdd; struct regulator *vddio; struct gpio_desc *reset_gpio; struct gpio_desc *stby_gpio; u8 lvds_link; /* single-link or dual-link */ u8 bpc; }; static inline struct tc_data *bridge_to_tc(struct drm_bridge *b) { return container_of(b, struct tc_data, bridge); } static void tc_bridge_pre_enable(struct drm_bridge *bridge) { struct tc_data *tc = bridge_to_tc(bridge); struct device *dev = &tc->dsi->dev; int ret; ret = regulator_enable(tc->vddio); if (ret < 0) dev_err(dev, "regulator vddio enable failed, %d\n", ret); usleep_range(10000, 11000); ret = regulator_enable(tc->vdd); if (ret < 0) dev_err(dev, "regulator vdd enable failed, %d\n", ret); usleep_range(10000, 11000); gpiod_set_value(tc->stby_gpio, 0); usleep_range(10000, 11000); gpiod_set_value(tc->reset_gpio, 0); usleep_range(10, 20); } static void tc_bridge_post_disable(struct drm_bridge *bridge) { struct tc_data *tc = bridge_to_tc(bridge); struct device *dev = &tc->dsi->dev; int ret; 
gpiod_set_value(tc->reset_gpio, 1); usleep_range(10, 20); gpiod_set_value(tc->stby_gpio, 1); usleep_range(10000, 11000); ret = regulator_disable(tc->vdd); if (ret < 0) dev_err(dev, "regulator vdd disable failed, %d\n", ret); usleep_range(10000, 11000); ret = regulator_disable(tc->vddio); if (ret < 0) dev_err(dev, "regulator vddio disable failed, %d\n", ret); usleep_range(10000, 11000); } static void d2l_read(struct i2c_client *i2c, u16 addr, u32 *val) { int ret; u8 buf_addr[2]; put_unaligned_be16(addr, buf_addr); ret = i2c_master_send(i2c, buf_addr, sizeof(buf_addr)); if (ret < 0) goto fail; ret = i2c_master_recv(i2c, (u8 *)val, sizeof(*val)); if (ret < 0) goto fail; pr_debug("d2l: I2C : addr:%04x value:%08x\n", addr, *val); return; fail: dev_err(&i2c->dev, "Error %d reading from subaddress 0x%x\n", ret, addr); } static void d2l_write(struct i2c_client *i2c, u16 addr, u32 val) { u8 data[6]; int ret; put_unaligned_be16(addr, data); put_unaligned_le32(val, data + 2); ret = i2c_master_send(i2c, data, ARRAY_SIZE(data)); if (ret < 0) dev_err(&i2c->dev, "Error %d writing to subaddress 0x%x\n", ret, addr); } /* helper function to access bus_formats */ static struct drm_connector *get_connector(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_connector *connector; list_for_each_entry(connector, &dev->mode_config.connector_list, head) if (connector->encoder == encoder) return connector; return NULL; } static void tc_bridge_enable(struct drm_bridge *bridge) { struct tc_data *tc = bridge_to_tc(bridge); u32 hback_porch, hsync_len, hfront_porch, hactive, htime1, htime2; u32 vback_porch, vsync_len, vfront_porch, vactive, vtime1, vtime2; u32 val = 0; u16 dsiclk, clkdiv, byteclk, t1, t2, t3, vsdelay; struct drm_display_mode *mode; struct drm_connector *connector = get_connector(bridge->encoder); mode = &bridge->encoder->crtc->state->adjusted_mode; hback_porch = mode->htotal - mode->hsync_end; hsync_len = mode->hsync_end - mode->hsync_start; 
vback_porch = mode->vtotal - mode->vsync_end; vsync_len = mode->vsync_end - mode->vsync_start; htime1 = (hback_porch << 16) + hsync_len; vtime1 = (vback_porch << 16) + vsync_len; hfront_porch = mode->hsync_start - mode->hdisplay; hactive = mode->hdisplay; vfront_porch = mode->vsync_start - mode->vdisplay; vactive = mode->vdisplay; htime2 = (hfront_porch << 16) + hactive; vtime2 = (vfront_porch << 16) + vactive; d2l_read(tc->i2c, IDREG, &val); dev_info(tc->dev, "DSI2LVDS Chip ID.%02x Revision ID. %02x **\n", (val >> 8) & 0xFF, val & 0xFF); d2l_write(tc->i2c, SYSRST, SYS_RST_REG | SYS_RST_DSIRX | SYS_RST_BM | SYS_RST_LCD | SYS_RST_I2CM); usleep_range(30000, 40000); d2l_write(tc->i2c, PPI_TX_RX_TA, TTA_GET | TTA_SURE); d2l_write(tc->i2c, PPI_LPTXTIMECNT, LPX_PERIOD); d2l_write(tc->i2c, PPI_D0S_CLRSIPOCOUNT, 3); d2l_write(tc->i2c, PPI_D1S_CLRSIPOCOUNT, 3); d2l_write(tc->i2c, PPI_D2S_CLRSIPOCOUNT, 3); d2l_write(tc->i2c, PPI_D3S_CLRSIPOCOUNT, 3); val = ((L0EN << tc->num_dsi_lanes) - L0EN) | DSI_CLEN_BIT; d2l_write(tc->i2c, PPI_LANEENABLE, val); d2l_write(tc->i2c, DSI_LANEENABLE, val); d2l_write(tc->i2c, PPI_STARTPPI, PPI_START_FUNCTION); d2l_write(tc->i2c, DSI_STARTDSI, DSI_RX_START); if (tc->bpc == 8) val = TC358775_VPCTRL_OPXLFMT(1); else /* bpc = 6; */ val = TC358775_VPCTRL_MSF(1); dsiclk = mode->crtc_clock * 3 * tc->bpc / tc->num_dsi_lanes / 1000; clkdiv = dsiclk / (tc->lvds_link == DUAL_LINK ? 
DIVIDE_BY_6 : DIVIDE_BY_3); byteclk = dsiclk / 4; t1 = hactive * (tc->bpc * 3 / 8) / tc->num_dsi_lanes; t2 = ((100000 / clkdiv)) * (hactive + hback_porch + hsync_len + hfront_porch) / 1000; t3 = ((t2 * byteclk) / 100) - (hactive * (tc->bpc * 3 / 8) / tc->num_dsi_lanes); vsdelay = (clkdiv * (t1 + t3) / byteclk) - hback_porch - hsync_len - hactive; val |= TC358775_VPCTRL_VSDELAY(vsdelay); d2l_write(tc->i2c, VPCTRL, val); d2l_write(tc->i2c, HTIM1, htime1); d2l_write(tc->i2c, VTIM1, vtime1); d2l_write(tc->i2c, HTIM2, htime2); d2l_write(tc->i2c, VTIM2, vtime2); d2l_write(tc->i2c, VFUEN, VFUEN_EN); d2l_write(tc->i2c, SYSRST, SYS_RST_LCD); d2l_write(tc->i2c, LVPHY0, LV_PHY0_PRBS_ON(4) | LV_PHY0_ND(6)); dev_dbg(tc->dev, "bus_formats %04x bpc %d\n", connector->display_info.bus_formats[0], tc->bpc); /* * Default hardware register settings of tc358775 configured * with MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA jeida-24 format */ if (connector->display_info.bus_formats[0] == MEDIA_BUS_FMT_RGB888_1X7X4_SPWG) { /* VESA-24 */ d2l_write(tc->i2c, LV_MX0003, LV_MX(LVI_R0, LVI_R1, LVI_R2, LVI_R3)); d2l_write(tc->i2c, LV_MX0407, LV_MX(LVI_R4, LVI_R7, LVI_R5, LVI_G0)); d2l_write(tc->i2c, LV_MX0811, LV_MX(LVI_G1, LVI_G2, LVI_G6, LVI_G7)); d2l_write(tc->i2c, LV_MX1215, LV_MX(LVI_G3, LVI_G4, LVI_G5, LVI_B0)); d2l_write(tc->i2c, LV_MX1619, LV_MX(LVI_B6, LVI_B7, LVI_B1, LVI_B2)); d2l_write(tc->i2c, LV_MX2023, LV_MX(LVI_B3, LVI_B4, LVI_B5, LVI_L0)); d2l_write(tc->i2c, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_R6)); } else { /* MEDIA_BUS_FMT_RGB666_1X7X3_SPWG - JEIDA-18 */ d2l_write(tc->i2c, LV_MX0003, LV_MX(LVI_R0, LVI_R1, LVI_R2, LVI_R3)); d2l_write(tc->i2c, LV_MX0407, LV_MX(LVI_R4, LVI_L0, LVI_R5, LVI_G0)); d2l_write(tc->i2c, LV_MX0811, LV_MX(LVI_G1, LVI_G2, LVI_L0, LVI_L0)); d2l_write(tc->i2c, LV_MX1215, LV_MX(LVI_G3, LVI_G4, LVI_G5, LVI_B0)); d2l_write(tc->i2c, LV_MX1619, LV_MX(LVI_L0, LVI_L0, LVI_B1, LVI_B2)); d2l_write(tc->i2c, LV_MX2023, LV_MX(LVI_B3, LVI_B4, LVI_B5, LVI_L0)); 
d2l_write(tc->i2c, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_L0)); } d2l_write(tc->i2c, VFUEN, VFUEN_EN); val = LVCFG_LVEN_BIT; if (tc->lvds_link == DUAL_LINK) { val |= TC358775_LVCFG_LVDLINK(1); val |= TC358775_LVCFG_PCLKDIV(DIVIDE_BY_6); } else { val |= TC358775_LVCFG_PCLKDIV(DIVIDE_BY_3); } d2l_write(tc->i2c, LVCFG, val); } static enum drm_mode_status tc_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode) { struct tc_data *tc = bridge_to_tc(bridge); /* * Maximum pixel clock speed 135MHz for single-link * 270MHz for dual-link */ if ((mode->clock > 135000 && tc->lvds_link == SINGLE_LINK) || (mode->clock > 270000 && tc->lvds_link == DUAL_LINK)) return MODE_CLOCK_HIGH; switch (info->bus_formats[0]) { case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG: case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA: /* RGB888 */ tc->bpc = 8; break; case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG: /* RGB666 */ tc->bpc = 6; break; default: dev_warn(tc->dev, "unsupported LVDS bus format 0x%04x\n", info->bus_formats[0]); return MODE_NOMODE; } return MODE_OK; } static int tc358775_parse_dt(struct device_node *np, struct tc_data *tc) { struct device_node *endpoint; struct device_node *parent; struct device_node *remote; int dsi_lanes = -1; /* * To get the data-lanes of dsi, we need to access the dsi0_out of port1 * of dsi0 endpoint from bridge port0 of d2l_in */ endpoint = of_graph_get_endpoint_by_regs(tc->dev->of_node, TC358775_DSI_IN, -1); if (endpoint) { /* dsi0_out node */ parent = of_graph_get_remote_port_parent(endpoint); of_node_put(endpoint); if (parent) { /* dsi0 port 1 */ dsi_lanes = drm_of_get_data_lanes_count_ep(parent, 1, -1, 1, 4); of_node_put(parent); } } if (dsi_lanes < 0) return dsi_lanes; tc->num_dsi_lanes = dsi_lanes; tc->host_node = of_graph_get_remote_node(np, 0, 0); if (!tc->host_node) return -ENODEV; of_node_put(tc->host_node); tc->lvds_link = SINGLE_LINK; endpoint = of_graph_get_endpoint_by_regs(tc->dev->of_node, TC358775_LVDS_OUT1, -1); 
if (endpoint) { remote = of_graph_get_remote_port_parent(endpoint); of_node_put(endpoint); if (remote) { if (of_device_is_available(remote)) tc->lvds_link = DUAL_LINK; of_node_put(remote); } } dev_dbg(tc->dev, "no.of dsi lanes: %d\n", tc->num_dsi_lanes); dev_dbg(tc->dev, "operating in %d-link mode\n", tc->lvds_link); return 0; } static int tc_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct tc_data *tc = bridge_to_tc(bridge); /* Attach the panel-bridge to the dsi bridge */ return drm_bridge_attach(bridge->encoder, tc->panel_bridge, &tc->bridge, flags); } static const struct drm_bridge_funcs tc_bridge_funcs = { .attach = tc_bridge_attach, .pre_enable = tc_bridge_pre_enable, .enable = tc_bridge_enable, .mode_valid = tc_mode_valid, .post_disable = tc_bridge_post_disable, }; static int tc_attach_host(struct tc_data *tc) { struct device *dev = &tc->i2c->dev; struct mipi_dsi_host *host; struct mipi_dsi_device *dsi; int ret; const struct mipi_dsi_device_info info = { .type = "tc358775", .channel = 0, .node = NULL, }; host = of_find_mipi_dsi_host_by_node(tc->host_node); if (!host) { dev_err(dev, "failed to find dsi host\n"); return -EPROBE_DEFER; } dsi = devm_mipi_dsi_device_register_full(dev, host, &info); if (IS_ERR(dsi)) { dev_err(dev, "failed to create dsi device\n"); return PTR_ERR(dsi); } tc->dsi = dsi; dsi->lanes = tc->num_dsi_lanes; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO; ret = devm_mipi_dsi_attach(dev, dsi); if (ret < 0) { dev_err(dev, "failed to attach dsi to host\n"); return ret; } return 0; } static int tc_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct tc_data *tc; int ret; tc = devm_kzalloc(dev, sizeof(*tc), GFP_KERNEL); if (!tc) return -ENOMEM; tc->dev = dev; tc->i2c = client; tc->panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, TC358775_LVDS_OUT0, 0); if (IS_ERR(tc->panel_bridge)) return PTR_ERR(tc->panel_bridge); ret = tc358775_parse_dt(dev->of_node, 
tc); if (ret) return ret; tc->vddio = devm_regulator_get(dev, "vddio-supply"); if (IS_ERR(tc->vddio)) { ret = PTR_ERR(tc->vddio); dev_err(dev, "vddio-supply not found\n"); return ret; } tc->vdd = devm_regulator_get(dev, "vdd-supply"); if (IS_ERR(tc->vdd)) { ret = PTR_ERR(tc->vdd); dev_err(dev, "vdd-supply not found\n"); return ret; } tc->stby_gpio = devm_gpiod_get(dev, "stby", GPIOD_OUT_HIGH); if (IS_ERR(tc->stby_gpio)) { ret = PTR_ERR(tc->stby_gpio); dev_err(dev, "cannot get stby-gpio %d\n", ret); return ret; } tc->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(tc->reset_gpio)) { ret = PTR_ERR(tc->reset_gpio); dev_err(dev, "cannot get reset-gpios %d\n", ret); return ret; } tc->bridge.funcs = &tc_bridge_funcs; tc->bridge.of_node = dev->of_node; drm_bridge_add(&tc->bridge); i2c_set_clientdata(client, tc); ret = tc_attach_host(tc); if (ret) goto err_bridge_remove; return 0; err_bridge_remove: drm_bridge_remove(&tc->bridge); return ret; } static void tc_remove(struct i2c_client *client) { struct tc_data *tc = i2c_get_clientdata(client); drm_bridge_remove(&tc->bridge); } static const struct i2c_device_id tc358775_i2c_ids[] = { { "tc358775", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, tc358775_i2c_ids); static const struct of_device_id tc358775_of_ids[] = { { .compatible = "toshiba,tc358775", }, { } }; MODULE_DEVICE_TABLE(of, tc358775_of_ids); static struct i2c_driver tc358775_driver = { .driver = { .name = "tc358775", .of_match_table = tc358775_of_ids, }, .id_table = tc358775_i2c_ids, .probe = tc_probe, .remove = tc_remove, }; module_i2c_driver(tc358775_driver); MODULE_AUTHOR("Vinay Simha BN <[email protected]>"); MODULE_DESCRIPTION("TC358775 DSI/LVDS bridge driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/bridge/tc358775.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * CrOS EC ANX7688 HDMI->DP bridge driver
 *
 * Copyright 2020 Google LLC
 */

#include <drm/drm_bridge.h>
#include <drm/drm_print.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/types.h>

/* Register addresses */
#define ANX7688_VENDOR_ID_REG		0x00
#define ANX7688_DEVICE_ID_REG		0x02

#define ANX7688_FW_VERSION_REG		0x80

#define ANX7688_DP_BANDWIDTH_REG	0x85
#define ANX7688_DP_LANE_COUNT_REG	0x86

#define ANX7688_VENDOR_ID		0x1f29
#define ANX7688_DEVICE_ID		0x7688

/* First supported firmware version (0.85) */
#define ANX7688_MINIMUM_FW_VERSION	0x0085

static const struct regmap_config cros_ec_anx7688_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
};

/* Per-device context, embedded bridge included. */
struct cros_ec_anx7688 {
	struct i2c_client *client;	/* backing I2C device */
	struct regmap *regmap;		/* 8-bit register map over I2C */
	struct drm_bridge bridge;	/* registered with the DRM core */
	bool filter;			/* true if FW reports link bandwidth */
};

static inline struct cros_ec_anx7688 *
bridge_to_cros_ec_anx7688(struct drm_bridge *bridge)
{
	return container_of(bridge, struct cros_ec_anx7688, bridge);
}

/*
 * Reject modes whose required bandwidth exceeds what the negotiated DP
 * link (bandwidth code x lane count, read from the chip) can carry.
 * Returns true to accept the mode.
 */
static bool cros_ec_anx7688_bridge_mode_fixup(struct drm_bridge *bridge,
					      const struct drm_display_mode *mode,
					      struct drm_display_mode *adjusted_mode)
{
	struct cros_ec_anx7688 *anx = bridge_to_cros_ec_anx7688(bridge);
	int totalbw, requiredbw;
	u8 dpbw, lanecount;
	u8 regs[2];
	int ret;

	/* Old firmware cannot report the link: accept everything */
	if (!anx->filter)
		return true;

	/* Read both regs 0x85 (bandwidth) and 0x86 (lane count) in one go */
	ret = regmap_bulk_read(anx->regmap, ANX7688_DP_BANDWIDTH_REG,
			       regs, 2);
	if (ret < 0) {
		DRM_ERROR("Failed to read bandwidth/lane count\n");
		return false;
	}
	dpbw = regs[0];
	lanecount = regs[1];

	/* Maximum 0x19 bandwidth (6.75 Gbps Turbo mode), 2 lanes */
	if (dpbw > 0x19 || lanecount > 2) {
		DRM_ERROR("Invalid bandwidth/lane count (%02x/%d)\n", dpbw,
			  lanecount);
		return false;
	}

	/*
	 * Compute available bandwidth (kHz): code is in 270 MHz units,
	 * 8b/10b encoding leaves 80% usable.
	 */
	totalbw = dpbw * lanecount * 270000 * 8 / 10;

	/* Required bandwidth (8 bpc, kHz): 3 bytes per pixel */
	requiredbw = mode->clock * 8 * 3;

	DRM_DEBUG_KMS("DP bandwidth: %d kHz (%02x/%d); mode requires %d Khz\n",
		      totalbw, dpbw, lanecount, requiredbw);

	if (totalbw == 0) {
		DRM_ERROR("Bandwidth/lane count are 0, not rejecting modes\n");
		return true;
	}

	return totalbw >= requiredbw;
}

static const struct drm_bridge_funcs cros_ec_anx7688_bridge_funcs = {
	.mode_fixup = cros_ec_anx7688_bridge_mode_fixup,
};

/*
 * Probe: verify the chip identity over I2C, read the firmware version to
 * decide whether bandwidth filtering is available, and register the bridge.
 */
static int cros_ec_anx7688_bridge_probe(struct i2c_client *client)
{
	struct device *dev = &client->dev;
	struct cros_ec_anx7688 *anx7688;
	u16 vendor, device, fw_version;
	u8 buffer[4];
	int ret;

	anx7688 = devm_kzalloc(dev, sizeof(*anx7688), GFP_KERNEL);
	if (!anx7688)
		return -ENOMEM;

	anx7688->client = client;
	i2c_set_clientdata(client, anx7688);

	anx7688->regmap = devm_regmap_init_i2c(client,
					       &cros_ec_anx7688_regmap_config);
	if (IS_ERR(anx7688->regmap)) {
		ret = PTR_ERR(anx7688->regmap);
		dev_err(dev, "regmap i2c init failed: %d\n", ret);
		return ret;
	}

	/* Read both vendor and device id (4 bytes, little-endian words) */
	ret = regmap_bulk_read(anx7688->regmap, ANX7688_VENDOR_ID_REG,
			       buffer, 4);
	if (ret) {
		dev_err(dev, "Failed to read chip vendor/device id\n");
		return ret;
	}

	vendor = (u16)buffer[1] << 8 | buffer[0];
	device = (u16)buffer[3] << 8 | buffer[2];
	if (vendor != ANX7688_VENDOR_ID || device != ANX7688_DEVICE_ID) {
		dev_err(dev, "Invalid vendor/device id %04x/%04x\n",
			vendor, device);
		return -ENODEV;
	}

	/* Firmware version is big-endian, unlike the id words above */
	ret = regmap_bulk_read(anx7688->regmap, ANX7688_FW_VERSION_REG,
			       buffer, 2);
	if (ret) {
		dev_err(dev, "Failed to read firmware version\n");
		return ret;
	}

	fw_version = (u16)buffer[0] << 8 | buffer[1];
	dev_info(dev, "ANX7688 firmware version 0x%04x\n", fw_version);

	anx7688->bridge.of_node = dev->of_node;

	/* FW version >= 0.85 supports bandwidth/lane count registers */
	if (fw_version >= ANX7688_MINIMUM_FW_VERSION)
		anx7688->filter = true;
	else
		/* Warn, but not fail, for backwards compatibility */
		DRM_WARN("Old ANX7688 FW version (0x%04x), not filtering\n",
			 fw_version);

	anx7688->bridge.funcs = &cros_ec_anx7688_bridge_funcs;
	drm_bridge_add(&anx7688->bridge);

	return 0;
}

/* Remove: drop the bridge; all other resources are devm-managed. */
static void cros_ec_anx7688_bridge_remove(struct i2c_client *client)
{
	struct cros_ec_anx7688 *anx7688 = i2c_get_clientdata(client);

	drm_bridge_remove(&anx7688->bridge);
}

static const struct of_device_id cros_ec_anx7688_bridge_match_table[] = {
	{ .compatible = "google,cros-ec-anx7688" },
	{ }
};
MODULE_DEVICE_TABLE(of, cros_ec_anx7688_bridge_match_table);

static struct i2c_driver cros_ec_anx7688_bridge_driver = {
	.probe = cros_ec_anx7688_bridge_probe,
	.remove = cros_ec_anx7688_bridge_remove,
	.driver = {
		.name = "cros-ec-anx7688-bridge",
		.of_match_table = cros_ec_anx7688_bridge_match_table,
	},
};

module_i2c_driver(cros_ec_anx7688_bridge_driver);

MODULE_DESCRIPTION("ChromeOS EC ANX7688 HDMI->DP bridge driver");
MODULE_AUTHOR("Nicolas Boichat <[email protected]>");
MODULE_AUTHOR("Enric Balletbo i Serra <[email protected]>");
MODULE_LICENSE("GPL");
linux-master
drivers/gpu/drm/bridge/cros-ec-anx7688.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Silicon Image SiI8620 HDMI/MHL bridge driver
 *
 * Copyright (C) 2015, Samsung Electronics Co., Ltd.
 * Andrzej Hajda <[email protected]>
 */

#include <asm/unaligned.h>

#include <drm/bridge/mhl.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/extcon.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_graph.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>

#include <media/rc-core.h>

#include "sil-sii8620.h"

#define SII8620_BURST_BUF_LEN 288

#define VAL_RX_HDMI_CTRL2_DEFVAL VAL_RX_HDMI_CTRL2_IDLE_CNT(3)

/* Pixel-clock limits (kHz) per MHL generation; PP = packed-pixel mode */
#define MHL1_MAX_PCLK 75000
#define MHL1_MAX_PCLK_PP_MODE 150000
#define MHL3_MAX_PCLK 200000
#define MHL3_MAX_PCLK_PP_MODE 300000

/* Link/cable state machine of the chip */
enum sii8620_mode {
	CM_DISCONNECTED,
	CM_DISCOVERY,
	CM_MHL1,
	CM_MHL3,
	CM_ECBUS_S
};

enum sii8620_sink_type {
	SINK_NONE,
	SINK_HDMI,
	SINK_DVI
};

/* State of the MSC transaction queue */
enum sii8620_mt_state {
	MT_STATE_READY,
	MT_STATE_BUSY,
	MT_STATE_DONE
};

struct sii8620 {
	struct drm_bridge bridge;
	struct device *dev;
	struct rc_dev *rc_dev;
	struct clk *clk_xtal;
	struct gpio_desc *gpio_reset;
	struct gpio_desc *gpio_int;
	struct regulator_bulk_data supplies[2];
	struct mutex lock; /* context lock, protects fields below */
	int error;	/* sticky error code; I/O helpers no-op once set */
	unsigned int use_packed_pixel:1;
	enum sii8620_mode mode;
	enum sii8620_sink_type sink_type;
	u8 cbus_status;
	u8 stat[MHL_DST_SIZE];
	u8 xstat[MHL_XDS_SIZE];
	u8 devcap[MHL_DCAP_SIZE];
	u8 xdevcap[MHL_XDC_SIZE];
	bool feature_complete;
	bool devcap_read;
	bool sink_detected;
	struct edid *edid;
	unsigned int gen2_write_burst:1;
	enum sii8620_mt_state mt_state;
	struct extcon_dev *extcon;
	struct notifier_block extcon_nb;
	struct work_struct extcon_wq;
	int cable_state;
	struct list_head mt_queue;
	struct {
		int r_size;
		int r_count;
		int rx_ack;
		int rx_count;
		u8 rx_buf[32];
		int tx_count;
		u8 tx_buf[32];
	} burst;	/* eMSC BLK burst bookkeeping */
};

struct sii8620_mt_msg;

typedef void (*sii8620_mt_msg_cb)(struct sii8620 *ctx,
				  struct sii8620_mt_msg *msg);

typedef void (*sii8620_cb)(struct sii8620 *ctx, int ret);

/* One queued MSC transaction with optional send/recv/continuation hooks */
struct sii8620_mt_msg {
	struct list_head node;
	u8 reg[4];
	u8 ret;
	sii8620_mt_msg_cb send;
	sii8620_mt_msg_cb recv;
	sii8620_cb continuation;
};

/* I2C slave address per register page (high byte of the 16-bit address) */
static const u8 sii8620_i2c_page[] = {
	0x39, /* Main System */
	0x3d, /* TDM and HSIC */
	0x49, /* TMDS Receiver, MHL EDID */
	0x4d, /* eMSC, HDCP, HSIC */
	0x5d, /* MHL Spec */
	0x64, /* MHL CBUS */
	0x59, /* Hardware TPI (Transmitter Programming Interface) */
	0x61, /* eCBUS-S, eCBUS-D */
};

static void sii8620_fetch_edid(struct sii8620 *ctx);
static void sii8620_set_upstream_edid(struct sii8620 *ctx);
static void sii8620_enable_hpd(struct sii8620 *ctx);
static void sii8620_mhl_disconnected(struct sii8620 *ctx);
static void sii8620_disconnect(struct sii8620 *ctx);

/* Return and reset the sticky error code. */
static int sii8620_clear_error(struct sii8620 *ctx)
{
	int ret = ctx->error;

	ctx->error = 0;
	return ret;
}

/*
 * Read len bytes from a paged register address (page selected via the
 * high address byte). No-op if a previous operation already failed.
 */
static void sii8620_read_buf(struct sii8620 *ctx, u16 addr, u8 *buf, int len)
{
	struct device *dev = ctx->dev;
	struct i2c_client *client = to_i2c_client(dev);
	u8 data = addr;
	struct i2c_msg msg[] = {
		{
			.addr = sii8620_i2c_page[addr >> 8],
			.flags = client->flags,
			.len = 1,
			.buf = &data
		},
		{
			.addr = sii8620_i2c_page[addr >> 8],
			.flags = client->flags | I2C_M_RD,
			.len = len,
			.buf = buf
		},
	};
	int ret;

	if (ctx->error)
		return;

	ret = i2c_transfer(client->adapter, msg, 2);
	dev_dbg(dev, "read at %04x: %*ph, %d\n", addr, len, buf, ret);

	if (ret != 2) {
		dev_err(dev, "Read at %#06x of %d bytes failed with code %d.\n",
			addr, len, ret);
		ctx->error = ret < 0 ? ret : -EIO;
	}
}

/* Single-byte register read; returns 0 on (sticky) error. */
static u8 sii8620_readb(struct sii8620 *ctx, u16 addr)
{
	u8 ret = 0;

	sii8620_read_buf(ctx, addr, &ret, 1);
	return ret;
}

/*
 * Write len bytes to a paged register address. Buffers > 1 byte are
 * staged in a heap copy prefixed with the register offset.
 */
static void sii8620_write_buf(struct sii8620 *ctx, u16 addr, const u8 *buf,
			      int len)
{
	struct device *dev = ctx->dev;
	struct i2c_client *client = to_i2c_client(dev);
	u8 data[2];
	struct i2c_msg msg = {
		.addr = sii8620_i2c_page[addr >> 8],
		.flags = client->flags,
		.len = len + 1,
	};
	int ret;

	if (ctx->error)
		return;

	if (len > 1) {
		msg.buf = kmalloc(len + 1, GFP_KERNEL);
		if (!msg.buf) {
			ctx->error = -ENOMEM;
			return;
		}
		memcpy(msg.buf + 1, buf, len);
	} else {
		msg.buf = data;
		msg.buf[1] = *buf;
	}

	msg.buf[0] = addr;

	ret = i2c_transfer(client->adapter, &msg, 1);
	dev_dbg(dev, "write at %04x: %*ph, %d\n", addr, len, buf, ret);

	if (ret != 1) {
		dev_err(dev, "Write at %#06x of %*ph failed with code %d.\n",
			addr, len, buf, ret);
		ctx->error = ret ?: -EIO;
	}

	if (len > 1)
		kfree(msg.buf);
}

/* Write a literal list of bytes to one register address. */
#define sii8620_write(ctx, addr, arr...) \
({\
	u8 d[] = { arr }; \
	sii8620_write_buf(ctx, addr, d, ARRAY_SIZE(d)); \
})

/* Write (addr, value) pairs from seq, one register at a time. */
static void __sii8620_write_seq(struct sii8620 *ctx, const u16 *seq, int len)
{
	int i;

	for (i = 0; i < len; i += 2)
		sii8620_write(ctx, seq[i], seq[i + 1]);
}

#define sii8620_write_seq(ctx, seq...) \
({\
	const u16 d[] = { seq }; \
	__sii8620_write_seq(ctx, d, ARRAY_SIZE(d)); \
})

/* Same, but the pair table lives in static storage (constant sequences). */
#define sii8620_write_seq_static(ctx, seq...) \
({\
	static const u16 d[] = { seq }; \
	__sii8620_write_seq(ctx, d, ARRAY_SIZE(d)); \
})

/* Read-modify-write of the masked bits of a register. */
static void sii8620_setbits(struct sii8620 *ctx, u16 addr, u8 mask, u8 val)
{
	val = (val & mask) | (sii8620_readb(ctx, addr) & ~mask);
	sii8620_write(ctx, addr, val);
}

static inline bool sii8620_is_mhl3(struct sii8620 *ctx)
{
	return ctx->mode >= CM_MHL3;
}

/* Drop all queued MSC transactions and reset the queue state. */
static void sii8620_mt_cleanup(struct sii8620 *ctx)
{
	struct sii8620_mt_msg *msg, *n;

	list_for_each_entry_safe(msg, n, &ctx->mt_queue, node) {
		list_del(&msg->node);
		kfree(msg);
	}
	ctx->mt_state = MT_STATE_READY;
}

/*
 * MSC queue pump: finish the completed head transaction (recv +
 * continuation callbacks), then start the next queued one, if any.
 */
static void sii8620_mt_work(struct sii8620 *ctx)
{
	struct sii8620_mt_msg *msg;

	if (ctx->error)
		return;
	if (ctx->mt_state == MT_STATE_BUSY || list_empty(&ctx->mt_queue))
		return;

	if (ctx->mt_state == MT_STATE_DONE) {
		ctx->mt_state = MT_STATE_READY;
		msg = list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg,
				       node);
		list_del(&msg->node);
		if (msg->recv)
			msg->recv(ctx, msg);
		if (msg->continuation)
			msg->continuation(ctx, msg->ret);
		kfree(msg);
	}

	if (ctx->mt_state != MT_STATE_READY || list_empty(&ctx->mt_queue))
		return;

	ctx->mt_state = MT_STATE_BUSY;
	msg = list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg, node);
	if (msg->send)
		msg->send(ctx, msg);
}

/* Enable MDT (gen2) write-burst reception; idempotent. */
static void sii8620_enable_gen2_write_burst(struct sii8620 *ctx)
{
	u8 ctrl = BIT_MDT_RCV_CTRL_MDT_RCV_EN;

	if (ctx->gen2_write_burst)
		return;

	if (ctx->mode >= CM_MHL1)
		ctrl |= BIT_MDT_RCV_CTRL_MDT_DELAY_RCV_EN;

	sii8620_write_seq(ctx,
		REG_MDT_RCV_TIMEOUT, 100,
		REG_MDT_RCV_CTRL, ctrl
	);
	ctx->gen2_write_burst = 1;
}

/* Disable MDT write-burst transmit and receive; idempotent. */
static void sii8620_disable_gen2_write_burst(struct sii8620 *ctx)
{
	if (!ctx->gen2_write_burst)
		return;

	sii8620_write_seq_static(ctx,
		REG_MDT_XMIT_CTRL, 0,
		REG_MDT_RCV_CTRL, 0
	);
	ctx->gen2_write_burst = 0;
}

/* Unmask MDT interrupts and switch write-burst reception on. */
static void sii8620_start_gen2_write_burst(struct sii8620 *ctx)
{
	sii8620_write_seq_static(ctx,
		REG_MDT_INT_1_MASK, BIT_MDT_RCV_TIMEOUT |
			BIT_MDT_RCV_SM_ABORT_PKT_RCVD |
			BIT_MDT_RCV_SM_ERROR |
			BIT_MDT_XMIT_TIMEOUT |
			BIT_MDT_XMIT_SM_ABORT_PKT_RCVD |
			BIT_MDT_XMIT_SM_ERROR,
		REG_MDT_INT_0_MASK, BIT_MDT_XFIFO_EMPTY |
			BIT_MDT_IDLE_AFTER_HAWB_DISABLE |
			BIT_MDT_RFIFO_DATA_RDY
	);
	sii8620_enable_gen2_write_burst(ctx);
}

/* Program the MSC engine registers for the head transaction and kick it. */
static void sii8620_mt_msc_cmd_send(struct sii8620 *ctx,
				    struct sii8620_mt_msg *msg)
{
	if (msg->reg[0] == MHL_SET_INT &&
	    msg->reg[1] == MHL_INT_REG(RCHANGE) &&
	    msg->reg[2] == MHL_INT_RC_FEAT_REQ)
		sii8620_enable_gen2_write_burst(ctx);
	else
		sii8620_disable_gen2_write_burst(ctx);

	switch (msg->reg[0]) {
	case MHL_WRITE_STAT:
	case MHL_SET_INT:
		sii8620_write_buf(ctx, REG_MSC_CMD_OR_OFFSET, msg->reg + 1, 2);
		sii8620_write(ctx, REG_MSC_COMMAND_START,
			      BIT_MSC_COMMAND_START_WRITE_STAT);
		break;
	case MHL_MSC_MSG:
		sii8620_write_buf(ctx, REG_MSC_CMD_OR_OFFSET, msg->reg, 3);
		sii8620_write(ctx, REG_MSC_COMMAND_START,
			      BIT_MSC_COMMAND_START_MSC_MSG);
		break;
	case MHL_READ_DEVCAP_REG:
	case MHL_READ_XDEVCAP_REG:
		sii8620_write(ctx, REG_MSC_CMD_OR_OFFSET, msg->reg[1]);
		sii8620_write(ctx, REG_MSC_COMMAND_START,
			      BIT_MSC_COMMAND_START_READ_DEVCAP);
		break;
	default:
		dev_err(ctx->dev, "%s: command %#x not supported\n", __func__,
			msg->reg[0]);
	}
}

/* Allocate a transaction and append it to the queue; sets ctx->error on OOM. */
static struct sii8620_mt_msg *sii8620_mt_msg_new(struct sii8620 *ctx)
{
	struct sii8620_mt_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL);

	if (!msg)
		ctx->error = -ENOMEM;
	else
		list_add_tail(&msg->node, &ctx->mt_queue);

	return msg;
}

/* Attach a completion callback to the most recently queued transaction. */
static void sii8620_mt_set_cont(struct sii8620 *ctx, sii8620_cb cont)
{
	struct sii8620_mt_msg *msg;

	if (ctx->error)
		return;

	if (list_empty(&ctx->mt_queue)) {
		ctx->error = -EINVAL;
		return;
	}
	msg = list_last_entry(&ctx->mt_queue, struct sii8620_mt_msg, node);
	msg->continuation = cont;
}

/* Queue a generic 3-byte MSC command. */
static void sii8620_mt_msc_cmd(struct sii8620 *ctx, u8 cmd, u8 arg1, u8 arg2)
{
	struct sii8620_mt_msg *msg = sii8620_mt_msg_new(ctx);

	if (!msg)
		return;

	msg->reg[0] = cmd;
	msg->reg[1] = arg1;
	msg->reg[2] = arg2;
	msg->send = sii8620_mt_msc_cmd_send;
}

/* Queue a WRITE_STAT to the peer's status register. */
static void sii8620_mt_write_stat(struct sii8620 *ctx, u8 reg, u8 val)
{
	sii8620_mt_msc_cmd(ctx, MHL_WRITE_STAT, reg, val);
}

static
inline void sii8620_mt_set_int(struct sii8620 *ctx, u8 irq, u8 mask)
{
	sii8620_mt_msc_cmd(ctx, MHL_SET_INT, irq, mask);
}

/* Queue an MSC_MSG sub-command (RAP/RCPK/RCPE below are thin wrappers). */
static void sii8620_mt_msc_msg(struct sii8620 *ctx, u8 cmd, u8 data)
{
	sii8620_mt_msc_cmd(ctx, MHL_MSC_MSG, cmd, data);
}

static void sii8620_mt_rap(struct sii8620 *ctx, u8 code)
{
	sii8620_mt_msc_msg(ctx, MHL_MSC_MSG_RAP, code);
}

static void sii8620_mt_rcpk(struct sii8620 *ctx, u8 code)
{
	sii8620_mt_msc_msg(ctx, MHL_MSC_MSG_RCPK, code);
}

static void sii8620_mt_rcpe(struct sii8620 *ctx, u8 code)
{
	sii8620_mt_msc_msg(ctx, MHL_MSC_MSG_RCPE, code);
}

/* Kick a full (X)DEVCAP read through the EDID/DEVCAP FIFO engine. */
static void sii8620_mt_read_devcap_send(struct sii8620 *ctx,
					struct sii8620_mt_msg *msg)
{
	u8 ctrl = BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP |
		  BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO |
		  BIT_EDID_CTRL_EDID_MODE_EN;

	if (msg->reg[0] == MHL_READ_XDEVCAP)
		ctrl |= BIT_EDID_CTRL_XDEVCAP_EN;

	sii8620_write_seq(ctx,
		REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE,
		REG_EDID_CTRL, ctrl,
		REG_TPI_CBUS_START, BIT_TPI_CBUS_START_GET_DEVCAP_START
	);
}

/* copy src to dst and set changed bits in src (XOR swap trick) */
static void sii8620_update_array(u8 *dst, u8 *src, int count)
{
	while (--count >= 0) {
		*src ^= *dst;
		*dst++ ^= *src++;
	}
}

/*
 * Once both the sink and its DEVCAP are known: fetch the EDID, forward it
 * upstream, and classify the sink as HDMI or DVI.
 */
static void sii8620_identify_sink(struct sii8620 *ctx)
{
	static const char * const sink_str[] = {
		[SINK_NONE] = "NONE",
		[SINK_HDMI] = "HDMI",
		[SINK_DVI] = "DVI"
	};
	char sink_name[20];
	struct device *dev = ctx->dev;

	if (!ctx->sink_detected || !ctx->devcap_read)
		return;

	sii8620_fetch_edid(ctx);
	if (!ctx->edid) {
		dev_err(ctx->dev, "Cannot fetch EDID\n");
		sii8620_mhl_disconnected(ctx);
		return;
	}
	sii8620_set_upstream_edid(ctx);

	if (drm_detect_hdmi_monitor(ctx->edid))
		ctx->sink_type = SINK_HDMI;
	else
		ctx->sink_type = SINK_DVI;

	drm_edid_get_monitor_name(ctx->edid, sink_name, ARRAY_SIZE(sink_name));

	dev_info(dev, "detected sink(type: %s): %s\n",
		 sink_str[ctx->sink_type], sink_name);
}

/* Read the DEVCAP block out of the FIFO and merge it into ctx->devcap. */
static void sii8620_mr_devcap(struct sii8620 *ctx)
{
	u8 dcap[MHL_DCAP_SIZE];
	struct device *dev = ctx->dev;

	sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, dcap, MHL_DCAP_SIZE);
	if (ctx->error < 0)
		return;

	dev_info(dev, "detected dongle MHL %d.%d, ChipID %02x%02x:%02x%02x\n",
		 dcap[MHL_DCAP_MHL_VERSION] / 16,
		 dcap[MHL_DCAP_MHL_VERSION] % 16,
		 dcap[MHL_DCAP_ADOPTER_ID_H], dcap[MHL_DCAP_ADOPTER_ID_L],
		 dcap[MHL_DCAP_DEVICE_ID_H], dcap[MHL_DCAP_DEVICE_ID_L]);
	sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE);
	ctx->devcap_read = true;
	sii8620_identify_sink(ctx);
}

/* Read the extended DEVCAP block into ctx->xdevcap. */
static void sii8620_mr_xdevcap(struct sii8620 *ctx)
{
	sii8620_read_buf(ctx, REG_EDID_FIFO_RD_DATA, ctx->xdevcap,
			 MHL_XDC_SIZE);
}

/* Completion for a full (X)DEVCAP read: re-arm interrupts, drain the FIFO. */
static void sii8620_mt_read_devcap_recv(struct sii8620 *ctx,
					struct sii8620_mt_msg *msg)
{
	u8 ctrl = BIT_EDID_CTRL_DEVCAP_SELECT_DEVCAP |
		  BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO |
		  BIT_EDID_CTRL_EDID_MODE_EN;

	if (msg->reg[0] == MHL_READ_XDEVCAP)
		ctrl |= BIT_EDID_CTRL_XDEVCAP_EN;

	sii8620_write_seq(ctx,
		REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE | BIT_INTR9_EDID_DONE |
			BIT_INTR9_EDID_ERROR,
		REG_EDID_CTRL, ctrl,
		REG_EDID_FIFO_ADDR, 0
	);

	if (msg->reg[0] == MHL_READ_XDEVCAP)
		sii8620_mr_xdevcap(ctx);
	else
		sii8620_mr_devcap(ctx);
}

/* Queue a full DEVCAP (or XDEVCAP) read transaction. */
static void sii8620_mt_read_devcap(struct sii8620 *ctx, bool xdevcap)
{
	struct sii8620_mt_msg *msg = sii8620_mt_msg_new(ctx);

	if (!msg)
		return;

	msg->reg[0] = xdevcap ? MHL_READ_XDEVCAP : MHL_READ_DEVCAP;
	msg->send = sii8620_mt_read_devcap_send;
	msg->recv = sii8620_mt_read_devcap_recv;
}

/* Store a single-register DEVCAP read result (bit 7 selects XDEVCAP). */
static void sii8620_mt_read_devcap_reg_recv(struct sii8620 *ctx,
					    struct sii8620_mt_msg *msg)
{
	u8 reg = msg->reg[1] & 0x7f;

	if (msg->reg[1] & 0x80)
		ctx->xdevcap[reg] = msg->ret;
	else
		ctx->devcap[reg] = msg->ret;
}

/* Queue a single-register DEVCAP/XDEVCAP read. */
static void sii8620_mt_read_devcap_reg(struct sii8620 *ctx, u8 reg)
{
	struct sii8620_mt_msg *msg = sii8620_mt_msg_new(ctx);

	if (!msg)
		return;

	msg->reg[0] = (reg & 0x80) ? MHL_READ_XDEVCAP_REG :
		MHL_READ_DEVCAP_REG;
	msg->reg[1] = reg;
	msg->send = sii8620_mt_msc_cmd_send;
	msg->recv = sii8620_mt_read_devcap_reg_recv;
}

static inline void sii8620_mt_read_xdevcap_reg(struct sii8620 *ctx, u8 reg)
{
	sii8620_mt_read_devcap_reg(ctx, reg | 0x80);
}

/*
 * Reserve len payload bytes (+2 byte header) in the eMSC TX staging
 * buffer; returns pointer to the payload area or NULL when full.
 */
static void *sii8620_burst_get_tx_buf(struct sii8620 *ctx, int len)
{
	u8 *buf = &ctx->burst.tx_buf[ctx->burst.tx_count];
	int size = len + 2;

	if (ctx->burst.tx_count + size >= ARRAY_SIZE(ctx->burst.tx_buf)) {
		dev_err(ctx->dev, "TX-BLK buffer exhausted\n");
		ctx->error = -EINVAL;
		return NULL;
	}

	ctx->burst.tx_count += size;
	buf[1] = len;

	return buf + 2;
}

/* Same as above for the RX staging buffer (+1 byte length header). */
static u8 *sii8620_burst_get_rx_buf(struct sii8620 *ctx, int len)
{
	u8 *buf = &ctx->burst.rx_buf[ctx->burst.rx_count];
	int size = len + 1;

	if (ctx->burst.rx_count + size >= ARRAY_SIZE(ctx->burst.rx_buf)) {
		dev_err(ctx->dev, "RX-BLK buffer exhausted\n");
		ctx->error = -EINVAL;
		return NULL;
	}

	ctx->burst.rx_count += size;
	buf[0] = len;

	return buf + 1;
}

/*
 * Push staged eMSC bursts to the chip as far as the peer's receive
 * window (r_size/r_count) allows, piggybacking RX acknowledgements.
 */
static void sii8620_burst_send(struct sii8620 *ctx)
{
	int tx_left = ctx->burst.tx_count;
	u8 *d = ctx->burst.tx_buf;

	while (tx_left > 0) {
		int len = d[1] + 2;

		if (ctx->burst.r_count + len > ctx->burst.r_size)
			break;
		d[0] = min(ctx->burst.rx_ack, 255);
		ctx->burst.rx_ack -= d[0];
		sii8620_write_buf(ctx, REG_EMSC_XMIT_WRITE_PORT, d, len);
		ctx->burst.r_count += len;
		tx_left -= len;
		d += len;
	}

	ctx->burst.tx_count = tx_left;

	/* Flush any remaining acks as empty (payload-less) packets */
	while (ctx->burst.rx_ack > 0) {
		u8 b[2] = { min(ctx->burst.rx_ack, 255), 0 };

		if (ctx->burst.r_count + 2 > ctx->burst.r_size)
			break;
		ctx->burst.rx_ack -= b[0];
		sii8620_write_buf(ctx, REG_EMSC_XMIT_WRITE_PORT, b, 2);
		ctx->burst.r_count += 2;
	}
}

/* Drain the eMSC receive FIFO into the RX staging buffer. */
static void sii8620_burst_receive(struct sii8620 *ctx)
{
	u8 buf[3], *d;
	int count;

	sii8620_read_buf(ctx, REG_EMSCRFIFOBCNTL, buf, 2);
	count = get_unaligned_le16(buf);
	while (count > 0) {
		int len = min(count, 3);

		sii8620_read_buf(ctx, REG_EMSC_RCV_READ_PORT, buf, len);
		count -= len;
		ctx->burst.rx_ack += len - 1;
		ctx->burst.r_count -= buf[1];
		if (ctx->burst.r_count < 0)
			ctx->burst.r_count = 0;

		if (len < 3 || !buf[2])
			continue;

		len = buf[2];
		d = sii8620_burst_get_rx_buf(ctx, len);
		if (!d)
			continue;
		sii8620_read_buf(ctx, REG_EMSC_RCV_READ_PORT, d, len);
		count -= len;
		ctx->burst.rx_ack += len;
	}
}

/* Stage a BLK_RCV_BUFFER_INFO burst advertising our receive buffer size. */
static void sii8620_burst_tx_rbuf_info(struct sii8620 *ctx, int size)
{
	struct mhl_burst_blk_rcv_buffer_info *d =
		sii8620_burst_get_tx_buf(ctx, sizeof(*d));
	if (!d)
		return;

	d->id = cpu_to_be16(MHL_BURST_ID_BLK_RCV_BUFFER_INFO);
	d->size = cpu_to_le16(size);
}

/* Simple byte-sum checksum used by MHL3 burst/infoframe payloads. */
static u8 sii8620_checksum(void *ptr, int size)
{
	u8 *d = ptr, sum = 0;

	while (size--)
		sum += *d++;

	return sum;
}

/* Fill the common MHL3 burst header (single-entry sequence). */
static void sii8620_mhl_burst_hdr_set(struct mhl3_burst_header *h,
	enum mhl_burst_id id)
{
	h->id = cpu_to_be16(id);
	h->total_entries = 1;
	h->sequence_index = 1;
}

/* Stage a BITS_PER_PIXEL_FMT burst announcing the stream pixel format. */
static void sii8620_burst_tx_bits_per_pixel_fmt(struct sii8620 *ctx, u8 fmt)
{
	struct mhl_burst_bits_per_pixel_fmt *d;
	const int size = sizeof(*d) + sizeof(d->desc[0]);

	d = sii8620_burst_get_tx_buf(ctx, size);
	if (!d)
		return;

	sii8620_mhl_burst_hdr_set(&d->hdr, MHL_BURST_ID_BITS_PER_PIXEL_FMT);
	d->num_entries = 1;
	d->desc[0].stream_id = 0;
	d->desc[0].pixel_format = fmt;
	d->hdr.checksum -= sii8620_checksum(d, size);
}

/* Process every staged RX burst; currently only the peer's buffer info. */
static void sii8620_burst_rx_all(struct sii8620 *ctx)
{
	u8 *d = ctx->burst.rx_buf;
	int count = ctx->burst.rx_count;

	while (count-- > 0) {
		int len = *d++;
		int id = get_unaligned_be16(&d[0]);

		switch (id) {
		case MHL_BURST_ID_BLK_RCV_BUFFER_INFO:
			ctx->burst.r_size = get_unaligned_le16(&d[2]);
			break;
		default:
			break;
		}
		count -= len;
		d += len;
	}
	ctx->burst.rx_count = 0;
}

/*
 * Read the sink EDID over the chip's DDC engine in FETCH_SIZE chunks,
 * growing the buffer for extension blocks. On success the result replaces
 * ctx->edid; on failure ctx->edid becomes NULL and ctx->error may be set.
 */
static void sii8620_fetch_edid(struct sii8620 *ctx)
{
	u8 lm_ddc, ddc_cmd, int3, cbus;
	unsigned long timeout;
	int fetched, i;
	int edid_len = EDID_LENGTH;
	u8 *edid;

	sii8620_readb(ctx, REG_CBUS_STATUS);
	lm_ddc = sii8620_readb(ctx, REG_LM_DDC);
	ddc_cmd = sii8620_readb(ctx, REG_DDC_CMD);

	sii8620_write_seq(ctx,
		REG_INTR9_MASK, 0,
		REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO,
		REG_HDCP2X_POLL_CS, 0x71,
		REG_HDCP2X_CTRL_0, BIT_HDCP2X_CTRL_0_HDCP2X_HDCPTX,
		REG_LM_DDC, lm_ddc | BIT_LM_DDC_SW_TPI_EN_DISABLED,
	);

	/* Wait (bounded) for any in-progress DDC transfer to finish */
	for (i = 0; i < 256; ++i) {
		u8 ddc_stat = sii8620_readb(ctx, REG_DDC_STATUS);

		if (!(ddc_stat & BIT_DDC_STATUS_DDC_I2C_IN_PROG))
			break;
		sii8620_write(ctx, REG_DDC_STATUS,
			      BIT_DDC_STATUS_DDC_FIFO_EMPTY);
	}

	sii8620_write(ctx, REG_DDC_ADDR, 0x50 << 1);

	edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
	if (!edid) {
		ctx->error = -ENOMEM;
		return;
	}

#define FETCH_SIZE 16
	for (fetched = 0; fetched < edid_len; fetched += FETCH_SIZE) {
		sii8620_readb(ctx, REG_DDC_STATUS);
		sii8620_write_seq(ctx,
			REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_DDC_CMD_ABORT,
			REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_DDC_CMD_CLEAR_FIFO,
			REG_DDC_STATUS, BIT_DDC_STATUS_DDC_FIFO_EMPTY
		);
		sii8620_write_seq(ctx,
			REG_DDC_SEGM, fetched >> 8,
			REG_DDC_OFFSET, fetched & 0xff,
			REG_DDC_DIN_CNT1, FETCH_SIZE,
			REG_DDC_DIN_CNT2, 0,
			REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_ENH_DDC_READ_NO_ACK
		);

		int3 = 0;
		timeout = jiffies + msecs_to_jiffies(200);
		for (;;) {
			/* Abort if the cable went away mid-read */
			cbus = sii8620_readb(ctx, REG_CBUS_STATUS);
			if (~cbus & BIT_CBUS_STATUS_CBUS_CONNECTED) {
				kfree(edid);
				edid = NULL;
				goto end;
			}
			if (int3 & BIT_DDC_CMD_DONE) {
				if (sii8620_readb(ctx, REG_DDC_DOUT_CNT)
				    >= FETCH_SIZE)
					break;
			} else {
				int3 = sii8620_readb(ctx, REG_INTR3);
			}
			if (time_is_before_jiffies(timeout)) {
				ctx->error = -ETIMEDOUT;
				dev_err(ctx->dev, "timeout during EDID read\n");
				kfree(edid);
				edid = NULL;
				goto end;
			}
			usleep_range(10, 20);
		}

		sii8620_read_buf(ctx, REG_DDC_DATA, edid + fetched, FETCH_SIZE);
		if (fetched + FETCH_SIZE == EDID_LENGTH) {
			/* Base block complete: grow for extension blocks */
			u8 ext = ((struct edid *)edid)->extensions;

			if (ext) {
				u8 *new_edid;

				edid_len += ext * EDID_LENGTH;
				new_edid = krealloc(edid, edid_len, GFP_KERNEL);
				if (!new_edid) {
					kfree(edid);
					ctx->error = -ENOMEM;
					return;
				}
				edid = new_edid;
			}
		}
	}

	sii8620_write_seq(ctx,
		REG_INTR3_MASK, BIT_DDC_CMD_DONE,
		REG_LM_DDC, lm_ddc
	);

end:
	kfree(ctx->edid);
	ctx->edid = (struct edid *)edid;
}

/* Load ctx->edid into the chip's EDID FIFO for the upstream source. */
static void sii8620_set_upstream_edid(struct sii8620 *ctx)
{
	sii8620_setbits(ctx, REG_DPD,
BIT_DPD_PDNRX12 | BIT_DPD_PDIDCK_N |
			BIT_DPD_PD_MHL_CLK_N, 0xff);

	sii8620_write_seq_static(ctx,
		REG_RX_HDMI_CTRL3, 0x00,
		REG_PKT_FILTER_0, 0xFF,
		REG_PKT_FILTER_1, 0xFF,
		REG_ALICE0_BW_I2C, 0x06
	);

	sii8620_setbits(ctx, REG_RX_HDMI_CLR_BUFFER,
			BIT_RX_HDMI_CLR_BUFFER_VSI_CLR_EN, 0xff);

	sii8620_write_seq_static(ctx,
		REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO |
			BIT_EDID_CTRL_EDID_MODE_EN,
		REG_EDID_FIFO_ADDR, 0,
	);

	sii8620_write_buf(ctx, REG_EDID_FIFO_WR_DATA, (u8 *)ctx->edid,
			  (ctx->edid->extensions + 1) * EDID_LENGTH);

	sii8620_write_seq_static(ctx,
		REG_EDID_CTRL, BIT_EDID_CTRL_EDID_PRIME_VALID |
			BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO |
			BIT_EDID_CTRL_EDID_MODE_EN,
		REG_INTR5_MASK, BIT_INTR_SCDT_CHANGE,
		REG_INTR9_MASK, 0
	);
}

/*
 * Pick divider/TP1 settings for the crystal rate; falls back to the
 * highest table entry (with an error message) for unsupported rates.
 */
static void sii8620_xtal_set_rate(struct sii8620 *ctx)
{
	static const struct {
		unsigned int rate;
		u8 div;
		u8 tp1;
	} rates[] = {
		{ 19200, 0x04, 0x53 },
		{ 20000, 0x04, 0x62 },
		{ 24000, 0x05, 0x75 },
		{ 30000, 0x06, 0x92 },
		{ 38400, 0x0c, 0xbc },
	};
	unsigned long rate = clk_get_rate(ctx->clk_xtal) / 1000;
	int i;

	for (i = 0; i < ARRAY_SIZE(rates) - 1; ++i)
		if (rate <= rates[i].rate)
			break;

	if (rate != rates[i].rate)
		dev_err(ctx->dev,
			"xtal clock rate(%lukHz) not supported, setting MHL for %ukHz.\n",
			rate, rates[i].rate);

	sii8620_write(ctx, REG_DIV_CTL_MAIN, rates[i].div);
	sii8620_write(ctx, REG_HDCP2X_TP1, rates[i].tp1);
}

/* Power-up sequence: regulators, crystal clock, then release reset. */
static int sii8620_hw_on(struct sii8620 *ctx)
{
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
	if (ret)
		return ret;

	usleep_range(10000, 20000);
	ret = clk_prepare_enable(ctx->clk_xtal);
	if (ret)
		return ret;

	msleep(100);
	gpiod_set_value(ctx->gpio_reset, 0);
	msleep(100);

	return 0;
}

/* Power-down sequence: clock off, assert reset, regulators off. */
static int sii8620_hw_off(struct sii8620 *ctx)
{
	clk_disable_unprepare(ctx->clk_xtal);
	gpiod_set_value(ctx->gpio_reset, 1);
	return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
}

/* Pulse the CBUS soft-reset bit. */
static void sii8620_cbus_reset(struct sii8620 *ctx)
{
	sii8620_write(ctx, REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST |
		      BIT_PWD_SRST_CBUS_RST_SW_EN);
	usleep_range(10000, 20000);
	sii8620_write(ctx, REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST_SW_EN);
}

/* Program the TX zone/PLL for MHL1 (manual zone) vs newer modes (auto). */
static void sii8620_set_auto_zone(struct sii8620 *ctx)
{
	if (ctx->mode != CM_MHL1) {
		sii8620_write_seq_static(ctx,
			REG_TX_ZONE_CTL1, 0x0,
			REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X |
				BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL |
				BIT_MHL_PLL_CTL0_ZONE_MASK_OE
		);
	} else {
		sii8620_write_seq_static(ctx,
			REG_TX_ZONE_CTL1, VAL_TX_ZONE_CTL1_TX_ZONE_CTRL_MODE,
			REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X |
				BIT_MHL_PLL_CTL0_ZONE_MASK_OE
		);
	}
}

/* Mute and power down the TMDS output, keeping the sink-type output mode. */
static void sii8620_stop_video(struct sii8620 *ctx)
{
	u8 val;

	sii8620_write_seq_static(ctx,
		REG_TPI_INTR_EN, 0,
		REG_HDCP2X_INTR0_MASK, 0,
		REG_TPI_COPP_DATA2, 0,
		REG_TPI_INTR_ST0, ~0,
	);

	switch (ctx->sink_type) {
	case SINK_DVI:
		val = BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN |
			BIT_TPI_SC_TPI_AV_MUTE;
		break;
	case SINK_HDMI:
	default:
		val = BIT_TPI_SC_REG_TMDS_OE_POWER_DOWN |
			BIT_TPI_SC_TPI_AV_MUTE |
			BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI;
		break;
	}

	sii8620_write(ctx, REG_TPI_SC, val);
}

/* Program packed-pixel vs RGB paths and the TPI input/output formats. */
static void sii8620_set_format(struct sii8620 *ctx)
{
	u8 out_fmt;

	if (sii8620_is_mhl3(ctx)) {
		sii8620_setbits(ctx, REG_M3_P0CTRL,
				BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED,
				ctx->use_packed_pixel ? ~0 : 0);
	} else {
		if (ctx->use_packed_pixel) {
			sii8620_write_seq_static(ctx,
				REG_VID_MODE, BIT_VID_MODE_M1080P,
				REG_MHL_TOP_CTL, BIT_MHL_TOP_CTL_MHL_PP_SEL | 1,
				REG_MHLTX_CTL6, 0x60
			);
		} else {
			sii8620_write_seq_static(ctx,
				REG_VID_MODE, 0,
				REG_MHL_TOP_CTL, 1,
				REG_MHLTX_CTL6, 0xa0
			);
		}
	}

	if (ctx->use_packed_pixel)
		out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL);
	else
		out_fmt = VAL_TPI_FORMAT(RGB, FULL);

	sii8620_write_seq(ctx,
		REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL),
		REG_TPI_OUTPUT, out_fmt,
	);
}

/* Reset an MHL3 vendor infoframe to defaults (version 3, no HEV format). */
static int mhl3_infoframe_init(struct mhl3_infoframe *frame)
{
	memset(frame, 0, sizeof(*frame));

	frame->version = 3;
	frame->hev_format = -1;
	return 0;
}

/*
 * Serialize an MHL3 vendor infoframe into buffer, including the checksum
 * byte; returns the frame length or -ENOSPC if buffer is too small.
 */
static ssize_t mhl3_infoframe_pack(struct mhl3_infoframe *frame,
		 void *buffer, size_t size)
{
	const int frm_len = HDMI_INFOFRAME_HEADER_SIZE + MHL3_INFOFRAME_SIZE;
	u8 *ptr = buffer;

	if (size < frm_len)
		return -ENOSPC;

	memset(buffer, 0, size);
	ptr[0] = HDMI_INFOFRAME_TYPE_VENDOR;
	ptr[1] = frame->version;
	ptr[2] = MHL3_INFOFRAME_SIZE;
	ptr[4] = MHL3_IEEE_OUI & 0xff;
	ptr[5] = (MHL3_IEEE_OUI >> 8) & 0xff;
	ptr[6] = (MHL3_IEEE_OUI >> 16) & 0xff;
	ptr[7] = frame->video_format & 0x3;
	ptr[7] |= (frame->format_type & 0x7) << 2;
	ptr[7] |= frame->sep_audio ? BIT(5) : 0;
	if (frame->hev_format >= 0) {
		ptr[9] = 1;
		ptr[10] = (frame->hev_format >> 8) & 0xff;
		ptr[11] = frame->hev_format & 0xff;
	}
	if (frame->av_delay) {
		/* Signed-magnitude, 20-bit AV delay with sign in bit 4 */
		bool sign = frame->av_delay < 0;
		int delay = sign ? -frame->av_delay : frame->av_delay;

		ptr[12] = (delay >> 16) & 0xf;
		if (sign)
			ptr[12] |= BIT(4);
		ptr[13] = (delay >> 8) & 0xff;
		ptr[14] = delay & 0xff;
	}
	ptr[3] -= sii8620_checksum(buffer, frm_len);

	return frm_len;
}

/* Send AVI (and, for MHL3 packed-pixel, MHL3 vendor) infoframes. */
static void sii8620_set_infoframes(struct sii8620 *ctx,
				   struct drm_display_mode *mode)
{
	struct mhl3_infoframe mhl_frm;
	union hdmi_infoframe frm;
	u8 buf[31];
	int ret;

	ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi,
						       NULL, mode);
	if (ctx->use_packed_pixel)
		frm.avi.colorspace = HDMI_COLORSPACE_YUV422;

	if (!ret)
		ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf));
	if (ret > 0)
		sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3);

	if (!sii8620_is_mhl3(ctx) || !ctx->use_packed_pixel) {
		sii8620_write(ctx, REG_TPI_SC,
			BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI);
		sii8620_write(ctx, REG_PKT_FILTER_0,
			BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT |
			BIT_PKT_FILTER_0_DROP_MPEG_PKT |
			BIT_PKT_FILTER_0_DROP_GCP_PKT,
			BIT_PKT_FILTER_1_DROP_GEN_PKT);
		return;
	}

	sii8620_write(ctx, REG_PKT_FILTER_0,
		BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT |
		BIT_PKT_FILTER_0_DROP_MPEG_PKT |
		BIT_PKT_FILTER_0_DROP_AVI_PKT |
		BIT_PKT_FILTER_0_DROP_GCP_PKT,
		BIT_PKT_FILTER_1_VSI_OVERRIDE_DIS |
		BIT_PKT_FILTER_1_DROP_GEN_PKT |
		BIT_PKT_FILTER_1_DROP_VSIF_PKT);

	sii8620_write(ctx, REG_TPI_INFO_FSEL, BIT_TPI_INFO_FSEL_EN |
		BIT_TPI_INFO_FSEL_RPT | VAL_TPI_INFO_FSEL_VSI);

	ret = mhl3_infoframe_init(&mhl_frm);
	if (!ret)
		ret = mhl3_infoframe_pack(&mhl_frm, buf, ARRAY_SIZE(buf));
	sii8620_write_buf(ctx, REG_TPI_INFO_B0, buf, ret);
}

/*
 * Bring up the video path for the current mode: format programming,
 * link-mode (MHL1) or zone/link-rate selection (MHL3), then infoframes.
 */
static void sii8620_start_video(struct sii8620 *ctx)
{
	struct drm_display_mode *mode =
		&ctx->bridge.encoder->crtc->state->adjusted_mode;

	if (!sii8620_is_mhl3(ctx))
		sii8620_stop_video(ctx);

	if (ctx->sink_type == SINK_DVI && !sii8620_is_mhl3(ctx)) {
		sii8620_write(ctx, REG_RX_HDMI_CTRL2,
			      VAL_RX_HDMI_CTRL2_DEFVAL);
		sii8620_write(ctx, REG_TPI_SC, 0);
		return;
	}

	sii8620_write_seq_static(ctx,
		REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL |
			BIT_RX_HDMI_CTRL2_USE_AV_MUTE,
		REG_VID_OVRRD, BIT_VID_OVRRD_PP_AUTO_DISABLE |
			BIT_VID_OVRRD_M1080P_OVRRD);
	sii8620_set_format(ctx);

	if (!sii8620_is_mhl3(ctx)) {
		u8 link_mode = MHL_DST_LM_PATH_ENABLED;

		if (ctx->use_packed_pixel)
			link_mode |= MHL_DST_LM_CLK_MODE_PACKED_PIXEL;
		else
			link_mode |= MHL_DST_LM_CLK_MODE_NORMAL;
		sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), link_mode);
		sii8620_set_auto_zone(ctx);
	} else {
		/* MHL3: pick link rate from the required TMDS clock */
		static const struct {
			int max_clk;
			u8 zone;
			u8 link_rate;
			u8 rrp_decode;
		} clk_spec[] = {
			{ 150000, VAL_TX_ZONE_CTL3_TX_ZONE_1_5GBPS,
			  MHL_XDS_LINK_RATE_1_5_GBPS, 0x38 },
			{ 300000, VAL_TX_ZONE_CTL3_TX_ZONE_3GBPS,
			  MHL_XDS_LINK_RATE_3_0_GBPS, 0x40 },
			{ 600000, VAL_TX_ZONE_CTL3_TX_ZONE_6GBPS,
			  MHL_XDS_LINK_RATE_6_0_GBPS, 0x40 },
		};
		u8 p0_ctrl = BIT_M3_P0CTRL_MHL3_P0_PORT_EN;
		int clk = mode->clock * (ctx->use_packed_pixel ? 2 : 3);
		int i;

		for (i = 0; i < ARRAY_SIZE(clk_spec) - 1; ++i)
			if (clk < clk_spec[i].max_clk)
				break;

		/* Within 2% of the zone limit: lift the rate limiter */
		if (100 * clk >= 98 * clk_spec[i].max_clk)
			p0_ctrl |= BIT_M3_P0CTRL_MHL3_P0_UNLIMIT_EN;

		sii8620_burst_tx_bits_per_pixel_fmt(ctx, ctx->use_packed_pixel);
		sii8620_burst_send(ctx);
		sii8620_write_seq(ctx,
			REG_MHL_DP_CTL0, 0xf0,
			REG_MHL3_TX_ZONE_CTL, clk_spec[i].zone);
		sii8620_setbits(ctx, REG_M3_P0CTRL,
			BIT_M3_P0CTRL_MHL3_P0_PORT_EN |
			BIT_M3_P0CTRL_MHL3_P0_UNLIMIT_EN, p0_ctrl);
		sii8620_setbits(ctx, REG_M3_POSTM, MSK_M3_POSTM_RRP_DECODE,
			clk_spec[i].rrp_decode);
		sii8620_write_seq_static(ctx,
			REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE |
				BIT_M3_CTRL_H2M_SWRST,
			REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE
		);
		sii8620_mt_write_stat(ctx, MHL_XDS_REG(AVLINK_MODE_CONTROL),
			clk_spec[i].link_rate);
	}

	sii8620_set_infoframes(ctx, mode);
}

/* Force HPD low towards the upstream source and mask HPD interrupts. */
static void sii8620_disable_hpd(struct sii8620 *ctx)
{
	sii8620_setbits(ctx, REG_EDID_CTRL, BIT_EDID_CTRL_EDID_PRIME_VALID, 0);
	sii8620_write_seq_static(ctx,
		REG_HPD_CTRL, BIT_HPD_CTRL_HPD_OUT_OVR_EN,
		REG_INTR8_MASK, 0
	);
}

static void sii8620_enable_hpd(struct sii8620 *ctx)
{
	sii8620_setbits(ctx, REG_TMDS_CSTAT_P3,
			BIT_TMDS_CSTAT_P3_SCDT_CLR_AVI_DIS |
			BIT_TMDS_CSTAT_P3_CLR_AVI, ~0);
sii8620_write_seq_static(ctx, REG_HPD_CTRL, BIT_HPD_CTRL_HPD_OUT_OVR_EN | BIT_HPD_CTRL_HPD_HIGH, ); } static void sii8620_mhl_discover(struct sii8620 *ctx) { sii8620_write_seq_static(ctx, REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT | BIT_DISC_CTRL9_DISC_PULSE_PROCEED, REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_5K, VAL_PUP_20K), REG_CBUS_DISC_INTR0_MASK, BIT_MHL3_EST_INT | BIT_MHL_EST_INT | BIT_NOT_MHL_EST_INT | BIT_CBUS_MHL3_DISCON_INT | BIT_CBUS_MHL12_DISCON_INT | BIT_RGND_READY_INT, REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X | BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL | BIT_MHL_PLL_CTL0_ZONE_MASK_OE, REG_MHL_DP_CTL0, BIT_MHL_DP_CTL0_DP_OE | BIT_MHL_DP_CTL0_TX_OE_OVR, REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE, REG_MHL_DP_CTL1, 0xA2, REG_MHL_DP_CTL2, 0x03, REG_MHL_DP_CTL3, 0x35, REG_MHL_DP_CTL5, 0x02, REG_MHL_DP_CTL6, 0x02, REG_MHL_DP_CTL7, 0x03, REG_COC_CTLC, 0xFF, REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 | BIT_DPD_OSC_EN | BIT_DPD_PWRON_HSIC, REG_COC_INTR_MASK, BIT_COC_PLL_LOCK_STATUS_CHANGE | BIT_COC_CALIBRATION_DONE, REG_CBUS_INT_1_MASK, BIT_CBUS_MSC_ABORT_RCVD | BIT_CBUS_CMD_ABORT, REG_CBUS_INT_0_MASK, BIT_CBUS_MSC_MT_DONE | BIT_CBUS_HPD_CHG | BIT_CBUS_MSC_MR_WRITE_STAT | BIT_CBUS_MSC_MR_MSC_MSG | BIT_CBUS_MSC_MR_WRITE_BURST | BIT_CBUS_MSC_MR_SET_INT | BIT_CBUS_MSC_MT_DONE_NACK ); } static void sii8620_peer_specific_init(struct sii8620 *ctx) { if (sii8620_is_mhl3(ctx)) sii8620_write_seq_static(ctx, REG_SYS_CTRL1, BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD, REG_EMSCINTRMASK1, BIT_EMSCINTR1_EMSC_TRAINING_COMMA_ERR ); else sii8620_write_seq_static(ctx, REG_HDCP2X_INTR0_MASK, 0x00, REG_EMSCINTRMASK1, 0x00, REG_HDCP2X_INTR0, 0xFF, REG_INTR1, 0xFF, REG_SYS_CTRL1, BIT_SYS_CTRL1_BLOCK_DDC_BY_HPD | BIT_SYS_CTRL1_TX_CTRL_HDMI ); } #define SII8620_MHL_VERSION 0x32 #define SII8620_SCRATCHPAD_SIZE 16 #define SII8620_INT_STAT_SIZE 0x33 static void sii8620_set_dev_cap(struct sii8620 *ctx) { static const u8 devcap[MHL_DCAP_SIZE] = { [MHL_DCAP_MHL_VERSION] = SII8620_MHL_VERSION, [MHL_DCAP_CAT] 
= MHL_DCAP_CAT_SOURCE | MHL_DCAP_CAT_POWER, [MHL_DCAP_ADOPTER_ID_H] = 0x01, [MHL_DCAP_ADOPTER_ID_L] = 0x41, [MHL_DCAP_VID_LINK_MODE] = MHL_DCAP_VID_LINK_RGB444 | MHL_DCAP_VID_LINK_PPIXEL | MHL_DCAP_VID_LINK_16BPP, [MHL_DCAP_AUD_LINK_MODE] = MHL_DCAP_AUD_LINK_2CH, [MHL_DCAP_VIDEO_TYPE] = MHL_DCAP_VT_GRAPHICS, [MHL_DCAP_LOG_DEV_MAP] = MHL_DCAP_LD_GUI, [MHL_DCAP_BANDWIDTH] = 0x0f, [MHL_DCAP_FEATURE_FLAG] = MHL_DCAP_FEATURE_RCP_SUPPORT | MHL_DCAP_FEATURE_RAP_SUPPORT | MHL_DCAP_FEATURE_SP_SUPPORT, [MHL_DCAP_SCRATCHPAD_SIZE] = SII8620_SCRATCHPAD_SIZE, [MHL_DCAP_INT_STAT_SIZE] = SII8620_INT_STAT_SIZE, }; static const u8 xdcap[MHL_XDC_SIZE] = { [MHL_XDC_ECBUS_SPEEDS] = MHL_XDC_ECBUS_S_075 | MHL_XDC_ECBUS_S_8BIT, [MHL_XDC_TMDS_SPEEDS] = MHL_XDC_TMDS_150 | MHL_XDC_TMDS_300 | MHL_XDC_TMDS_600, [MHL_XDC_ECBUS_ROLES] = MHL_XDC_DEV_HOST, [MHL_XDC_LOG_DEV_MAPX] = MHL_XDC_LD_PHONE, }; sii8620_write_buf(ctx, REG_MHL_DEVCAP_0, devcap, ARRAY_SIZE(devcap)); sii8620_write_buf(ctx, REG_MHL_EXTDEVCAP_0, xdcap, ARRAY_SIZE(xdcap)); } static void sii8620_mhl_init(struct sii8620 *ctx) { sii8620_write_seq_static(ctx, REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K), REG_CBUS_MSC_COMPAT_CTRL, BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN, ); sii8620_peer_specific_init(ctx); sii8620_disable_hpd(ctx); sii8620_write_seq_static(ctx, REG_EDID_CTRL, BIT_EDID_CTRL_EDID_FIFO_ADDR_AUTO, REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT | BIT_DISC_CTRL9_WAKE_PULSE_BYPASS, REG_TMDS0_CCTRL1, 0x90, REG_TMDS_CLK_EN, 0x01, REG_TMDS_CH_EN, 0x11, REG_BGR_BIAS, 0x87, REG_ALICE0_ZONE_CTRL, 0xE8, REG_ALICE0_MODE_CTRL, 0x04, ); sii8620_setbits(ctx, REG_LM_DDC, BIT_LM_DDC_SW_TPI_EN_DISABLED, 0); sii8620_write_seq_static(ctx, REG_TPI_HW_OPT3, 0x76, REG_TMDS_CCTRL, BIT_TMDS_CCTRL_TMDS_OE, REG_TPI_DTD_B2, 79, ); sii8620_set_dev_cap(ctx); sii8620_write_seq_static(ctx, REG_MDT_XMIT_TIMEOUT, 100, REG_MDT_XMIT_CTRL, 0x03, REG_MDT_XFIFO_STAT, 0x00, REG_MDT_RCV_TIMEOUT, 100, REG_CBUS_LINK_CTRL_8, 0x1D, ); 
sii8620_start_gen2_write_burst(ctx); sii8620_write_seq_static(ctx, REG_BIST_CTRL, 0x00, REG_COC_CTL1, 0x10, REG_COC_CTL2, 0x18, REG_COC_CTLF, 0x07, REG_COC_CTL11, 0xF8, REG_COC_CTL17, 0x61, REG_COC_CTL18, 0x46, REG_COC_CTL19, 0x15, REG_COC_CTL1A, 0x01, REG_MHL_COC_CTL3, BIT_MHL_COC_CTL3_COC_AECHO_EN, REG_MHL_COC_CTL4, 0x2D, REG_MHL_COC_CTL5, 0xF9, REG_MSC_HEARTBEAT_CTRL, 0x27, ); sii8620_disable_gen2_write_burst(ctx); sii8620_mt_write_stat(ctx, MHL_DST_REG(VERSION), SII8620_MHL_VERSION); sii8620_mt_write_stat(ctx, MHL_DST_REG(CONNECTED_RDY), MHL_DST_CONN_DCAP_RDY | MHL_DST_CONN_XDEVCAPP_SUPP | MHL_DST_CONN_POW_STAT); sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE), MHL_INT_RC_DCAP_CHG); } static void sii8620_emsc_enable(struct sii8620 *ctx) { u8 reg; sii8620_setbits(ctx, REG_GENCTL, BIT_GENCTL_EMSC_EN | BIT_GENCTL_CLR_EMSC_RFIFO | BIT_GENCTL_CLR_EMSC_XFIFO, ~0); sii8620_setbits(ctx, REG_GENCTL, BIT_GENCTL_CLR_EMSC_RFIFO | BIT_GENCTL_CLR_EMSC_XFIFO, 0); sii8620_setbits(ctx, REG_COMMECNT, BIT_COMMECNT_I2C_TO_EMSC_EN, ~0); reg = sii8620_readb(ctx, REG_EMSCINTR); sii8620_write(ctx, REG_EMSCINTR, reg); sii8620_write(ctx, REG_EMSCINTRMASK, BIT_EMSCINTR_SPI_DVLD); } static int sii8620_wait_for_fsm_state(struct sii8620 *ctx, u8 state) { int i; for (i = 0; i < 10; ++i) { u8 s = sii8620_readb(ctx, REG_COC_STAT_0); if ((s & MSK_COC_STAT_0_FSM_STATE) == state) return 0; if (!(s & BIT_COC_STAT_0_PLL_LOCKED)) return -EBUSY; usleep_range(4000, 6000); } return -ETIMEDOUT; } static void sii8620_set_mode(struct sii8620 *ctx, enum sii8620_mode mode) { int ret; if (ctx->mode == mode) return; switch (mode) { case CM_MHL1: sii8620_write_seq_static(ctx, REG_CBUS_MSC_COMPAT_CTRL, 0x02, REG_M3_CTRL, VAL_M3_CTRL_MHL1_2_VALUE, REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 | BIT_DPD_OSC_EN, REG_COC_INTR_MASK, 0 ); ctx->mode = mode; break; case CM_MHL3: sii8620_write(ctx, REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE); ctx->mode = mode; return; case CM_ECBUS_S: sii8620_emsc_enable(ctx); 
sii8620_write_seq_static(ctx, REG_TTXSPINUMS, 4, REG_TRXSPINUMS, 4, REG_TTXHSICNUMS, 0x14, REG_TRXHSICNUMS, 0x14, REG_TTXTOTNUMS, 0x18, REG_TRXTOTNUMS, 0x18, REG_PWD_SRST, BIT_PWD_SRST_COC_DOC_RST | BIT_PWD_SRST_CBUS_RST_SW_EN, REG_MHL_COC_CTL1, 0xbd, REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST_SW_EN, REG_COC_CTLB, 0x01, REG_COC_CTL0, 0x5c, REG_COC_CTL14, 0x03, REG_COC_CTL15, 0x80, REG_MHL_DP_CTL6, BIT_MHL_DP_CTL6_DP_TAP1_SGN | BIT_MHL_DP_CTL6_DP_TAP1_EN | BIT_MHL_DP_CTL6_DT_PREDRV_FEEDCAP_EN, REG_MHL_DP_CTL8, 0x03 ); ret = sii8620_wait_for_fsm_state(ctx, 0x03); sii8620_write_seq_static(ctx, REG_COC_CTL14, 0x00, REG_COC_CTL15, 0x80 ); if (!ret) sii8620_write(ctx, REG_CBUS3_CNVT, 0x85); else sii8620_disconnect(ctx); return; case CM_DISCONNECTED: ctx->mode = mode; break; default: dev_err(ctx->dev, "%s mode %d not supported\n", __func__, mode); break; } sii8620_set_auto_zone(ctx); if (mode != CM_MHL1) return; sii8620_write_seq_static(ctx, REG_MHL_DP_CTL0, 0xBC, REG_MHL_DP_CTL1, 0xBB, REG_MHL_DP_CTL3, 0x48, REG_MHL_DP_CTL5, 0x39, REG_MHL_DP_CTL2, 0x2A, REG_MHL_DP_CTL6, 0x2A, REG_MHL_DP_CTL7, 0x08 ); } static void sii8620_hpd_unplugged(struct sii8620 *ctx) { sii8620_disable_hpd(ctx); ctx->sink_type = SINK_NONE; ctx->sink_detected = false; ctx->feature_complete = false; kfree(ctx->edid); ctx->edid = NULL; } static void sii8620_disconnect(struct sii8620 *ctx) { sii8620_disable_gen2_write_burst(ctx); sii8620_stop_video(ctx); msleep(100); sii8620_cbus_reset(ctx); sii8620_set_mode(ctx, CM_DISCONNECTED); sii8620_write_seq_static(ctx, REG_TX_ZONE_CTL1, 0, REG_MHL_PLL_CTL0, 0x07, REG_COC_CTL0, 0x40, REG_CBUS3_CNVT, 0x84, REG_COC_CTL14, 0x00, REG_COC_CTL0, 0x40, REG_HRXCTRL3, 0x07, REG_MHL_PLL_CTL0, VAL_MHL_PLL_CTL0_HDMI_CLK_RATIO_1X | BIT_MHL_PLL_CTL0_CRYSTAL_CLK_SEL | BIT_MHL_PLL_CTL0_ZONE_MASK_OE, REG_MHL_DP_CTL0, BIT_MHL_DP_CTL0_DP_OE | BIT_MHL_DP_CTL0_TX_OE_OVR, REG_MHL_DP_CTL1, 0xBB, REG_MHL_DP_CTL3, 0x48, REG_MHL_DP_CTL5, 0x3F, REG_MHL_DP_CTL2, 0x2F, REG_MHL_DP_CTL6, 0x2A, 
REG_MHL_DP_CTL7, 0x03 ); sii8620_hpd_unplugged(ctx); sii8620_write_seq_static(ctx, REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE, REG_MHL_COC_CTL1, 0x07, REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K), REG_DISC_CTRL8, 0x00, REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT | BIT_DISC_CTRL9_WAKE_PULSE_BYPASS, REG_INT_CTRL, 0x00, REG_MSC_HEARTBEAT_CTRL, 0x27, REG_DISC_CTRL1, 0x25, REG_CBUS_DISC_INTR0, (u8)~BIT_RGND_READY_INT, REG_CBUS_DISC_INTR0_MASK, BIT_RGND_READY_INT, REG_MDT_INT_1, 0xff, REG_MDT_INT_1_MASK, 0x00, REG_MDT_INT_0, 0xff, REG_MDT_INT_0_MASK, 0x00, REG_COC_INTR, 0xff, REG_COC_INTR_MASK, 0x00, REG_TRXINTH, 0xff, REG_TRXINTMH, 0x00, REG_CBUS_INT_0, 0xff, REG_CBUS_INT_0_MASK, 0x00, REG_CBUS_INT_1, 0xff, REG_CBUS_INT_1_MASK, 0x00, REG_EMSCINTR, 0xff, REG_EMSCINTRMASK, 0x00, REG_EMSCINTR1, 0xff, REG_EMSCINTRMASK1, 0x00, REG_INTR8, 0xff, REG_INTR8_MASK, 0x00, REG_TPI_INTR_ST0, 0xff, REG_TPI_INTR_EN, 0x00, REG_HDCP2X_INTR0, 0xff, REG_HDCP2X_INTR0_MASK, 0x00, REG_INTR9, 0xff, REG_INTR9_MASK, 0x00, REG_INTR3, 0xff, REG_INTR3_MASK, 0x00, REG_INTR5, 0xff, REG_INTR5_MASK, 0x00, REG_INTR2, 0xff, REG_INTR2_MASK, 0x00, ); memset(ctx->stat, 0, sizeof(ctx->stat)); memset(ctx->xstat, 0, sizeof(ctx->xstat)); memset(ctx->devcap, 0, sizeof(ctx->devcap)); memset(ctx->xdevcap, 0, sizeof(ctx->xdevcap)); ctx->devcap_read = false; ctx->cbus_status = 0; sii8620_mt_cleanup(ctx); } static void sii8620_mhl_disconnected(struct sii8620 *ctx) { sii8620_write_seq_static(ctx, REG_DISC_CTRL4, VAL_DISC_CTRL4(VAL_PUP_OFF, VAL_PUP_20K), REG_CBUS_MSC_COMPAT_CTRL, BIT_CBUS_MSC_COMPAT_CTRL_XDEVCAP_EN ); sii8620_disconnect(ctx); } static void sii8620_irq_disc(struct sii8620 *ctx) { u8 stat = sii8620_readb(ctx, REG_CBUS_DISC_INTR0); if (stat & VAL_CBUS_MHL_DISCON) sii8620_mhl_disconnected(ctx); if (stat & BIT_RGND_READY_INT) { u8 stat2 = sii8620_readb(ctx, REG_DISC_STAT2); if ((stat2 & MSK_DISC_STAT2_RGND) == VAL_RGND_1K) { sii8620_mhl_discover(ctx); } else { sii8620_write_seq_static(ctx, 
REG_DISC_CTRL9, BIT_DISC_CTRL9_WAKE_DRVFLT | BIT_DISC_CTRL9_NOMHL_EST | BIT_DISC_CTRL9_WAKE_PULSE_BYPASS, REG_CBUS_DISC_INTR0_MASK, BIT_RGND_READY_INT | BIT_CBUS_MHL3_DISCON_INT | BIT_CBUS_MHL12_DISCON_INT | BIT_NOT_MHL_EST_INT ); } } if (stat & BIT_MHL_EST_INT) sii8620_mhl_init(ctx); sii8620_write(ctx, REG_CBUS_DISC_INTR0, stat); } static void sii8620_read_burst(struct sii8620 *ctx) { u8 buf[17]; sii8620_read_buf(ctx, REG_MDT_RCV_READ_PORT, buf, ARRAY_SIZE(buf)); sii8620_write(ctx, REG_MDT_RCV_CTRL, BIT_MDT_RCV_CTRL_MDT_RCV_EN | BIT_MDT_RCV_CTRL_MDT_DELAY_RCV_EN | BIT_MDT_RCV_CTRL_MDT_RFIFO_CLR_CUR); sii8620_readb(ctx, REG_MDT_RFIFO_STAT); } static void sii8620_irq_g2wb(struct sii8620 *ctx) { u8 stat = sii8620_readb(ctx, REG_MDT_INT_0); if (stat & BIT_MDT_IDLE_AFTER_HAWB_DISABLE) if (sii8620_is_mhl3(ctx)) sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE), MHL_INT_RC_FEAT_COMPLETE); if (stat & BIT_MDT_RFIFO_DATA_RDY) sii8620_read_burst(ctx); if (stat & BIT_MDT_XFIFO_EMPTY) sii8620_write(ctx, REG_MDT_XMIT_CTRL, 0); sii8620_write(ctx, REG_MDT_INT_0, stat); } static void sii8620_status_dcap_ready(struct sii8620 *ctx) { enum sii8620_mode mode; mode = ctx->stat[MHL_DST_VERSION] >= 0x30 ? 
CM_MHL3 : CM_MHL1; if (mode > ctx->mode) sii8620_set_mode(ctx, mode); sii8620_peer_specific_init(ctx); sii8620_write(ctx, REG_INTR9_MASK, BIT_INTR9_DEVCAP_DONE | BIT_INTR9_EDID_DONE | BIT_INTR9_EDID_ERROR); } static void sii8620_status_changed_path(struct sii8620 *ctx) { u8 link_mode; if (ctx->use_packed_pixel) link_mode = MHL_DST_LM_CLK_MODE_PACKED_PIXEL; else link_mode = MHL_DST_LM_CLK_MODE_NORMAL; if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) link_mode |= MHL_DST_LM_PATH_ENABLED; sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), link_mode); } static void sii8620_msc_mr_write_stat(struct sii8620 *ctx) { u8 st[MHL_DST_SIZE], xst[MHL_XDS_SIZE]; sii8620_read_buf(ctx, REG_MHL_STAT_0, st, MHL_DST_SIZE); sii8620_read_buf(ctx, REG_MHL_EXTSTAT_0, xst, MHL_XDS_SIZE); sii8620_update_array(ctx->stat, st, MHL_DST_SIZE); sii8620_update_array(ctx->xstat, xst, MHL_XDS_SIZE); if (ctx->stat[MHL_DST_CONNECTED_RDY] & st[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY) { sii8620_status_dcap_ready(ctx); if (!sii8620_is_mhl3(ctx)) sii8620_mt_read_devcap(ctx, false); } if (st[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) sii8620_status_changed_path(ctx); } static void sii8620_ecbus_up(struct sii8620 *ctx, int ret) { if (ret < 0) return; sii8620_set_mode(ctx, CM_ECBUS_S); } static void sii8620_got_ecbus_speed(struct sii8620 *ctx, int ret) { if (ret < 0) return; sii8620_mt_write_stat(ctx, MHL_XDS_REG(CURR_ECBUS_MODE), MHL_XDS_ECBUS_S | MHL_XDS_SLOT_MODE_8BIT); sii8620_mt_rap(ctx, MHL_RAP_CBUS_MODE_UP); sii8620_mt_set_cont(ctx, sii8620_ecbus_up); } static void sii8620_mhl_burst_emsc_support_set(struct mhl_burst_emsc_support *d, enum mhl_burst_id id) { sii8620_mhl_burst_hdr_set(&d->hdr, MHL_BURST_ID_EMSC_SUPPORT); d->num_entries = 1; d->burst_id[0] = cpu_to_be16(id); } static void sii8620_send_features(struct sii8620 *ctx) { u8 buf[16]; sii8620_write(ctx, REG_MDT_XMIT_CTRL, BIT_MDT_XMIT_CTRL_EN | BIT_MDT_XMIT_CTRL_FIXED_BURST_LEN); sii8620_mhl_burst_emsc_support_set((void 
*)buf, MHL_BURST_ID_HID_PAYLOAD); sii8620_write_buf(ctx, REG_MDT_XMIT_WRITE_PORT, buf, ARRAY_SIZE(buf)); } static bool sii8620_rcp_consume(struct sii8620 *ctx, u8 scancode) { bool pressed = !(scancode & MHL_RCP_KEY_RELEASED_MASK); scancode &= MHL_RCP_KEY_ID_MASK; if (!IS_ENABLED(CONFIG_RC_CORE) || !ctx->rc_dev) return false; if (pressed) rc_keydown(ctx->rc_dev, RC_PROTO_CEC, scancode, 0); else rc_keyup(ctx->rc_dev); return true; } static void sii8620_msc_mr_set_int(struct sii8620 *ctx) { u8 ints[MHL_INT_SIZE]; sii8620_read_buf(ctx, REG_MHL_INT_0, ints, MHL_INT_SIZE); sii8620_write_buf(ctx, REG_MHL_INT_0, ints, MHL_INT_SIZE); if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_DCAP_CHG) { switch (ctx->mode) { case CM_MHL3: sii8620_mt_read_xdevcap_reg(ctx, MHL_XDC_ECBUS_SPEEDS); sii8620_mt_set_cont(ctx, sii8620_got_ecbus_speed); break; case CM_ECBUS_S: sii8620_mt_read_devcap(ctx, true); break; default: break; } } if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_REQ) sii8620_send_features(ctx); if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_COMPLETE) { ctx->feature_complete = true; if (ctx->edid) sii8620_enable_hpd(ctx); } } static struct sii8620_mt_msg *sii8620_msc_msg_first(struct sii8620 *ctx) { struct device *dev = ctx->dev; if (list_empty(&ctx->mt_queue)) { dev_err(dev, "unexpected MSC MT response\n"); return NULL; } return list_first_entry(&ctx->mt_queue, struct sii8620_mt_msg, node); } static void sii8620_msc_mt_done(struct sii8620 *ctx) { struct sii8620_mt_msg *msg = sii8620_msc_msg_first(ctx); if (!msg) return; msg->ret = sii8620_readb(ctx, REG_MSC_MT_RCVD_DATA0); ctx->mt_state = MT_STATE_DONE; } static void sii8620_msc_mr_msc_msg(struct sii8620 *ctx) { struct sii8620_mt_msg *msg; u8 buf[2]; sii8620_read_buf(ctx, REG_MSC_MR_MSC_MSG_RCVD_1ST_DATA, buf, 2); switch (buf[0]) { case MHL_MSC_MSG_RAPK: msg = sii8620_msc_msg_first(ctx); if (!msg) return; msg->ret = buf[1]; ctx->mt_state = MT_STATE_DONE; break; case MHL_MSC_MSG_RCP: if (!sii8620_rcp_consume(ctx, buf[1])) 
sii8620_mt_rcpe(ctx, MHL_RCPE_STATUS_INEFFECTIVE_KEY_CODE); sii8620_mt_rcpk(ctx, buf[1]); break; default: dev_err(ctx->dev, "%s message type %d,%d not supported", __func__, buf[0], buf[1]); } } static void sii8620_irq_msc(struct sii8620 *ctx) { u8 stat = sii8620_readb(ctx, REG_CBUS_INT_0); if (stat & ~BIT_CBUS_HPD_CHG) sii8620_write(ctx, REG_CBUS_INT_0, stat & ~BIT_CBUS_HPD_CHG); if (stat & BIT_CBUS_HPD_CHG) { u8 cbus_stat = sii8620_readb(ctx, REG_CBUS_STATUS); if ((cbus_stat ^ ctx->cbus_status) & BIT_CBUS_STATUS_CBUS_HPD) { sii8620_write(ctx, REG_CBUS_INT_0, BIT_CBUS_HPD_CHG); } else { stat ^= BIT_CBUS_STATUS_CBUS_HPD; cbus_stat ^= BIT_CBUS_STATUS_CBUS_HPD; } ctx->cbus_status = cbus_stat; } if (stat & BIT_CBUS_MSC_MR_WRITE_STAT) sii8620_msc_mr_write_stat(ctx); if (stat & BIT_CBUS_HPD_CHG) { if (ctx->cbus_status & BIT_CBUS_STATUS_CBUS_HPD) { ctx->sink_detected = true; sii8620_identify_sink(ctx); } else { sii8620_hpd_unplugged(ctx); } } if (stat & BIT_CBUS_MSC_MR_SET_INT) sii8620_msc_mr_set_int(ctx); if (stat & BIT_CBUS_MSC_MT_DONE) sii8620_msc_mt_done(ctx); if (stat & BIT_CBUS_MSC_MR_MSC_MSG) sii8620_msc_mr_msc_msg(ctx); } static void sii8620_irq_coc(struct sii8620 *ctx) { u8 stat = sii8620_readb(ctx, REG_COC_INTR); if (stat & BIT_COC_CALIBRATION_DONE) { u8 cstat = sii8620_readb(ctx, REG_COC_STAT_0); cstat &= BIT_COC_STAT_0_PLL_LOCKED | MSK_COC_STAT_0_FSM_STATE; if (cstat == (BIT_COC_STAT_0_PLL_LOCKED | 0x02)) { sii8620_write_seq_static(ctx, REG_COC_CTLB, 0, REG_TRXINTMH, BIT_TDM_INTR_SYNC_DATA | BIT_TDM_INTR_SYNC_WAIT ); } } sii8620_write(ctx, REG_COC_INTR, stat); } static void sii8620_irq_merr(struct sii8620 *ctx) { u8 stat = sii8620_readb(ctx, REG_CBUS_INT_1); sii8620_write(ctx, REG_CBUS_INT_1, stat); } static void sii8620_irq_edid(struct sii8620 *ctx) { u8 stat = sii8620_readb(ctx, REG_INTR9); sii8620_write(ctx, REG_INTR9, stat); if (stat & BIT_INTR9_DEVCAP_DONE) ctx->mt_state = MT_STATE_DONE; } static void sii8620_irq_scdt(struct sii8620 *ctx) { u8 stat = 
sii8620_readb(ctx, REG_INTR5); if (stat & BIT_INTR_SCDT_CHANGE) { u8 cstat = sii8620_readb(ctx, REG_TMDS_CSTAT_P3); if (cstat & BIT_TMDS_CSTAT_P3_SCDT) sii8620_start_video(ctx); } sii8620_write(ctx, REG_INTR5, stat); } static void sii8620_got_xdevcap(struct sii8620 *ctx, int ret) { if (ret < 0) return; sii8620_mt_read_devcap(ctx, false); } static void sii8620_irq_tdm(struct sii8620 *ctx) { u8 stat = sii8620_readb(ctx, REG_TRXINTH); u8 tdm = sii8620_readb(ctx, REG_TRXSTA2); if ((tdm & MSK_TDM_SYNCHRONIZED) == VAL_TDM_SYNCHRONIZED) { ctx->mode = CM_ECBUS_S; ctx->burst.rx_ack = 0; ctx->burst.r_size = SII8620_BURST_BUF_LEN; sii8620_burst_tx_rbuf_info(ctx, SII8620_BURST_BUF_LEN); sii8620_mt_read_devcap(ctx, true); sii8620_mt_set_cont(ctx, sii8620_got_xdevcap); } else { sii8620_write_seq_static(ctx, REG_MHL_PLL_CTL2, 0, REG_MHL_PLL_CTL2, BIT_MHL_PLL_CTL2_CLKDETECT_EN ); } sii8620_write(ctx, REG_TRXINTH, stat); } static void sii8620_irq_block(struct sii8620 *ctx) { u8 stat = sii8620_readb(ctx, REG_EMSCINTR); if (stat & BIT_EMSCINTR_SPI_DVLD) { u8 bstat = sii8620_readb(ctx, REG_SPIBURSTSTAT); if (bstat & BIT_SPIBURSTSTAT_EMSC_NORMAL_MODE) sii8620_burst_receive(ctx); } sii8620_write(ctx, REG_EMSCINTR, stat); } static void sii8620_irq_ddc(struct sii8620 *ctx) { u8 stat = sii8620_readb(ctx, REG_INTR3); if (stat & BIT_DDC_CMD_DONE) { sii8620_write(ctx, REG_INTR3_MASK, 0); if (sii8620_is_mhl3(ctx) && !ctx->feature_complete) sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE), MHL_INT_RC_FEAT_REQ); else sii8620_enable_hpd(ctx); } sii8620_write(ctx, REG_INTR3, stat); } /* endian agnostic, non-volatile version of test_bit */ static bool sii8620_test_bit(unsigned int nr, const u8 *addr) { return 1 & (addr[nr / BITS_PER_BYTE] >> (nr % BITS_PER_BYTE)); } static irqreturn_t sii8620_irq_thread(int irq, void *data) { static const struct { int bit; void (*handler)(struct sii8620 *ctx); } irq_vec[] = { { BIT_FAST_INTR_STAT_DISC, sii8620_irq_disc }, { BIT_FAST_INTR_STAT_G2WB, sii8620_irq_g2wb }, 
{ BIT_FAST_INTR_STAT_COC, sii8620_irq_coc }, { BIT_FAST_INTR_STAT_TDM, sii8620_irq_tdm }, { BIT_FAST_INTR_STAT_MSC, sii8620_irq_msc }, { BIT_FAST_INTR_STAT_MERR, sii8620_irq_merr }, { BIT_FAST_INTR_STAT_BLOCK, sii8620_irq_block }, { BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid }, { BIT_FAST_INTR_STAT_DDC, sii8620_irq_ddc }, { BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt }, }; struct sii8620 *ctx = data; u8 stats[LEN_FAST_INTR_STAT]; int i, ret; mutex_lock(&ctx->lock); sii8620_read_buf(ctx, REG_FAST_INTR_STAT, stats, ARRAY_SIZE(stats)); for (i = 0; i < ARRAY_SIZE(irq_vec); ++i) if (sii8620_test_bit(irq_vec[i].bit, stats)) irq_vec[i].handler(ctx); sii8620_burst_rx_all(ctx); sii8620_mt_work(ctx); sii8620_burst_send(ctx); ret = sii8620_clear_error(ctx); if (ret) { dev_err(ctx->dev, "Error during IRQ handling, %d.\n", ret); sii8620_mhl_disconnected(ctx); } mutex_unlock(&ctx->lock); return IRQ_HANDLED; } static void sii8620_cable_in(struct sii8620 *ctx) { struct device *dev = ctx->dev; u8 ver[5]; int ret; ret = sii8620_hw_on(ctx); if (ret) { dev_err(dev, "Error powering on, %d.\n", ret); return; } sii8620_read_buf(ctx, REG_VND_IDL, ver, ARRAY_SIZE(ver)); ret = sii8620_clear_error(ctx); if (ret) { dev_err(dev, "Error accessing I2C bus, %d.\n", ret); return; } dev_info(dev, "ChipID %02x%02x:%02x%02x rev %02x.\n", ver[1], ver[0], ver[3], ver[2], ver[4]); sii8620_write(ctx, REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 | BIT_DPD_OSC_EN); sii8620_xtal_set_rate(ctx); sii8620_disconnect(ctx); sii8620_write_seq_static(ctx, REG_MHL_CBUS_CTL0, VAL_MHL_CBUS_CTL0_CBUS_DRV_SEL_STRONG | VAL_MHL_CBUS_CTL0_CBUS_RGND_VBIAS_734, REG_MHL_CBUS_CTL1, VAL_MHL_CBUS_CTL1_1115_OHM, REG_DPD, BIT_DPD_PWRON_PLL | BIT_DPD_PDNTX12 | BIT_DPD_OSC_EN, ); ret = sii8620_clear_error(ctx); if (ret) { dev_err(dev, "Error accessing I2C bus, %d.\n", ret); return; } enable_irq(to_i2c_client(ctx->dev)->irq); } static void sii8620_init_rcp_input_dev(struct sii8620 *ctx) { struct rc_dev *rc_dev; int ret; if 
(!IS_ENABLED(CONFIG_RC_CORE)) return; rc_dev = rc_allocate_device(RC_DRIVER_SCANCODE); if (!rc_dev) { dev_err(ctx->dev, "Failed to allocate RC device\n"); ctx->error = -ENOMEM; return; } rc_dev->input_phys = "sii8620/input0"; rc_dev->input_id.bustype = BUS_VIRTUAL; rc_dev->map_name = RC_MAP_CEC; rc_dev->allowed_protocols = RC_PROTO_BIT_CEC; rc_dev->driver_name = "sii8620"; rc_dev->device_name = "sii8620"; ret = rc_register_device(rc_dev); if (ret) { dev_err(ctx->dev, "Failed to register RC device\n"); ctx->error = ret; rc_free_device(rc_dev); return; } ctx->rc_dev = rc_dev; } static void sii8620_cable_out(struct sii8620 *ctx) { disable_irq(to_i2c_client(ctx->dev)->irq); sii8620_hw_off(ctx); } static void sii8620_extcon_work(struct work_struct *work) { struct sii8620 *ctx = container_of(work, struct sii8620, extcon_wq); int state = extcon_get_state(ctx->extcon, EXTCON_DISP_MHL); if (state == ctx->cable_state) return; ctx->cable_state = state; if (state > 0) sii8620_cable_in(ctx); else sii8620_cable_out(ctx); } static int sii8620_extcon_notifier(struct notifier_block *self, unsigned long event, void *ptr) { struct sii8620 *ctx = container_of(self, struct sii8620, extcon_nb); schedule_work(&ctx->extcon_wq); return NOTIFY_DONE; } static int sii8620_extcon_init(struct sii8620 *ctx) { struct extcon_dev *edev; struct device_node *musb, *muic; int ret; /* get micro-USB connector node */ musb = of_graph_get_remote_node(ctx->dev->of_node, 1, -1); /* next get micro-USB Interface Controller node */ muic = of_get_next_parent(musb); if (!muic) { dev_info(ctx->dev, "no extcon found, switching to 'always on' mode\n"); return 0; } edev = extcon_find_edev_by_node(muic); of_node_put(muic); if (IS_ERR(edev)) { if (PTR_ERR(edev) == -EPROBE_DEFER) return -EPROBE_DEFER; dev_err(ctx->dev, "Invalid or missing extcon\n"); return PTR_ERR(edev); } ctx->extcon = edev; ctx->extcon_nb.notifier_call = sii8620_extcon_notifier; INIT_WORK(&ctx->extcon_wq, sii8620_extcon_work); ret = 
extcon_register_notifier(edev, EXTCON_DISP_MHL, &ctx->extcon_nb); if (ret) { dev_err(ctx->dev, "failed to register notifier for MHL\n"); return ret; } return 0; } static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge) { return container_of(bridge, struct sii8620, bridge); } static int sii8620_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct sii8620 *ctx = bridge_to_sii8620(bridge); sii8620_init_rcp_input_dev(ctx); return sii8620_clear_error(ctx); } static void sii8620_detach(struct drm_bridge *bridge) { struct sii8620 *ctx = bridge_to_sii8620(bridge); if (!IS_ENABLED(CONFIG_RC_CORE)) return; rc_unregister_device(ctx->rc_dev); } static int sii8620_is_packing_required(struct sii8620 *ctx, const struct drm_display_mode *mode) { int max_pclk, max_pclk_pp_mode; if (sii8620_is_mhl3(ctx)) { max_pclk = MHL3_MAX_PCLK; max_pclk_pp_mode = MHL3_MAX_PCLK_PP_MODE; } else { max_pclk = MHL1_MAX_PCLK; max_pclk_pp_mode = MHL1_MAX_PCLK_PP_MODE; } if (mode->clock < max_pclk) return 0; else if (mode->clock < max_pclk_pp_mode) return 1; else return -1; } static enum drm_mode_status sii8620_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode) { struct sii8620 *ctx = bridge_to_sii8620(bridge); int pack_required = sii8620_is_packing_required(ctx, mode); bool can_pack = ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL; switch (pack_required) { case 0: return MODE_OK; case 1: return (can_pack) ? 
MODE_OK : MODE_CLOCK_HIGH; default: return MODE_CLOCK_HIGH; } } static bool sii8620_mode_fixup(struct drm_bridge *bridge, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct sii8620 *ctx = bridge_to_sii8620(bridge); mutex_lock(&ctx->lock); ctx->use_packed_pixel = sii8620_is_packing_required(ctx, adjusted_mode); mutex_unlock(&ctx->lock); return true; } static const struct drm_bridge_funcs sii8620_bridge_funcs = { .attach = sii8620_attach, .detach = sii8620_detach, .mode_fixup = sii8620_mode_fixup, .mode_valid = sii8620_mode_valid, }; static int sii8620_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct sii8620 *ctx; int ret; ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->dev = dev; mutex_init(&ctx->lock); INIT_LIST_HEAD(&ctx->mt_queue); ctx->clk_xtal = devm_clk_get(dev, "xtal"); if (IS_ERR(ctx->clk_xtal)) return dev_err_probe(dev, PTR_ERR(ctx->clk_xtal), "failed to get xtal clock from DT\n"); if (!client->irq) { dev_err(dev, "no irq provided\n"); return -EINVAL; } irq_set_status_flags(client->irq, IRQ_NOAUTOEN); ret = devm_request_threaded_irq(dev, client->irq, NULL, sii8620_irq_thread, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "sii8620", ctx); if (ret < 0) return dev_err_probe(dev, ret, "failed to install IRQ handler\n"); ctx->gpio_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(ctx->gpio_reset)) return dev_err_probe(dev, PTR_ERR(ctx->gpio_reset), "failed to get reset gpio from DT\n"); ctx->supplies[0].supply = "cvcc10"; ctx->supplies[1].supply = "iovcc18"; ret = devm_regulator_bulk_get(dev, 2, ctx->supplies); if (ret) return ret; ret = sii8620_extcon_init(ctx); if (ret < 0) { dev_err(ctx->dev, "failed to initialize EXTCON\n"); return ret; } i2c_set_clientdata(client, ctx); ctx->bridge.funcs = &sii8620_bridge_funcs; ctx->bridge.of_node = dev->of_node; drm_bridge_add(&ctx->bridge); if (!ctx->extcon) sii8620_cable_in(ctx); return 0; } static void 
sii8620_remove(struct i2c_client *client) { struct sii8620 *ctx = i2c_get_clientdata(client); if (ctx->extcon) { extcon_unregister_notifier(ctx->extcon, EXTCON_DISP_MHL, &ctx->extcon_nb); flush_work(&ctx->extcon_wq); if (ctx->cable_state > 0) sii8620_cable_out(ctx); } else { sii8620_cable_out(ctx); } drm_bridge_remove(&ctx->bridge); } static const struct of_device_id sii8620_dt_match[] = { { .compatible = "sil,sii8620" }, { }, }; MODULE_DEVICE_TABLE(of, sii8620_dt_match); static const struct i2c_device_id sii8620_id[] = { { "sii8620", 0 }, { }, }; MODULE_DEVICE_TABLE(i2c, sii8620_id); static struct i2c_driver sii8620_driver = { .driver = { .name = "sii8620", .of_match_table = sii8620_dt_match, }, .probe = sii8620_probe, .remove = sii8620_remove, .id_table = sii8620_id, }; module_i2c_driver(sii8620_driver); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/bridge/sil-sii8620.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI TFP410 DVI transmitter bridge driver.
 *
 * Copyright (C) 2016 Texas Instruments
 * Author: Jyri Sarha <[email protected]>
 */

#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

/* Delay applied to HPD events before notifying DRM, to filter glitches. */
#define HOTPLUG_DEBOUNCE_MS		1100

/* Per-device state; embedded bridge and connector give back-pointers
 * via container_of(). */
struct tfp410 {
	struct drm_bridge	bridge;		/* this device's bridge */
	struct drm_connector	connector;	/* created in attach() unless
						 * NO_CONNECTOR is requested */
	u32			bus_format;	/* input media bus format */
	struct delayed_work	hpd_work;	/* debounced HPD notifier */
	struct gpio_desc	*powerdown;	/* optional PD# GPIO */
	struct drm_bridge_timings timings;	/* input sampling timings */
	struct drm_bridge	*next_bridge;	/* downstream (DDC/HPD) bridge */
	struct device *dev;
};

static inline struct tfp410 *
drm_bridge_to_tfp410(struct drm_bridge *bridge)
{
	return container_of(bridge, struct tfp410, bridge);
}

static inline struct tfp410 *
drm_connector_to_tfp410(struct drm_connector *connector)
{
	return container_of(connector, struct tfp410, connector);
}

/*
 * Connector .get_modes: read EDID through the downstream bridge when it
 * supports it; otherwise (or on read failure) fall back to the standard
 * XGA mode list up to 1920x1200 with 1024x768 preferred.
 */
static int tfp410_get_modes(struct drm_connector *connector)
{
	struct tfp410 *dvi = drm_connector_to_tfp410(connector);
	struct edid *edid;
	int ret;

	if (dvi->next_bridge->ops & DRM_BRIDGE_OP_EDID) {
		edid = drm_bridge_get_edid(dvi->next_bridge, connector);
		if (!edid)
			DRM_INFO("EDID read failed. Fallback to standard modes\n");
	} else {
		edid = NULL;
	}

	if (!edid) {
		/*
		 * No EDID, fallback on the XGA standard modes and prefer a mode
		 * pretty much anything can handle.
		 */
		ret = drm_add_modes_noedid(connector, 1920, 1200);
		drm_set_preferred_mode(connector, 1024, 768);
		return ret;
	}

	drm_connector_update_edid_property(connector, edid);

	ret = drm_add_edid_modes(connector, edid);

	/* drm_bridge_get_edid() allocated the EDID; we own and free it. */
	kfree(edid);

	return ret;
}

static const struct drm_connector_helper_funcs tfp410_con_helper_funcs = {
	.get_modes	= tfp410_get_modes,
};

/* Connector .detect: delegated entirely to the downstream bridge. */
static enum drm_connector_status
tfp410_connector_detect(struct drm_connector *connector, bool force)
{
	struct tfp410 *dvi = drm_connector_to_tfp410(connector);

	return drm_bridge_detect(dvi->next_bridge);
}

static const struct drm_connector_funcs tfp410_con_funcs = {
	.detect			= tfp410_connector_detect,
	.fill_modes		= drm_helper_probe_single_connector_modes,
	.destroy		= drm_connector_cleanup,
	.reset			= drm_atomic_helper_connector_reset,
	.atomic_duplicate_state	= drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_connector_destroy_state,
};

/* Debounce-expired worker: forward the hotplug event to the DRM core,
 * but only once the bridge is attached to a device. */
static void tfp410_hpd_work_func(struct work_struct *work)
{
	struct tfp410 *dvi;

	dvi = container_of(work, struct tfp410, hpd_work.work);

	if (dvi->bridge.dev)
		drm_helper_hpd_irq_event(dvi->bridge.dev);
}

/* Downstream-bridge HPD callback: (re)arm the debounce timer; rapid
 * toggles collapse into a single event. */
static void tfp410_hpd_callback(void *arg, enum drm_connector_status status)
{
	struct tfp410 *dvi = arg;

	mod_delayed_work(system_wq, &dvi->hpd_work,
			 msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
}

/*
 * Bridge .attach: chain to the downstream bridge (always with
 * NO_CONNECTOR, since any connector is created here), then, unless the
 * caller asked for no connector, create one whose polling mode depends
 * on the downstream bridge's detect/HPD capabilities.
 */
static int tfp410_attach(struct drm_bridge *bridge,
			 enum drm_bridge_attach_flags flags)
{
	struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
	int ret;

	ret = drm_bridge_attach(bridge->encoder, dvi->next_bridge, bridge,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret < 0)
		return ret;

	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
		return 0;

	if (!bridge->encoder) {
		dev_err(dvi->dev, "Missing encoder\n");
		return -ENODEV;
	}

	/* HPD-capable downstream bridge: event driven; otherwise poll. */
	if (dvi->next_bridge->ops & DRM_BRIDGE_OP_DETECT)
		dvi->connector.polled = DRM_CONNECTOR_POLL_HPD;
	else
		dvi->connector.polled = DRM_CONNECTOR_POLL_CONNECT
				      | DRM_CONNECTOR_POLL_DISCONNECT;

	if (dvi->next_bridge->ops & DRM_BRIDGE_OP_HPD) {
		INIT_DELAYED_WORK(&dvi->hpd_work, tfp410_hpd_work_func);
		drm_bridge_hpd_enable(dvi->next_bridge, tfp410_hpd_callback,
				      dvi);
	}

	drm_connector_helper_add(&dvi->connector,
				 &tfp410_con_helper_funcs);
	ret = drm_connector_init_with_ddc(bridge->dev, &dvi->connector,
					  &tfp410_con_funcs,
					  dvi->next_bridge->type,
					  dvi->next_bridge->ddc);
	if (ret) {
		dev_err(dvi->dev, "drm_connector_init_with_ddc() failed: %d\n",
			ret);
		return ret;
	}

	drm_display_info_set_bus_formats(&dvi->connector.display_info,
					 &dvi->bus_format, 1);

	drm_connector_attach_encoder(&dvi->connector, bridge->encoder);

	return 0;
}

/* Bridge .detach: undo the HPD hookup made in attach(); the connector
 * itself is cleaned up by the DRM core via .destroy. */
static void tfp410_detach(struct drm_bridge *bridge)
{
	struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);

	/* connector.dev is only set if attach() created the connector. */
	if (dvi->connector.dev && dvi->next_bridge->ops & DRM_BRIDGE_OP_HPD) {
		drm_bridge_hpd_disable(dvi->next_bridge);
		cancel_delayed_work_sync(&dvi->hpd_work);
	}
}

/* Deassert PD# (active-low powerdown handled by GPIO polarity). */
static void tfp410_enable(struct drm_bridge *bridge)
{
	struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);

	gpiod_set_value_cansleep(dvi->powerdown, 0);
}

/* Assert powerdown. */
static void tfp410_disable(struct drm_bridge *bridge)
{
	struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);

	gpiod_set_value_cansleep(dvi->powerdown, 1);
}

/* Pixel clock limits: 25-165 MHz (single-link DVI range). */
static enum drm_mode_status tfp410_mode_valid(struct drm_bridge *bridge,
					      const struct drm_display_info *info,
					      const struct drm_display_mode *mode)
{
	if (mode->clock < 25000)
		return MODE_CLOCK_LOW;

	if (mode->clock > 165000)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

/* Report the single supported input bus format (chosen at probe time
 * from the DT bus-width). Caller frees the returned array. */
static u32 *tfp410_get_input_bus_fmts(struct drm_bridge *bridge,
				      struct drm_bridge_state *bridge_state,
				      struct drm_crtc_state *crtc_state,
				      struct drm_connector_state *conn_state,
				      u32 output_fmt,
				      unsigned int *num_input_fmts)
{
	struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
	u32 *input_fmts;

	*num_input_fmts = 0;

	input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
	if (!input_fmts)
		return NULL;

	*num_input_fmts = 1;
	input_fmts[0] = dvi->bus_format;

	return input_fmts;
}

static int tfp410_atomic_check(struct drm_bridge *bridge,
			       struct drm_bridge_state *bridge_state,
			       struct drm_crtc_state *crtc_state,
			       struct drm_connector_state *conn_state)
{
	struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);

	/*
	 * There might be flags negotiation supported in future.
	 * Set the bus flags in atomic_check statically for now.
	 */
	bridge_state->input_bus_cfg.flags = dvi->timings.input_bus_flags;

	return 0;
}

static const struct drm_bridge_funcs tfp410_bridge_funcs = {
	.attach		= tfp410_attach,
	.detach		= tfp410_detach,
	.enable		= tfp410_enable,
	.disable	= tfp410_disable,
	.mode_valid	= tfp410_mode_valid,
	.atomic_reset	= drm_atomic_helper_bridge_reset,
	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
	.atomic_get_input_bus_fmts = tfp410_get_input_bus_fmts,
	.atomic_check	= tfp410_atomic_check,
};

/* Defaults matching strap configuration BSEL=1, DSEL=1, DKEN=0, EDGE=1. */
static const struct drm_bridge_timings tfp410_default_timings = {
	.input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE
			 | DRM_BUS_FLAG_DE_HIGH,
	.setup_time_ps = 1200,
	.hold_time_ps = 1300,
};

/*
 * Fill dvi->timings and dvi->bus_format. In I2C mode only the defaults
 * are used (register configuration not implemented); otherwise the
 * strap-pin configuration is read from DT endpoint and vendor
 * properties.
 */
static int tfp410_parse_timings(struct tfp410 *dvi, bool i2c)
{
	struct drm_bridge_timings *timings = &dvi->timings;
	struct device_node *ep;
	u32 pclk_sample = 0;
	u32 bus_width = 24;
	u32 deskew = 0;

	/* Start with defaults. */
	*timings = tfp410_default_timings;

	if (i2c)
		/*
		 * In I2C mode timings are configured through the I2C interface.
		 * As the driver doesn't support I2C configuration yet, we just
		 * go with the defaults (BSEL=1, DSEL=1, DKEN=0, EDGE=1).
		 */
		return 0;

	/*
	 * In non-I2C mode, timings are configured through the BSEL, DSEL, DKEN
	 * and EDGE pins. They are specified in DT through endpoint properties
	 * and vendor-specific properties.
	 */
	ep = of_graph_get_endpoint_by_regs(dvi->dev->of_node, 0, 0);
	if (!ep)
		return -EINVAL;

	/* Get the sampling edge from the endpoint. */
	of_property_read_u32(ep, "pclk-sample", &pclk_sample);
	of_property_read_u32(ep, "bus-width", &bus_width);
	of_node_put(ep);

	timings->input_bus_flags = DRM_BUS_FLAG_DE_HIGH;

	switch (pclk_sample) {
	case 0:
		timings->input_bus_flags |= DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE
					 |  DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE;
		break;
	case 1:
		timings->input_bus_flags |= DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE
					 |  DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE;
		break;
	default:
		return -EINVAL;
	}

	/* 12-bit dual-edge (DDR) or 24-bit single-edge input. */
	switch (bus_width) {
	case 12:
		dvi->bus_format = MEDIA_BUS_FMT_RGB888_2X12_LE;
		break;
	case 24:
		dvi->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
		break;
	default:
		return -EINVAL;
	}

	/* Get the setup and hold time from vendor-specific properties. */
	of_property_read_u32(dvi->dev->of_node, "ti,deskew", &deskew);
	if (deskew > 7)
		return -EINVAL;

	/* Each deskew step shifts setup/hold by 350 ps around the
	 * midpoint value of 4; hold time is clamped at zero. */
	timings->setup_time_ps = 1200 - 350 * ((s32)deskew - 4);
	timings->hold_time_ps = max(0, 1300 + 350 * ((s32)deskew - 4));

	return 0;
}

/*
 * Common init for both platform (strap-configured) and I2C probing:
 * allocate state, parse timings, resolve the downstream bridge from the
 * OF graph (port@1), grab the optional powerdown GPIO and register the
 * bridge. Returns -EPROBE_DEFER until the downstream bridge exists.
 */
static int tfp410_init(struct device *dev, bool i2c)
{
	struct device_node *node;
	struct tfp410 *dvi;
	int ret;

	if (!dev->of_node) {
		dev_err(dev, "device-tree data is missing\n");
		return -ENXIO;
	}

	dvi = devm_kzalloc(dev, sizeof(*dvi), GFP_KERNEL);
	if (!dvi)
		return -ENOMEM;

	dvi->dev = dev;
	dev_set_drvdata(dev, dvi);

	dvi->bridge.funcs = &tfp410_bridge_funcs;
	dvi->bridge.of_node = dev->of_node;
	dvi->bridge.timings = &dvi->timings;
	dvi->bridge.type = DRM_MODE_CONNECTOR_DVID;

	ret = tfp410_parse_timings(dvi, i2c);
	if (ret)
		return ret;

	/* Get the next bridge, connected to port@1. */
	node = of_graph_get_remote_node(dev->of_node, 1, -1);
	if (!node)
		return -ENODEV;

	dvi->next_bridge = of_drm_find_bridge(node);
	of_node_put(node);
	if (!dvi->next_bridge)
		return -EPROBE_DEFER;

	/* Get the powerdown GPIO. */
	dvi->powerdown = devm_gpiod_get_optional(dev, "powerdown",
						 GPIOD_OUT_HIGH);
	if (IS_ERR(dvi->powerdown)) {
		dev_err(dev, "failed to parse powerdown gpio\n");
		return PTR_ERR(dvi->powerdown);
	}

	/* Register the DRM bridge. */
	drm_bridge_add(&dvi->bridge);

	return 0;
}

/* Common teardown: unregister the bridge (everything else is devm). */
static void tfp410_fini(struct device *dev)
{
	struct tfp410 *dvi = dev_get_drvdata(dev);

	drm_bridge_remove(&dvi->bridge);
}

static int tfp410_probe(struct platform_device *pdev)
{
	return tfp410_init(&pdev->dev, false);
}

static void tfp410_remove(struct platform_device *pdev)
{
	tfp410_fini(&pdev->dev);
}

static const struct of_device_id tfp410_match[] = {
	{ .compatible = "ti,tfp410" },
	{},
};
MODULE_DEVICE_TABLE(of, tfp410_match);

static struct platform_driver tfp410_platform_driver = {
	.probe	= tfp410_probe,
	.remove_new = tfp410_remove,
	.driver	= {
		.name		= "tfp410-bridge",
		.of_match_table	= tfp410_match,
	},
};

#if IS_ENABLED(CONFIG_I2C)
/* There is currently no i2c functionality. */
static int tfp410_i2c_probe(struct i2c_client *client)
{
	int reg;

	/* The reg property is only validated, not used, here. */
	if (!client->dev.of_node || of_property_read_u32(client->dev.of_node,
							 "reg", &reg)) {
		dev_err(&client->dev,
			"Can't get i2c reg property from device-tree\n");
		return -ENXIO;
	}

	return tfp410_init(&client->dev, true);
}

static void tfp410_i2c_remove(struct i2c_client *client)
{
	tfp410_fini(&client->dev);
}

static const struct i2c_device_id tfp410_i2c_ids[] = {
	{ "tfp410", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, tfp410_i2c_ids);

static struct i2c_driver tfp410_i2c_driver = {
	.driver = {
		.name	= "tfp410",
		.of_match_table = tfp410_match,
	},
	.id_table	= tfp410_i2c_ids,
	.probe	= tfp410_i2c_probe,
	.remove	= tfp410_i2c_remove,
};
#endif /* IS_ENABLED(CONFIG_I2C) */

/* Track which of the two drivers actually registered, so module exit
 * only unregisters what module init succeeded at. */
static struct {
	uint i2c:1;
	uint platform:1;
} tfp410_registered_driver;

/*
 * Register both front ends; the module loads as long as at least one
 * registration succeeded (the error of the last failing one is returned
 * only when both fail).
 */
static int __init tfp410_module_init(void)
{
	int ret;

#if IS_ENABLED(CONFIG_I2C)
	ret = i2c_add_driver(&tfp410_i2c_driver);
	if (ret)
		pr_err("%s: registering i2c driver failed: %d",
		       __func__, ret);
	else
		tfp410_registered_driver.i2c = 1;
#endif

	ret = platform_driver_register(&tfp410_platform_driver);
	if (ret)
		pr_err("%s: registering platform driver failed: %d",
		       __func__, ret);
	else
		tfp410_registered_driver.platform = 1;

	if (tfp410_registered_driver.i2c ||
	    tfp410_registered_driver.platform)
		return 0;

	return ret;
}
module_init(tfp410_module_init);

static void __exit tfp410_module_exit(void)
{
#if IS_ENABLED(CONFIG_I2C)
	if (tfp410_registered_driver.i2c)
		i2c_del_driver(&tfp410_i2c_driver);
#endif

	if (tfp410_registered_driver.platform)
		platform_driver_unregister(&tfp410_platform_driver);
}
module_exit(tfp410_module_exit);

MODULE_AUTHOR("Jyri Sarha <[email protected]>");
MODULE_DESCRIPTION("TI TFP410 DVI bridge driver")
;
MODULE_LICENSE("GPL");
linux-master
drivers/gpu/drm/bridge/ti-tfp410.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2020 BayLibre, SAS * Author: Phong LE <[email protected]> * Copyright (C) 2018-2019, Artem Mygaiev * Copyright (C) 2017, Fresco Logic, Incorporated. * */ #include <linux/media-bus-format.h> #include <linux/module.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/i2c.h> #include <linux/bitfield.h> #include <linux/property.h> #include <linux/regmap.h> #include <linux/of_graph.h> #include <linux/gpio/consumer.h> #include <linux/pinctrl/consumer.h> #include <linux/regulator/consumer.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_edid.h> #include <drm/drm_modes.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <sound/hdmi-codec.h> #define IT66121_VENDOR_ID0_REG 0x00 #define IT66121_VENDOR_ID1_REG 0x01 #define IT66121_DEVICE_ID0_REG 0x02 #define IT66121_DEVICE_ID1_REG 0x03 #define IT66121_REVISION_MASK GENMASK(7, 4) #define IT66121_DEVICE_ID1_MASK GENMASK(3, 0) #define IT66121_MASTER_SEL_REG 0x10 #define IT66121_MASTER_SEL_HOST BIT(0) #define IT66121_AFE_DRV_REG 0x61 #define IT66121_AFE_DRV_RST BIT(4) #define IT66121_AFE_DRV_PWD BIT(5) #define IT66121_INPUT_MODE_REG 0x70 #define IT66121_INPUT_MODE_RGB (0 << 6) #define IT66121_INPUT_MODE_YUV422 BIT(6) #define IT66121_INPUT_MODE_YUV444 (2 << 6) #define IT66121_INPUT_MODE_CCIR656 BIT(4) #define IT66121_INPUT_MODE_SYNCEMB BIT(3) #define IT66121_INPUT_MODE_DDR BIT(2) #define IT66121_INPUT_CSC_REG 0x72 #define IT66121_INPUT_CSC_ENDITHER BIT(7) #define IT66121_INPUT_CSC_ENUDFILTER BIT(6) #define IT66121_INPUT_CSC_DNFREE_GO BIT(5) #define IT66121_INPUT_CSC_RGB_TO_YUV 0x02 #define IT66121_INPUT_CSC_YUV_TO_RGB 0x03 #define IT66121_INPUT_CSC_NO_CONV 0x00 #define IT66121_AFE_XP_REG 0x62 #define IT66121_AFE_XP_GAINBIT BIT(7) #define IT66121_AFE_XP_PWDPLL BIT(6) #define IT66121_AFE_XP_ENI BIT(5) #define IT66121_AFE_XP_ENO BIT(4) #define IT66121_AFE_XP_RESETB BIT(3) #define IT66121_AFE_XP_PWDI 
BIT(2) #define IT6610_AFE_XP_BYPASS BIT(0) #define IT66121_AFE_IP_REG 0x64 #define IT66121_AFE_IP_GAINBIT BIT(7) #define IT66121_AFE_IP_PWDPLL BIT(6) #define IT66121_AFE_IP_CKSEL_05 (0 << 4) #define IT66121_AFE_IP_CKSEL_1 BIT(4) #define IT66121_AFE_IP_CKSEL_2 (2 << 4) #define IT66121_AFE_IP_CKSEL_2OR4 (3 << 4) #define IT66121_AFE_IP_ER0 BIT(3) #define IT66121_AFE_IP_RESETB BIT(2) #define IT66121_AFE_IP_ENC BIT(1) #define IT66121_AFE_IP_EC1 BIT(0) #define IT66121_AFE_XP_EC1_REG 0x68 #define IT66121_AFE_XP_EC1_LOWCLK BIT(4) #define IT66121_SW_RST_REG 0x04 #define IT66121_SW_RST_REF BIT(5) #define IT66121_SW_RST_AREF BIT(4) #define IT66121_SW_RST_VID BIT(3) #define IT66121_SW_RST_AUD BIT(2) #define IT66121_SW_RST_HDCP BIT(0) #define IT66121_DDC_COMMAND_REG 0x15 #define IT66121_DDC_COMMAND_BURST_READ 0x0 #define IT66121_DDC_COMMAND_EDID_READ 0x3 #define IT66121_DDC_COMMAND_FIFO_CLR 0x9 #define IT66121_DDC_COMMAND_SCL_PULSE 0xA #define IT66121_DDC_COMMAND_ABORT 0xF #define IT66121_HDCP_REG 0x20 #define IT66121_HDCP_CPDESIRED BIT(0) #define IT66121_HDCP_EN1P1FEAT BIT(1) #define IT66121_INT_STATUS1_REG 0x06 #define IT66121_INT_STATUS1_AUD_OVF BIT(7) #define IT66121_INT_STATUS1_DDC_NOACK BIT(5) #define IT66121_INT_STATUS1_DDC_FIFOERR BIT(4) #define IT66121_INT_STATUS1_DDC_BUSHANG BIT(2) #define IT66121_INT_STATUS1_RX_SENS_STATUS BIT(1) #define IT66121_INT_STATUS1_HPD_STATUS BIT(0) #define IT66121_DDC_HEADER_REG 0x11 #define IT66121_DDC_HEADER_HDCP 0x74 #define IT66121_DDC_HEADER_EDID 0xA0 #define IT66121_DDC_OFFSET_REG 0x12 #define IT66121_DDC_BYTE_REG 0x13 #define IT66121_DDC_SEGMENT_REG 0x14 #define IT66121_DDC_RD_FIFO_REG 0x17 #define IT66121_CLK_BANK_REG 0x0F #define IT66121_CLK_BANK_PWROFF_RCLK BIT(6) #define IT66121_CLK_BANK_PWROFF_ACLK BIT(5) #define IT66121_CLK_BANK_PWROFF_TXCLK BIT(4) #define IT66121_CLK_BANK_PWROFF_CRCLK BIT(3) #define IT66121_CLK_BANK_0 0 #define IT66121_CLK_BANK_1 1 #define IT66121_INT_REG 0x05 #define IT66121_INT_ACTIVE_HIGH BIT(7) #define 
IT66121_INT_OPEN_DRAIN BIT(6) #define IT66121_INT_TX_CLK_OFF BIT(0) #define IT66121_INT_MASK1_REG 0x09 #define IT66121_INT_MASK1_AUD_OVF BIT(7) #define IT66121_INT_MASK1_DDC_NOACK BIT(5) #define IT66121_INT_MASK1_DDC_FIFOERR BIT(4) #define IT66121_INT_MASK1_DDC_BUSHANG BIT(2) #define IT66121_INT_MASK1_RX_SENS BIT(1) #define IT66121_INT_MASK1_HPD BIT(0) #define IT66121_INT_CLR1_REG 0x0C #define IT66121_INT_CLR1_PKTACP BIT(7) #define IT66121_INT_CLR1_PKTNULL BIT(6) #define IT66121_INT_CLR1_PKTGEN BIT(5) #define IT66121_INT_CLR1_KSVLISTCHK BIT(4) #define IT66121_INT_CLR1_AUTHDONE BIT(3) #define IT66121_INT_CLR1_AUTHFAIL BIT(2) #define IT66121_INT_CLR1_RX_SENS BIT(1) #define IT66121_INT_CLR1_HPD BIT(0) #define IT66121_AV_MUTE_REG 0xC1 #define IT66121_AV_MUTE_ON BIT(0) #define IT66121_AV_MUTE_BLUESCR BIT(1) #define IT66121_PKT_CTS_CTRL_REG 0xC5 #define IT66121_PKT_CTS_CTRL_SEL BIT(1) #define IT66121_PKT_GEN_CTRL_REG 0xC6 #define IT66121_PKT_GEN_CTRL_ON BIT(0) #define IT66121_PKT_GEN_CTRL_RPT BIT(1) #define IT66121_AVIINFO_DB1_REG 0x158 #define IT66121_AVIINFO_DB2_REG 0x159 #define IT66121_AVIINFO_DB3_REG 0x15A #define IT66121_AVIINFO_DB4_REG 0x15B #define IT66121_AVIINFO_DB5_REG 0x15C #define IT66121_AVIINFO_CSUM_REG 0x15D #define IT66121_AVIINFO_DB6_REG 0x15E #define IT66121_AVIINFO_DB7_REG 0x15F #define IT66121_AVIINFO_DB8_REG 0x160 #define IT66121_AVIINFO_DB9_REG 0x161 #define IT66121_AVIINFO_DB10_REG 0x162 #define IT66121_AVIINFO_DB11_REG 0x163 #define IT66121_AVIINFO_DB12_REG 0x164 #define IT66121_AVIINFO_DB13_REG 0x165 #define IT66121_AVI_INFO_PKT_REG 0xCD #define IT66121_AVI_INFO_PKT_ON BIT(0) #define IT66121_AVI_INFO_PKT_RPT BIT(1) #define IT66121_HDMI_MODE_REG 0xC0 #define IT66121_HDMI_MODE_HDMI BIT(0) #define IT66121_SYS_STATUS_REG 0x0E #define IT66121_SYS_STATUS_ACTIVE_IRQ BIT(7) #define IT66121_SYS_STATUS_HPDETECT BIT(6) #define IT66121_SYS_STATUS_SENDECTECT BIT(5) #define IT66121_SYS_STATUS_VID_STABLE BIT(4) #define IT66121_SYS_STATUS_AUD_CTS_CLR BIT(1) 
#define IT66121_SYS_STATUS_CLEAR_IRQ BIT(0) #define IT66121_DDC_STATUS_REG 0x16 #define IT66121_DDC_STATUS_TX_DONE BIT(7) #define IT66121_DDC_STATUS_ACTIVE BIT(6) #define IT66121_DDC_STATUS_NOACK BIT(5) #define IT66121_DDC_STATUS_WAIT_BUS BIT(4) #define IT66121_DDC_STATUS_ARBI_LOSE BIT(3) #define IT66121_DDC_STATUS_FIFO_FULL BIT(2) #define IT66121_DDC_STATUS_FIFO_EMPTY BIT(1) #define IT66121_DDC_STATUS_FIFO_VALID BIT(0) #define IT66121_EDID_SLEEP_US 20000 #define IT66121_EDID_TIMEOUT_US 200000 #define IT66121_EDID_FIFO_SIZE 32 #define IT66121_CLK_CTRL0_REG 0x58 #define IT66121_CLK_CTRL0_AUTO_OVER_SAMPLING BIT(4) #define IT66121_CLK_CTRL0_EXT_MCLK_MASK GENMASK(3, 2) #define IT66121_CLK_CTRL0_EXT_MCLK_128FS (0 << 2) #define IT66121_CLK_CTRL0_EXT_MCLK_256FS BIT(2) #define IT66121_CLK_CTRL0_EXT_MCLK_512FS (2 << 2) #define IT66121_CLK_CTRL0_EXT_MCLK_1024FS (3 << 2) #define IT66121_CLK_CTRL0_AUTO_IPCLK BIT(0) #define IT66121_CLK_STATUS1_REG 0x5E #define IT66121_CLK_STATUS2_REG 0x5F #define IT66121_AUD_CTRL0_REG 0xE0 #define IT66121_AUD_SWL (3 << 6) #define IT66121_AUD_16BIT (0 << 6) #define IT66121_AUD_18BIT BIT(6) #define IT66121_AUD_20BIT (2 << 6) #define IT66121_AUD_24BIT (3 << 6) #define IT66121_AUD_SPDIFTC BIT(5) #define IT66121_AUD_SPDIF BIT(4) #define IT66121_AUD_I2S (0 << 4) #define IT66121_AUD_EN_I2S3 BIT(3) #define IT66121_AUD_EN_I2S2 BIT(2) #define IT66121_AUD_EN_I2S1 BIT(1) #define IT66121_AUD_EN_I2S0 BIT(0) #define IT66121_AUD_CTRL0_AUD_SEL BIT(4) #define IT66121_AUD_CTRL1_REG 0xE1 #define IT66121_AUD_FIFOMAP_REG 0xE2 #define IT66121_AUD_CTRL3_REG 0xE3 #define IT66121_AUD_SRCVALID_FLAT_REG 0xE4 #define IT66121_AUD_FLAT_SRC0 BIT(4) #define IT66121_AUD_FLAT_SRC1 BIT(5) #define IT66121_AUD_FLAT_SRC2 BIT(6) #define IT66121_AUD_FLAT_SRC3 BIT(7) #define IT66121_AUD_HDAUDIO_REG 0xE5 #define IT66121_AUD_PKT_CTS0_REG 0x130 #define IT66121_AUD_PKT_CTS1_REG 0x131 #define IT66121_AUD_PKT_CTS2_REG 0x132 #define IT66121_AUD_PKT_N0_REG 0x133 #define IT66121_AUD_PKT_N1_REG 
0x134 #define IT66121_AUD_PKT_N2_REG 0x135 #define IT66121_AUD_CHST_MODE_REG 0x191 #define IT66121_AUD_CHST_CAT_REG 0x192 #define IT66121_AUD_CHST_SRCNUM_REG 0x193 #define IT66121_AUD_CHST_CHTNUM_REG 0x194 #define IT66121_AUD_CHST_CA_FS_REG 0x198 #define IT66121_AUD_CHST_OFS_WL_REG 0x199 #define IT66121_AUD_PKT_CTS_CNT0_REG 0x1A0 #define IT66121_AUD_PKT_CTS_CNT1_REG 0x1A1 #define IT66121_AUD_PKT_CTS_CNT2_REG 0x1A2 #define IT66121_AUD_FS_22P05K 0x4 #define IT66121_AUD_FS_44P1K 0x0 #define IT66121_AUD_FS_88P2K 0x8 #define IT66121_AUD_FS_176P4K 0xC #define IT66121_AUD_FS_24K 0x6 #define IT66121_AUD_FS_48K 0x2 #define IT66121_AUD_FS_96K 0xA #define IT66121_AUD_FS_192K 0xE #define IT66121_AUD_FS_768K 0x9 #define IT66121_AUD_FS_32K 0x3 #define IT66121_AUD_FS_OTHER 0x1 #define IT66121_AUD_SWL_21BIT 0xD #define IT66121_AUD_SWL_24BIT 0xB #define IT66121_AUD_SWL_23BIT 0x9 #define IT66121_AUD_SWL_22BIT 0x5 #define IT66121_AUD_SWL_20BIT 0x3 #define IT66121_AUD_SWL_17BIT 0xC #define IT66121_AUD_SWL_19BIT 0x8 #define IT66121_AUD_SWL_18BIT 0x4 #define IT66121_AUD_SWL_16BIT 0x2 #define IT66121_AUD_SWL_NOT_INDICATED 0x0 #define IT66121_AFE_CLK_HIGH 80000 /* Khz */ enum chip_id { ID_IT6610, ID_IT66121, }; struct it66121_chip_info { enum chip_id id; u16 vid, pid; }; struct it66121_ctx { struct regmap *regmap; struct drm_bridge bridge; struct drm_bridge *next_bridge; struct drm_connector *connector; struct device *dev; struct gpio_desc *gpio_reset; struct i2c_client *client; u32 bus_width; struct mutex lock; /* Protects fields below and device registers */ struct hdmi_avi_infoframe hdmi_avi_infoframe; struct { struct platform_device *pdev; u8 ch_enable; u8 fs; u8 swl; bool auto_cts; } audio; const struct it66121_chip_info *info; }; static const struct regmap_range_cfg it66121_regmap_banks[] = { { .name = "it66121", .range_min = 0x00, .range_max = 0x1FF, .selector_reg = IT66121_CLK_BANK_REG, .selector_mask = 0x1, .selector_shift = 0, .window_start = 0x00, .window_len = 0x100, }, }; 
static const struct regmap_config it66121_regmap_config = { .val_bits = 8, .reg_bits = 8, .max_register = 0x1FF, .ranges = it66121_regmap_banks, .num_ranges = ARRAY_SIZE(it66121_regmap_banks), }; static void it66121_hw_reset(struct it66121_ctx *ctx) { gpiod_set_value(ctx->gpio_reset, 1); msleep(20); gpiod_set_value(ctx->gpio_reset, 0); } static inline int it66121_preamble_ddc(struct it66121_ctx *ctx) { return regmap_write(ctx->regmap, IT66121_MASTER_SEL_REG, IT66121_MASTER_SEL_HOST); } static inline int it66121_fire_afe(struct it66121_ctx *ctx) { return regmap_write(ctx->regmap, IT66121_AFE_DRV_REG, 0); } /* TOFIX: Handle YCbCr Input & Output */ static int it66121_configure_input(struct it66121_ctx *ctx) { int ret; u8 mode = IT66121_INPUT_MODE_RGB; if (ctx->bus_width == 12) mode |= IT66121_INPUT_MODE_DDR; ret = regmap_write(ctx->regmap, IT66121_INPUT_MODE_REG, mode); if (ret) return ret; return regmap_write(ctx->regmap, IT66121_INPUT_CSC_REG, IT66121_INPUT_CSC_NO_CONV); } /** * it66121_configure_afe() - Configure the analog front end * @ctx: it66121_ctx object * @mode: mode to configure * * RETURNS: * zero if success, a negative error code otherwise. 
*/ static int it66121_configure_afe(struct it66121_ctx *ctx, const struct drm_display_mode *mode) { int ret; ret = regmap_write(ctx->regmap, IT66121_AFE_DRV_REG, IT66121_AFE_DRV_RST); if (ret) return ret; if (mode->clock > IT66121_AFE_CLK_HIGH) { ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_REG, IT66121_AFE_XP_GAINBIT | IT66121_AFE_XP_ENO, IT66121_AFE_XP_GAINBIT); if (ret) return ret; ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG, IT66121_AFE_IP_GAINBIT | IT66121_AFE_IP_ER0, IT66121_AFE_IP_GAINBIT); if (ret) return ret; if (ctx->info->id == ID_IT66121) { ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG, IT66121_AFE_IP_EC1, 0); if (ret) return ret; ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_EC1_REG, IT66121_AFE_XP_EC1_LOWCLK, 0x80); if (ret) return ret; } } else { ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_REG, IT66121_AFE_XP_GAINBIT | IT66121_AFE_XP_ENO, IT66121_AFE_XP_ENO); if (ret) return ret; ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG, IT66121_AFE_IP_GAINBIT | IT66121_AFE_IP_ER0, IT66121_AFE_IP_ER0); if (ret) return ret; if (ctx->info->id == ID_IT66121) { ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG, IT66121_AFE_IP_EC1, IT66121_AFE_IP_EC1); if (ret) return ret; ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_EC1_REG, IT66121_AFE_XP_EC1_LOWCLK, IT66121_AFE_XP_EC1_LOWCLK); if (ret) return ret; } } /* Clear reset flags */ ret = regmap_write_bits(ctx->regmap, IT66121_SW_RST_REG, IT66121_SW_RST_REF | IT66121_SW_RST_VID, 0); if (ret) return ret; if (ctx->info->id == ID_IT6610) { ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_REG, IT6610_AFE_XP_BYPASS, IT6610_AFE_XP_BYPASS); if (ret) return ret; } return it66121_fire_afe(ctx); } static inline int it66121_wait_ddc_ready(struct it66121_ctx *ctx) { int ret, val; u32 error = IT66121_DDC_STATUS_NOACK | IT66121_DDC_STATUS_WAIT_BUS | IT66121_DDC_STATUS_ARBI_LOSE; u32 done = IT66121_DDC_STATUS_TX_DONE; ret = regmap_read_poll_timeout(ctx->regmap, 
IT66121_DDC_STATUS_REG, val, val & (error | done), IT66121_EDID_SLEEP_US, IT66121_EDID_TIMEOUT_US); if (ret) return ret; if (val & error) return -EAGAIN; return 0; } static int it66121_abort_ddc_ops(struct it66121_ctx *ctx) { int ret; unsigned int swreset, cpdesire; ret = regmap_read(ctx->regmap, IT66121_SW_RST_REG, &swreset); if (ret) return ret; ret = regmap_read(ctx->regmap, IT66121_HDCP_REG, &cpdesire); if (ret) return ret; ret = regmap_write(ctx->regmap, IT66121_HDCP_REG, cpdesire & (~IT66121_HDCP_CPDESIRED & 0xFF)); if (ret) return ret; ret = regmap_write(ctx->regmap, IT66121_SW_RST_REG, (swreset | IT66121_SW_RST_HDCP)); if (ret) return ret; ret = it66121_preamble_ddc(ctx); if (ret) return ret; ret = regmap_write(ctx->regmap, IT66121_DDC_COMMAND_REG, IT66121_DDC_COMMAND_ABORT); if (ret) return ret; return it66121_wait_ddc_ready(ctx); } static int it66121_get_edid_block(void *context, u8 *buf, unsigned int block, size_t len) { struct it66121_ctx *ctx = context; int remain = len; int offset = 0; int ret, cnt; offset = (block % 2) * len; block = block / 2; while (remain > 0) { cnt = (remain > IT66121_EDID_FIFO_SIZE) ? 
IT66121_EDID_FIFO_SIZE : remain; ret = regmap_write(ctx->regmap, IT66121_DDC_COMMAND_REG, IT66121_DDC_COMMAND_FIFO_CLR); if (ret) return ret; ret = it66121_wait_ddc_ready(ctx); if (ret) return ret; ret = regmap_write(ctx->regmap, IT66121_DDC_OFFSET_REG, offset); if (ret) return ret; ret = regmap_write(ctx->regmap, IT66121_DDC_BYTE_REG, cnt); if (ret) return ret; ret = regmap_write(ctx->regmap, IT66121_DDC_SEGMENT_REG, block); if (ret) return ret; ret = regmap_write(ctx->regmap, IT66121_DDC_COMMAND_REG, IT66121_DDC_COMMAND_EDID_READ); if (ret) return ret; offset += cnt; remain -= cnt; ret = it66121_wait_ddc_ready(ctx); if (ret) { it66121_abort_ddc_ops(ctx); return ret; } ret = regmap_noinc_read(ctx->regmap, IT66121_DDC_RD_FIFO_REG, buf, cnt); if (ret) return ret; buf += cnt; } return 0; } static bool it66121_is_hpd_detect(struct it66121_ctx *ctx) { int val; if (regmap_read(ctx->regmap, IT66121_SYS_STATUS_REG, &val)) return false; return val & IT66121_SYS_STATUS_HPDETECT; } static int it66121_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge); int ret; if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) return -EINVAL; ret = drm_bridge_attach(bridge->encoder, ctx->next_bridge, bridge, flags); if (ret) return ret; if (ctx->info->id == ID_IT66121) { ret = regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG, IT66121_CLK_BANK_PWROFF_RCLK, 0); if (ret) return ret; } ret = regmap_write_bits(ctx->regmap, IT66121_INT_REG, IT66121_INT_TX_CLK_OFF, 0); if (ret) return ret; ret = regmap_write_bits(ctx->regmap, IT66121_AFE_DRV_REG, IT66121_AFE_DRV_PWD, 0); if (ret) return ret; ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_REG, IT66121_AFE_XP_PWDI | IT66121_AFE_XP_PWDPLL, 0); if (ret) return ret; ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG, IT66121_AFE_IP_PWDPLL, 0); if (ret) return ret; ret = regmap_write_bits(ctx->regmap, IT66121_AFE_DRV_REG, IT66121_AFE_DRV_RST, 
0); if (ret) return ret; ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_REG, IT66121_AFE_XP_RESETB, IT66121_AFE_XP_RESETB); if (ret) return ret; ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG, IT66121_AFE_IP_RESETB, IT66121_AFE_IP_RESETB); if (ret) return ret; ret = regmap_write_bits(ctx->regmap, IT66121_SW_RST_REG, IT66121_SW_RST_REF, IT66121_SW_RST_REF); if (ret) return ret; /* Per programming manual, sleep here for bridge to settle */ msleep(50); return 0; } static int it66121_set_mute(struct it66121_ctx *ctx, bool mute) { int ret; unsigned int val = 0; if (mute) val = IT66121_AV_MUTE_ON; ret = regmap_write_bits(ctx->regmap, IT66121_AV_MUTE_REG, IT66121_AV_MUTE_ON, val); if (ret) return ret; return regmap_write(ctx->regmap, IT66121_PKT_GEN_CTRL_REG, IT66121_PKT_GEN_CTRL_ON | IT66121_PKT_GEN_CTRL_RPT); } #define MAX_OUTPUT_SEL_FORMATS 1 static u32 *it66121_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state, unsigned int *num_output_fmts) { u32 *output_fmts; output_fmts = kcalloc(MAX_OUTPUT_SEL_FORMATS, sizeof(*output_fmts), GFP_KERNEL); if (!output_fmts) return NULL; /* TOFIX handle more than MEDIA_BUS_FMT_RGB888_1X24 as output format */ output_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24; *num_output_fmts = 1; return output_fmts; } #define MAX_INPUT_SEL_FORMATS 1 static u32 *it66121_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state, u32 output_fmt, unsigned int *num_input_fmts) { struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge); u32 *input_fmts; *num_input_fmts = 0; input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts), GFP_KERNEL); if (!input_fmts) return NULL; if (ctx->bus_width == 12) /* IT66121FN Datasheet specifies Little-Endian ordering */ input_fmts[0] = 
MEDIA_BUS_FMT_RGB888_2X12_LE; else /* TOFIX support more input bus formats in 24bit width */ input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24; *num_input_fmts = 1; return input_fmts; } static void it66121_bridge_enable(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state) { struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge); struct drm_atomic_state *state = bridge_state->base.state; ctx->connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); it66121_set_mute(ctx, false); } static void it66121_bridge_disable(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state) { struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge); it66121_set_mute(ctx, true); ctx->connector = NULL; } static int it66121_bridge_check(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge); if (ctx->info->id == ID_IT6610) { /* The IT6610 only supports these settings */ bridge_state->input_bus_cfg.flags |= DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE; bridge_state->input_bus_cfg.flags &= ~DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE; } return 0; } static void it66121_bridge_mode_set(struct drm_bridge *bridge, const struct drm_display_mode *mode, const struct drm_display_mode *adjusted_mode) { u8 buf[HDMI_INFOFRAME_SIZE(AVI)]; struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge); int ret; mutex_lock(&ctx->lock); hdmi_avi_infoframe_init(&ctx->hdmi_avi_infoframe); ret = drm_hdmi_avi_infoframe_from_display_mode(&ctx->hdmi_avi_infoframe, ctx->connector, adjusted_mode); if (ret) { DRM_ERROR("Failed to setup AVI infoframe: %d\n", ret); goto unlock; } ret = hdmi_avi_infoframe_pack(&ctx->hdmi_avi_infoframe, buf, sizeof(buf)); if (ret < 0) { DRM_ERROR("Failed to pack infoframe: %d\n", ret); goto unlock; } /* Write new AVI infoframe packet */ 
ret = regmap_bulk_write(ctx->regmap, IT66121_AVIINFO_DB1_REG, &buf[HDMI_INFOFRAME_HEADER_SIZE], HDMI_AVI_INFOFRAME_SIZE); if (ret) goto unlock; if (regmap_write(ctx->regmap, IT66121_AVIINFO_CSUM_REG, buf[3])) goto unlock; /* Enable AVI infoframe */ if (regmap_write(ctx->regmap, IT66121_AVI_INFO_PKT_REG, IT66121_AVI_INFO_PKT_ON | IT66121_AVI_INFO_PKT_RPT)) goto unlock; /* Set TX mode to HDMI */ if (regmap_write(ctx->regmap, IT66121_HDMI_MODE_REG, IT66121_HDMI_MODE_HDMI)) goto unlock; if (ctx->info->id == ID_IT66121 && regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG, IT66121_CLK_BANK_PWROFF_TXCLK, IT66121_CLK_BANK_PWROFF_TXCLK)) { goto unlock; } if (it66121_configure_input(ctx)) goto unlock; if (it66121_configure_afe(ctx, adjusted_mode)) goto unlock; if (ctx->info->id == ID_IT66121 && regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG, IT66121_CLK_BANK_PWROFF_TXCLK, 0)) { goto unlock; } unlock: mutex_unlock(&ctx->lock); } static enum drm_mode_status it66121_bridge_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode) { struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge); unsigned long max_clock; max_clock = (ctx->bus_width == 12) ? 74250 : 148500; if (mode->clock > max_clock) return MODE_CLOCK_HIGH; if (mode->clock < 25000) return MODE_CLOCK_LOW; return MODE_OK; } static enum drm_connector_status it66121_bridge_detect(struct drm_bridge *bridge) { struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge); return it66121_is_hpd_detect(ctx) ? 
connector_status_connected : connector_status_disconnected; } static void it66121_bridge_hpd_enable(struct drm_bridge *bridge) { struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge); int ret; ret = regmap_write_bits(ctx->regmap, IT66121_INT_MASK1_REG, IT66121_INT_MASK1_HPD, 0); if (ret) dev_err(ctx->dev, "failed to enable HPD IRQ\n"); } static void it66121_bridge_hpd_disable(struct drm_bridge *bridge) { struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge); int ret; ret = regmap_write_bits(ctx->regmap, IT66121_INT_MASK1_REG, IT66121_INT_MASK1_HPD, IT66121_INT_MASK1_HPD); if (ret) dev_err(ctx->dev, "failed to disable HPD IRQ\n"); } static struct edid *it66121_bridge_get_edid(struct drm_bridge *bridge, struct drm_connector *connector) { struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge); struct edid *edid; int ret; mutex_lock(&ctx->lock); ret = it66121_preamble_ddc(ctx); if (ret) { edid = ERR_PTR(ret); goto out_unlock; } ret = regmap_write(ctx->regmap, IT66121_DDC_HEADER_REG, IT66121_DDC_HEADER_EDID); if (ret) { edid = ERR_PTR(ret); goto out_unlock; } edid = drm_do_get_edid(connector, it66121_get_edid_block, ctx); out_unlock: mutex_unlock(&ctx->lock); return edid; } static const struct drm_bridge_funcs it66121_bridge_funcs = { .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_reset = drm_atomic_helper_bridge_reset, .attach = it66121_bridge_attach, .atomic_get_output_bus_fmts = it66121_bridge_atomic_get_output_bus_fmts, .atomic_get_input_bus_fmts = it66121_bridge_atomic_get_input_bus_fmts, .atomic_enable = it66121_bridge_enable, .atomic_disable = it66121_bridge_disable, .atomic_check = it66121_bridge_check, .mode_set = it66121_bridge_mode_set, .mode_valid = it66121_bridge_mode_valid, .detect = it66121_bridge_detect, .get_edid = it66121_bridge_get_edid, .hpd_enable = it66121_bridge_hpd_enable, .hpd_disable = 
it66121_bridge_hpd_disable, }; static irqreturn_t it66121_irq_threaded_handler(int irq, void *dev_id) { int ret; unsigned int val; struct it66121_ctx *ctx = dev_id; struct device *dev = ctx->dev; enum drm_connector_status status; bool event = false; mutex_lock(&ctx->lock); ret = regmap_read(ctx->regmap, IT66121_SYS_STATUS_REG, &val); if (ret) goto unlock; if (!(val & IT66121_SYS_STATUS_ACTIVE_IRQ)) goto unlock; ret = regmap_read(ctx->regmap, IT66121_INT_STATUS1_REG, &val); if (ret) { dev_err(dev, "Cannot read STATUS1_REG %d\n", ret); } else if (val & IT66121_INT_STATUS1_HPD_STATUS) { regmap_write_bits(ctx->regmap, IT66121_INT_CLR1_REG, IT66121_INT_CLR1_HPD, IT66121_INT_CLR1_HPD); status = it66121_is_hpd_detect(ctx) ? connector_status_connected : connector_status_disconnected; event = true; } regmap_write_bits(ctx->regmap, IT66121_SYS_STATUS_REG, IT66121_SYS_STATUS_CLEAR_IRQ, IT66121_SYS_STATUS_CLEAR_IRQ); unlock: mutex_unlock(&ctx->lock); if (event) drm_bridge_hpd_notify(&ctx->bridge, status); return IRQ_HANDLED; } static int it661221_set_chstat(struct it66121_ctx *ctx, u8 iec60958_chstat[]) { int ret; ret = regmap_write(ctx->regmap, IT66121_AUD_CHST_MODE_REG, iec60958_chstat[0] & 0x7C); if (ret) return ret; ret = regmap_write(ctx->regmap, IT66121_AUD_CHST_CAT_REG, iec60958_chstat[1]); if (ret) return ret; ret = regmap_write(ctx->regmap, IT66121_AUD_CHST_SRCNUM_REG, iec60958_chstat[2] & 0x0F); if (ret) return ret; ret = regmap_write(ctx->regmap, IT66121_AUD_CHST_CHTNUM_REG, (iec60958_chstat[2] >> 4) & 0x0F); if (ret) return ret; ret = regmap_write(ctx->regmap, IT66121_AUD_CHST_CA_FS_REG, iec60958_chstat[3]); if (ret) return ret; return regmap_write(ctx->regmap, IT66121_AUD_CHST_OFS_WL_REG, iec60958_chstat[4]); } static int it661221_set_lpcm_audio(struct it66121_ctx *ctx, u8 audio_src_num, u8 audio_swl) { int ret; unsigned int audio_enable = 0; unsigned int audio_format = 0; switch (audio_swl) { case 16: audio_enable |= IT66121_AUD_16BIT; break; case 18: 
audio_enable |= IT66121_AUD_18BIT; break; case 20: audio_enable |= IT66121_AUD_20BIT; break; case 24: default: audio_enable |= IT66121_AUD_24BIT; break; } audio_format |= 0x40; switch (audio_src_num) { case 4: audio_enable |= IT66121_AUD_EN_I2S3 | IT66121_AUD_EN_I2S2 | IT66121_AUD_EN_I2S1 | IT66121_AUD_EN_I2S0; break; case 3: audio_enable |= IT66121_AUD_EN_I2S2 | IT66121_AUD_EN_I2S1 | IT66121_AUD_EN_I2S0; break; case 2: audio_enable |= IT66121_AUD_EN_I2S1 | IT66121_AUD_EN_I2S0; break; case 1: default: audio_format &= ~0x40; audio_enable |= IT66121_AUD_EN_I2S0; break; } audio_format |= 0x01; ctx->audio.ch_enable = audio_enable; ret = regmap_write(ctx->regmap, IT66121_AUD_CTRL0_REG, audio_enable & 0xF0); if (ret) return ret; ret = regmap_write(ctx->regmap, IT66121_AUD_CTRL1_REG, audio_format); if (ret) return ret; ret = regmap_write(ctx->regmap, IT66121_AUD_FIFOMAP_REG, 0xE4); if (ret) return ret; ret = regmap_write(ctx->regmap, IT66121_AUD_CTRL3_REG, 0x00); if (ret) return ret; ret = regmap_write(ctx->regmap, IT66121_AUD_SRCVALID_FLAT_REG, 0x00); if (ret) return ret; return regmap_write(ctx->regmap, IT66121_AUD_HDAUDIO_REG, 0x00); } static int it661221_set_ncts(struct it66121_ctx *ctx, u8 fs) { int ret; unsigned int n; switch (fs) { case IT66121_AUD_FS_32K: n = 4096; break; case IT66121_AUD_FS_44P1K: n = 6272; break; case IT66121_AUD_FS_48K: n = 6144; break; case IT66121_AUD_FS_88P2K: n = 12544; break; case IT66121_AUD_FS_96K: n = 12288; break; case IT66121_AUD_FS_176P4K: n = 25088; break; case IT66121_AUD_FS_192K: n = 24576; break; case IT66121_AUD_FS_768K: n = 24576; break; default: n = 6144; break; } ret = regmap_write(ctx->regmap, IT66121_AUD_PKT_N0_REG, (u8)((n) & 0xFF)); if (ret) return ret; ret = regmap_write(ctx->regmap, IT66121_AUD_PKT_N1_REG, (u8)((n >> 8) & 0xFF)); if (ret) return ret; ret = regmap_write(ctx->regmap, IT66121_AUD_PKT_N2_REG, (u8)((n >> 16) & 0xF)); if (ret) return ret; if (ctx->audio.auto_cts) { u8 loop_cnt = 255; u8 cts_stable_cnt = 0; 
unsigned int sum_cts = 0; unsigned int cts = 0; unsigned int last_cts = 0; unsigned int diff; unsigned int val; while (loop_cnt--) { msleep(30); regmap_read(ctx->regmap, IT66121_AUD_PKT_CTS_CNT2_REG, &val); cts = val << 12; regmap_read(ctx->regmap, IT66121_AUD_PKT_CTS_CNT1_REG, &val); cts |= val << 4; regmap_read(ctx->regmap, IT66121_AUD_PKT_CTS_CNT0_REG, &val); cts |= val >> 4; if (cts == 0) { continue; } else { if (last_cts > cts) diff = last_cts - cts; else diff = cts - last_cts; last_cts = cts; if (diff < 5) { cts_stable_cnt++; sum_cts += cts; } else { cts_stable_cnt = 0; sum_cts = 0; continue; } if (cts_stable_cnt >= 32) { last_cts = (sum_cts >> 5); break; } } } regmap_write(ctx->regmap, IT66121_AUD_PKT_CTS0_REG, (u8)((last_cts) & 0xFF)); regmap_write(ctx->regmap, IT66121_AUD_PKT_CTS1_REG, (u8)((last_cts >> 8) & 0xFF)); regmap_write(ctx->regmap, IT66121_AUD_PKT_CTS2_REG, (u8)((last_cts >> 16) & 0x0F)); } ret = regmap_write(ctx->regmap, 0xF8, 0xC3); if (ret) return ret; ret = regmap_write(ctx->regmap, 0xF8, 0xA5); if (ret) return ret; if (ctx->audio.auto_cts) { ret = regmap_write_bits(ctx->regmap, IT66121_PKT_CTS_CTRL_REG, IT66121_PKT_CTS_CTRL_SEL, 1); } else { ret = regmap_write_bits(ctx->regmap, IT66121_PKT_CTS_CTRL_REG, IT66121_PKT_CTS_CTRL_SEL, 0); } if (ret) return ret; return regmap_write(ctx->regmap, 0xF8, 0xFF); } static int it661221_audio_output_enable(struct it66121_ctx *ctx, bool enable) { int ret; if (enable) { ret = regmap_write_bits(ctx->regmap, IT66121_SW_RST_REG, IT66121_SW_RST_AUD | IT66121_SW_RST_AREF, 0); if (ret) return ret; ret = regmap_write_bits(ctx->regmap, IT66121_AUD_CTRL0_REG, IT66121_AUD_EN_I2S3 | IT66121_AUD_EN_I2S2 | IT66121_AUD_EN_I2S1 | IT66121_AUD_EN_I2S0, ctx->audio.ch_enable); } else { ret = regmap_write_bits(ctx->regmap, IT66121_AUD_CTRL0_REG, IT66121_AUD_EN_I2S3 | IT66121_AUD_EN_I2S2 | IT66121_AUD_EN_I2S1 | IT66121_AUD_EN_I2S0, ctx->audio.ch_enable & 0xF0); if (ret) return ret; ret = regmap_write_bits(ctx->regmap, 
IT66121_SW_RST_REG, IT66121_SW_RST_AUD | IT66121_SW_RST_AREF, IT66121_SW_RST_AUD | IT66121_SW_RST_AREF); } return ret; } static int it661221_audio_ch_enable(struct it66121_ctx *ctx, bool enable) { int ret; if (enable) { ret = regmap_write(ctx->regmap, IT66121_AUD_SRCVALID_FLAT_REG, 0); if (ret) return ret; ret = regmap_write(ctx->regmap, IT66121_AUD_CTRL0_REG, ctx->audio.ch_enable); } else { ret = regmap_write(ctx->regmap, IT66121_AUD_CTRL0_REG, ctx->audio.ch_enable & 0xF0); } return ret; } static int it66121_audio_hw_params(struct device *dev, void *data, struct hdmi_codec_daifmt *daifmt, struct hdmi_codec_params *params) { u8 fs; u8 swl; int ret; struct it66121_ctx *ctx = dev_get_drvdata(dev); static u8 iec60958_chstat[5]; unsigned int channels = params->channels; unsigned int sample_rate = params->sample_rate; unsigned int sample_width = params->sample_width; mutex_lock(&ctx->lock); dev_dbg(dev, "%s: %u, %u, %u, %u\n", __func__, daifmt->fmt, sample_rate, sample_width, channels); switch (daifmt->fmt) { case HDMI_I2S: dev_dbg(dev, "Using HDMI I2S\n"); break; default: dev_err(dev, "Invalid or unsupported DAI format %d\n", daifmt->fmt); ret = -EINVAL; goto out; } // Set audio clock recovery (N/CTS) ret = regmap_write(ctx->regmap, IT66121_CLK_CTRL0_REG, IT66121_CLK_CTRL0_AUTO_OVER_SAMPLING | IT66121_CLK_CTRL0_EXT_MCLK_256FS | IT66121_CLK_CTRL0_AUTO_IPCLK); if (ret) goto out; ret = regmap_write_bits(ctx->regmap, IT66121_AUD_CTRL0_REG, IT66121_AUD_CTRL0_AUD_SEL, 0); // remove spdif selection if (ret) goto out; switch (sample_rate) { case 44100L: fs = IT66121_AUD_FS_44P1K; break; case 88200L: fs = IT66121_AUD_FS_88P2K; break; case 176400L: fs = IT66121_AUD_FS_176P4K; break; case 32000L: fs = IT66121_AUD_FS_32K; break; case 48000L: fs = IT66121_AUD_FS_48K; break; case 96000L: fs = IT66121_AUD_FS_96K; break; case 192000L: fs = IT66121_AUD_FS_192K; break; case 768000L: fs = IT66121_AUD_FS_768K; break; default: fs = IT66121_AUD_FS_48K; break; } ctx->audio.fs = fs; ret = 
it661221_set_ncts(ctx, fs); if (ret) { dev_err(dev, "Failed to set N/CTS: %d\n", ret); goto out; } // Set audio format register (except audio channel enable) ret = it661221_set_lpcm_audio(ctx, (channels + 1) / 2, sample_width); if (ret) { dev_err(dev, "Failed to set LPCM audio: %d\n", ret); goto out; } // Set audio channel status iec60958_chstat[0] = 0; if ((channels + 1) / 2 == 1) iec60958_chstat[0] |= 0x1; iec60958_chstat[0] &= ~(1 << 1); iec60958_chstat[1] = 0; iec60958_chstat[2] = (channels + 1) / 2; iec60958_chstat[2] |= (channels << 4) & 0xF0; iec60958_chstat[3] = fs; switch (sample_width) { case 21L: swl = IT66121_AUD_SWL_21BIT; break; case 24L: swl = IT66121_AUD_SWL_24BIT; break; case 23L: swl = IT66121_AUD_SWL_23BIT; break; case 22L: swl = IT66121_AUD_SWL_22BIT; break; case 20L: swl = IT66121_AUD_SWL_20BIT; break; case 17L: swl = IT66121_AUD_SWL_17BIT; break; case 19L: swl = IT66121_AUD_SWL_19BIT; break; case 18L: swl = IT66121_AUD_SWL_18BIT; break; case 16L: swl = IT66121_AUD_SWL_16BIT; break; default: swl = IT66121_AUD_SWL_NOT_INDICATED; break; } iec60958_chstat[4] = (((~fs) << 4) & 0xF0) | swl; ret = it661221_set_chstat(ctx, iec60958_chstat); if (ret) { dev_err(dev, "Failed to set channel status: %d\n", ret); goto out; } // Enable audio channel enable while input clock stable (if SPDIF). 
ret = it661221_audio_ch_enable(ctx, true); if (ret) { dev_err(dev, "Failed to enable audio channel: %d\n", ret); goto out; } ret = regmap_write_bits(ctx->regmap, IT66121_INT_MASK1_REG, IT66121_INT_MASK1_AUD_OVF, 0); if (ret) goto out; dev_dbg(dev, "HDMI audio enabled.\n"); out: mutex_unlock(&ctx->lock); return ret; } static int it66121_audio_startup(struct device *dev, void *data) { int ret; struct it66121_ctx *ctx = dev_get_drvdata(dev); dev_dbg(dev, "%s\n", __func__); mutex_lock(&ctx->lock); ret = it661221_audio_output_enable(ctx, true); if (ret) dev_err(dev, "Failed to enable audio output: %d\n", ret); mutex_unlock(&ctx->lock); return ret; } static void it66121_audio_shutdown(struct device *dev, void *data) { int ret; struct it66121_ctx *ctx = dev_get_drvdata(dev); dev_dbg(dev, "%s\n", __func__); mutex_lock(&ctx->lock); ret = it661221_audio_output_enable(ctx, false); if (ret) dev_err(dev, "Failed to disable audio output: %d\n", ret); mutex_unlock(&ctx->lock); } static int it66121_audio_mute(struct device *dev, void *data, bool enable, int direction) { int ret; struct it66121_ctx *ctx = dev_get_drvdata(dev); dev_dbg(dev, "%s: enable=%s, direction=%d\n", __func__, enable ? 
"true" : "false", direction); mutex_lock(&ctx->lock); if (enable) { ret = regmap_write_bits(ctx->regmap, IT66121_AUD_SRCVALID_FLAT_REG, IT66121_AUD_FLAT_SRC0 | IT66121_AUD_FLAT_SRC1 | IT66121_AUD_FLAT_SRC2 | IT66121_AUD_FLAT_SRC3, IT66121_AUD_FLAT_SRC0 | IT66121_AUD_FLAT_SRC1 | IT66121_AUD_FLAT_SRC2 | IT66121_AUD_FLAT_SRC3); } else { ret = regmap_write_bits(ctx->regmap, IT66121_AUD_SRCVALID_FLAT_REG, IT66121_AUD_FLAT_SRC0 | IT66121_AUD_FLAT_SRC1 | IT66121_AUD_FLAT_SRC2 | IT66121_AUD_FLAT_SRC3, 0); } mutex_unlock(&ctx->lock); return ret; } static int it66121_audio_get_eld(struct device *dev, void *data, u8 *buf, size_t len) { struct it66121_ctx *ctx = dev_get_drvdata(dev); mutex_lock(&ctx->lock); memcpy(buf, ctx->connector->eld, min(sizeof(ctx->connector->eld), len)); mutex_unlock(&ctx->lock); return 0; } static const struct hdmi_codec_ops it66121_audio_codec_ops = { .hw_params = it66121_audio_hw_params, .audio_startup = it66121_audio_startup, .audio_shutdown = it66121_audio_shutdown, .mute_stream = it66121_audio_mute, .get_eld = it66121_audio_get_eld, .no_capture_mute = 1, }; static int it66121_audio_codec_init(struct it66121_ctx *ctx, struct device *dev) { struct hdmi_codec_pdata codec_data = { .ops = &it66121_audio_codec_ops, .i2s = 1, /* Only i2s support for now */ .spdif = 0, .max_i2s_channels = 8, }; dev_dbg(dev, "%s\n", __func__); if (!of_property_read_bool(dev->of_node, "#sound-dai-cells")) { dev_info(dev, "No \"#sound-dai-cells\", no audio\n"); return 0; } ctx->audio.pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO, &codec_data, sizeof(codec_data)); if (IS_ERR(ctx->audio.pdev)) { dev_err(dev, "Failed to initialize HDMI audio codec: %d\n", PTR_ERR_OR_ZERO(ctx->audio.pdev)); } return PTR_ERR_OR_ZERO(ctx->audio.pdev); } static const char * const it66121_supplies[] = { "vcn33", "vcn18", "vrf12" }; static int it66121_probe(struct i2c_client *client) { const struct i2c_device_id *id = i2c_client_get_device_id(client); u32 
revision_id, vendor_ids[2] = { 0 }, device_ids[2] = { 0 }; struct device_node *ep; int ret; struct it66121_ctx *ctx; struct device *dev = &client->dev; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { dev_err(dev, "I2C check functionality failed.\n"); return -ENXIO; } ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; ep = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0); if (!ep) return -EINVAL; ctx->dev = dev; ctx->client = client; ctx->info = (const struct it66121_chip_info *) id->driver_data; of_property_read_u32(ep, "bus-width", &ctx->bus_width); of_node_put(ep); if (ctx->bus_width != 12 && ctx->bus_width != 24) return -EINVAL; ep = of_graph_get_remote_node(dev->of_node, 1, -1); if (!ep) { dev_err(ctx->dev, "The endpoint is unconnected\n"); return -EINVAL; } if (!of_device_is_available(ep)) { of_node_put(ep); dev_err(ctx->dev, "The remote device is disabled\n"); return -ENODEV; } ctx->next_bridge = of_drm_find_bridge(ep); of_node_put(ep); if (!ctx->next_bridge) { dev_dbg(ctx->dev, "Next bridge not found, deferring probe\n"); return -EPROBE_DEFER; } i2c_set_clientdata(client, ctx); mutex_init(&ctx->lock); ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(it66121_supplies), it66121_supplies); if (ret) { dev_err(dev, "Failed to enable power supplies\n"); return ret; } it66121_hw_reset(ctx); ctx->regmap = devm_regmap_init_i2c(client, &it66121_regmap_config); if (IS_ERR(ctx->regmap)) return PTR_ERR(ctx->regmap); regmap_read(ctx->regmap, IT66121_VENDOR_ID0_REG, &vendor_ids[0]); regmap_read(ctx->regmap, IT66121_VENDOR_ID1_REG, &vendor_ids[1]); regmap_read(ctx->regmap, IT66121_DEVICE_ID0_REG, &device_ids[0]); regmap_read(ctx->regmap, IT66121_DEVICE_ID1_REG, &device_ids[1]); /* Revision is shared with DEVICE_ID1 */ revision_id = FIELD_GET(IT66121_REVISION_MASK, device_ids[1]); device_ids[1] &= IT66121_DEVICE_ID1_MASK; if ((vendor_ids[1] << 8 | vendor_ids[0]) != ctx->info->vid || (device_ids[1] << 8 | device_ids[0]) != 
ctx->info->pid) { return -ENODEV; } ctx->bridge.funcs = &it66121_bridge_funcs; ctx->bridge.of_node = dev->of_node; ctx->bridge.type = DRM_MODE_CONNECTOR_HDMIA; ctx->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD; ret = devm_request_threaded_irq(dev, client->irq, NULL, it66121_irq_threaded_handler, IRQF_ONESHOT, dev_name(dev), ctx); if (ret < 0) { dev_err(dev, "Failed to request irq %d:%d\n", client->irq, ret); return ret; } it66121_audio_codec_init(ctx, dev); drm_bridge_add(&ctx->bridge); dev_info(ctx->dev, "IT66121 revision %d probed\n", revision_id); return 0; } static void it66121_remove(struct i2c_client *client) { struct it66121_ctx *ctx = i2c_get_clientdata(client); drm_bridge_remove(&ctx->bridge); mutex_destroy(&ctx->lock); } static const struct of_device_id it66121_dt_match[] = { { .compatible = "ite,it66121" }, { .compatible = "ite,it6610" }, { } }; MODULE_DEVICE_TABLE(of, it66121_dt_match); static const struct it66121_chip_info it66121_chip_info = { .id = ID_IT66121, .vid = 0x4954, .pid = 0x0612, }; static const struct it66121_chip_info it6610_chip_info = { .id = ID_IT6610, .vid = 0xca00, .pid = 0x0611, }; static const struct i2c_device_id it66121_id[] = { { "it66121", (kernel_ulong_t) &it66121_chip_info }, { "it6610", (kernel_ulong_t) &it6610_chip_info }, { } }; MODULE_DEVICE_TABLE(i2c, it66121_id); static struct i2c_driver it66121_driver = { .driver = { .name = "it66121", .of_match_table = it66121_dt_match, }, .probe = it66121_probe, .remove = it66121_remove, .id_table = it66121_id, }; module_i2c_driver(it66121_driver); MODULE_AUTHOR("Phong LE"); MODULE_DESCRIPTION("IT66121 HDMI transmitter driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/bridge/ite-it66121.c
// SPDX-License-Identifier: GPL-2.0-only /* * adv7511_cec.c - Analog Devices ADV7511/33 cec driver * * Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved. */ #include <linux/device.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/clk.h> #include <media/cec.h> #include "adv7511.h" static const u8 ADV7511_REG_CEC_RX_FRAME_HDR[] = { ADV7511_REG_CEC_RX1_FRAME_HDR, ADV7511_REG_CEC_RX2_FRAME_HDR, ADV7511_REG_CEC_RX3_FRAME_HDR, }; static const u8 ADV7511_REG_CEC_RX_FRAME_LEN[] = { ADV7511_REG_CEC_RX1_FRAME_LEN, ADV7511_REG_CEC_RX2_FRAME_LEN, ADV7511_REG_CEC_RX3_FRAME_LEN, }; #define ADV7511_INT1_CEC_MASK \ (ADV7511_INT1_CEC_TX_READY | ADV7511_INT1_CEC_TX_ARBIT_LOST | \ ADV7511_INT1_CEC_TX_RETRY_TIMEOUT | ADV7511_INT1_CEC_RX_READY1 | \ ADV7511_INT1_CEC_RX_READY2 | ADV7511_INT1_CEC_RX_READY3) static void adv_cec_tx_raw_status(struct adv7511 *adv7511, u8 tx_raw_status) { unsigned int offset = adv7511->reg_cec_offset; unsigned int val; if (regmap_read(adv7511->regmap_cec, ADV7511_REG_CEC_TX_ENABLE + offset, &val)) return; if ((val & 0x01) == 0) return; if (tx_raw_status & ADV7511_INT1_CEC_TX_ARBIT_LOST) { cec_transmit_attempt_done(adv7511->cec_adap, CEC_TX_STATUS_ARB_LOST); return; } if (tx_raw_status & ADV7511_INT1_CEC_TX_RETRY_TIMEOUT) { u8 status; u8 err_cnt = 0; u8 nack_cnt = 0; u8 low_drive_cnt = 0; unsigned int cnt; /* * We set this status bit since this hardware performs * retransmissions. 
*/ status = CEC_TX_STATUS_MAX_RETRIES; if (regmap_read(adv7511->regmap_cec, ADV7511_REG_CEC_TX_LOW_DRV_CNT + offset, &cnt)) { err_cnt = 1; status |= CEC_TX_STATUS_ERROR; } else { nack_cnt = cnt & 0xf; if (nack_cnt) status |= CEC_TX_STATUS_NACK; low_drive_cnt = cnt >> 4; if (low_drive_cnt) status |= CEC_TX_STATUS_LOW_DRIVE; } cec_transmit_done(adv7511->cec_adap, status, 0, nack_cnt, low_drive_cnt, err_cnt); return; } if (tx_raw_status & ADV7511_INT1_CEC_TX_READY) { cec_transmit_attempt_done(adv7511->cec_adap, CEC_TX_STATUS_OK); return; } } static void adv7511_cec_rx(struct adv7511 *adv7511, int rx_buf) { unsigned int offset = adv7511->reg_cec_offset; struct cec_msg msg = {}; unsigned int len; unsigned int val; u8 i; if (regmap_read(adv7511->regmap_cec, ADV7511_REG_CEC_RX_FRAME_LEN[rx_buf] + offset, &len)) return; msg.len = len & 0x1f; if (msg.len > 16) msg.len = 16; if (!msg.len) return; for (i = 0; i < msg.len; i++) { regmap_read(adv7511->regmap_cec, i + ADV7511_REG_CEC_RX_FRAME_HDR[rx_buf] + offset, &val); msg.msg[i] = val; } /* Toggle RX Ready Clear bit to re-enable this RX buffer */ regmap_update_bits(adv7511->regmap_cec, ADV7511_REG_CEC_RX_BUFFERS + offset, BIT(rx_buf), BIT(rx_buf)); regmap_update_bits(adv7511->regmap_cec, ADV7511_REG_CEC_RX_BUFFERS + offset, BIT(rx_buf), 0); cec_received_msg(adv7511->cec_adap, &msg); } void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1) { unsigned int offset = adv7511->reg_cec_offset; const u32 irq_tx_mask = ADV7511_INT1_CEC_TX_READY | ADV7511_INT1_CEC_TX_ARBIT_LOST | ADV7511_INT1_CEC_TX_RETRY_TIMEOUT; const u32 irq_rx_mask = ADV7511_INT1_CEC_RX_READY1 | ADV7511_INT1_CEC_RX_READY2 | ADV7511_INT1_CEC_RX_READY3; unsigned int rx_status; int rx_order[3] = { -1, -1, -1 }; int i; if (irq1 & irq_tx_mask) adv_cec_tx_raw_status(adv7511, irq1); if (!(irq1 & irq_rx_mask)) return; if (regmap_read(adv7511->regmap_cec, ADV7511_REG_CEC_RX_STATUS + offset, &rx_status)) return; /* * ADV7511_REG_CEC_RX_STATUS[5:0] contains 
the reception order of RX * buffers 0, 1, and 2 in bits [1:0], [3:2], and [5:4] respectively. * The values are to be interpreted as follows: * * 0 = buffer unused * 1 = buffer contains oldest received frame (if applicable) * 2 = buffer contains second oldest received frame (if applicable) * 3 = buffer contains third oldest received frame (if applicable) * * Fill rx_order with the sequence of RX buffer indices to * read from in order, where -1 indicates that there are no * more buffers to process. */ for (i = 0; i < 3; i++) { unsigned int timestamp = (rx_status >> (2 * i)) & 0x3; if (timestamp) rx_order[timestamp - 1] = i; } /* Read CEC RX buffers in the appropriate order as prescribed above */ for (i = 0; i < 3; i++) { int rx_buf = rx_order[i]; if (rx_buf < 0) break; adv7511_cec_rx(adv7511, rx_buf); } } static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable) { struct adv7511 *adv7511 = cec_get_drvdata(adap); unsigned int offset = adv7511->reg_cec_offset; if (adv7511->i2c_cec == NULL) return -EIO; if (!adv7511->cec_enabled_adap && enable) { /* power up cec section */ regmap_update_bits(adv7511->regmap_cec, ADV7511_REG_CEC_CLK_DIV + offset, 0x03, 0x01); /* non-legacy mode and clear all rx buffers */ regmap_write(adv7511->regmap_cec, ADV7511_REG_CEC_RX_BUFFERS + offset, 0x0f); regmap_write(adv7511->regmap_cec, ADV7511_REG_CEC_RX_BUFFERS + offset, 0x08); /* initially disable tx */ regmap_update_bits(adv7511->regmap_cec, ADV7511_REG_CEC_TX_ENABLE + offset, 1, 0); /* enabled irqs: */ /* tx: ready */ /* tx: arbitration lost */ /* tx: retry timeout */ /* rx: ready 1-3 */ regmap_update_bits(adv7511->regmap, ADV7511_REG_INT_ENABLE(1), 0x3f, ADV7511_INT1_CEC_MASK); } else if (adv7511->cec_enabled_adap && !enable) { regmap_update_bits(adv7511->regmap, ADV7511_REG_INT_ENABLE(1), 0x3f, 0); /* disable address mask 1-3 */ regmap_update_bits(adv7511->regmap_cec, ADV7511_REG_CEC_LOG_ADDR_MASK + offset, 0x70, 0x00); /* power down cec section */ 
regmap_update_bits(adv7511->regmap_cec, ADV7511_REG_CEC_CLK_DIV + offset, 0x03, 0x00); adv7511->cec_valid_addrs = 0; } adv7511->cec_enabled_adap = enable; return 0; } static int adv7511_cec_adap_log_addr(struct cec_adapter *adap, u8 addr) { struct adv7511 *adv7511 = cec_get_drvdata(adap); unsigned int offset = adv7511->reg_cec_offset; unsigned int i, free_idx = ADV7511_MAX_ADDRS; if (!adv7511->cec_enabled_adap) return addr == CEC_LOG_ADDR_INVALID ? 0 : -EIO; if (addr == CEC_LOG_ADDR_INVALID) { regmap_update_bits(adv7511->regmap_cec, ADV7511_REG_CEC_LOG_ADDR_MASK + offset, 0x70, 0); adv7511->cec_valid_addrs = 0; return 0; } for (i = 0; i < ADV7511_MAX_ADDRS; i++) { bool is_valid = adv7511->cec_valid_addrs & (1 << i); if (free_idx == ADV7511_MAX_ADDRS && !is_valid) free_idx = i; if (is_valid && adv7511->cec_addr[i] == addr) return 0; } if (i == ADV7511_MAX_ADDRS) { i = free_idx; if (i == ADV7511_MAX_ADDRS) return -ENXIO; } adv7511->cec_addr[i] = addr; adv7511->cec_valid_addrs |= 1 << i; switch (i) { case 0: /* enable address mask 0 */ regmap_update_bits(adv7511->regmap_cec, ADV7511_REG_CEC_LOG_ADDR_MASK + offset, 0x10, 0x10); /* set address for mask 0 */ regmap_update_bits(adv7511->regmap_cec, ADV7511_REG_CEC_LOG_ADDR_0_1 + offset, 0x0f, addr); break; case 1: /* enable address mask 1 */ regmap_update_bits(adv7511->regmap_cec, ADV7511_REG_CEC_LOG_ADDR_MASK + offset, 0x20, 0x20); /* set address for mask 1 */ regmap_update_bits(adv7511->regmap_cec, ADV7511_REG_CEC_LOG_ADDR_0_1 + offset, 0xf0, addr << 4); break; case 2: /* enable address mask 2 */ regmap_update_bits(adv7511->regmap_cec, ADV7511_REG_CEC_LOG_ADDR_MASK + offset, 0x40, 0x40); /* set address for mask 1 */ regmap_update_bits(adv7511->regmap_cec, ADV7511_REG_CEC_LOG_ADDR_2 + offset, 0x0f, addr); break; } return 0; } static int adv7511_cec_adap_transmit(struct cec_adapter *adap, u8 attempts, u32 signal_free_time, struct cec_msg *msg) { struct adv7511 *adv7511 = cec_get_drvdata(adap); unsigned int offset = 
adv7511->reg_cec_offset; u8 len = msg->len; unsigned int i; /* * The number of retries is the number of attempts - 1, but retry * at least once. It's not clear if a value of 0 is allowed, so * let's do at least one retry. */ regmap_update_bits(adv7511->regmap_cec, ADV7511_REG_CEC_TX_RETRY + offset, 0x70, max(1, attempts - 1) << 4); /* blocking, clear cec tx irq status */ regmap_update_bits(adv7511->regmap, ADV7511_REG_INT(1), 0x38, 0x38); /* write data */ for (i = 0; i < len; i++) regmap_write(adv7511->regmap_cec, i + ADV7511_REG_CEC_TX_FRAME_HDR + offset, msg->msg[i]); /* set length (data + header) */ regmap_write(adv7511->regmap_cec, ADV7511_REG_CEC_TX_FRAME_LEN + offset, len); /* start transmit, enable tx */ regmap_write(adv7511->regmap_cec, ADV7511_REG_CEC_TX_ENABLE + offset, 0x01); return 0; } static const struct cec_adap_ops adv7511_cec_adap_ops = { .adap_enable = adv7511_cec_adap_enable, .adap_log_addr = adv7511_cec_adap_log_addr, .adap_transmit = adv7511_cec_adap_transmit, }; static int adv7511_cec_parse_dt(struct device *dev, struct adv7511 *adv7511) { adv7511->cec_clk = devm_clk_get(dev, "cec"); if (IS_ERR(adv7511->cec_clk)) { int ret = PTR_ERR(adv7511->cec_clk); adv7511->cec_clk = NULL; return ret; } clk_prepare_enable(adv7511->cec_clk); adv7511->cec_clk_freq = clk_get_rate(adv7511->cec_clk); return 0; } int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511) { unsigned int offset = adv7511->reg_cec_offset; int ret = adv7511_cec_parse_dt(dev, adv7511); if (ret) goto err_cec_parse_dt; adv7511->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops, adv7511, dev_name(dev), CEC_CAP_DEFAULTS, ADV7511_MAX_ADDRS); if (IS_ERR(adv7511->cec_adap)) { ret = PTR_ERR(adv7511->cec_adap); goto err_cec_alloc; } regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL, 0); /* cec soft reset */ regmap_write(adv7511->regmap_cec, ADV7511_REG_CEC_SOFT_RESET + offset, 0x01); regmap_write(adv7511->regmap_cec, ADV7511_REG_CEC_SOFT_RESET + offset, 0x00); /* non-legacy mode - 
use all three RX buffers */ regmap_write(adv7511->regmap_cec, ADV7511_REG_CEC_RX_BUFFERS + offset, 0x08); regmap_write(adv7511->regmap_cec, ADV7511_REG_CEC_CLK_DIV + offset, ((adv7511->cec_clk_freq / 750000) - 1) << 2); ret = cec_register_adapter(adv7511->cec_adap, dev); if (ret) goto err_cec_register; return 0; err_cec_register: cec_delete_adapter(adv7511->cec_adap); adv7511->cec_adap = NULL; err_cec_alloc: dev_info(dev, "Initializing CEC failed with error %d, disabling CEC\n", ret); err_cec_parse_dt: regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL, ADV7511_CEC_CTRL_POWER_DOWN); return ret == -EPROBE_DEFER ? ret : 0; }
linux-master
drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Analog Devices ADV7511 HDMI transmitter driver
 *
 * Copyright 2012 Analog Devices Inc.
 * Copyright (c) 2016, Linaro Limited
 */

#include <sound/core.h>
#include <sound/hdmi-codec.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <linux/of_graph.h>

#include "adv7511.h"

/*
 * Compute the HDMI audio clock regeneration N and CTS values for the
 * given TMDS clock (f_tmds, in kHz per the mode->clock convention used by
 * the caller) and audio sample rate (fs, in Hz).  N follows the standard
 * 128*fs/1000 (or /900 for 44.1 kHz family) relationship; CTS is derived
 * from f_tmds and N.  Sample rates outside the listed set leave *n
 * untouched (callers pre-initialize it to 0).
 */
static void adv7511_calc_cts_n(unsigned int f_tmds, unsigned int fs,
			       unsigned int *cts, unsigned int *n)
{
	switch (fs) {
	case 32000:
	case 48000:
	case 96000:
	case 192000:
		*n = fs * 128 / 1000;
		break;
	case 44100:
	case 88200:
	case 176400:
		*n = fs * 128 / 900;
		break;
	}

	*cts = ((f_tmds * *n) / (128 * fs)) * 1000;
}

/* Recompute N/CTS from the cached TMDS and audio rates and program them. */
static int adv7511_update_cts_n(struct adv7511 *adv7511)
{
	unsigned int cts = 0;
	unsigned int n = 0;

	adv7511_calc_cts_n(adv7511->f_tmds, adv7511->f_audio, &cts, &n);

	/* N and CTS are each split over three registers (20-bit values) */
	regmap_write(adv7511->regmap, ADV7511_REG_N0, (n >> 16) & 0xf);
	regmap_write(adv7511->regmap, ADV7511_REG_N1, (n >> 8) & 0xff);
	regmap_write(adv7511->regmap, ADV7511_REG_N2, n & 0xff);

	regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL0,
		     (cts >> 16) & 0xf);
	regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL1,
		     (cts >> 8) & 0xff);
	regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL2,
		     cts & 0xff);

	return 0;
}

/*
 * hdmi-codec .hw_params callback: translate the requested sample rate,
 * sample width and DAI format into ADV7511 audio-path register settings.
 * Returns -EINVAL for unsupported rate/width/format combinations.
 */
static int adv7511_hdmi_hw_params(struct device *dev, void *data,
				  struct hdmi_codec_daifmt *fmt,
				  struct hdmi_codec_params *hparms)
{
	struct adv7511 *adv7511 = dev_get_drvdata(dev);
	unsigned int audio_source, i2s_format = 0;
	unsigned int invert_clock;
	unsigned int rate;
	unsigned int len;

	switch (hparms->sample_rate) {
	case 32000:
		rate = ADV7511_SAMPLE_FREQ_32000;
		break;
	case 44100:
		rate = ADV7511_SAMPLE_FREQ_44100;
		break;
	case 48000:
		rate = ADV7511_SAMPLE_FREQ_48000;
		break;
	case 88200:
		rate = ADV7511_SAMPLE_FREQ_88200;
		break;
	case 96000:
		rate = ADV7511_SAMPLE_FREQ_96000;
		break;
	case 176400:
		rate = ADV7511_SAMPLE_FREQ_176400;
		break;
	case 192000:
		rate = ADV7511_SAMPLE_FREQ_192000;
		break;
	default:
		return -EINVAL;
	}

	switch (hparms->sample_width) {
	case 16:
		len = ADV7511_I2S_SAMPLE_LEN_16;
		break;
	case 18:
		len = ADV7511_I2S_SAMPLE_LEN_18;
		break;
	case 20:
		len = ADV7511_I2S_SAMPLE_LEN_20;
		break;
	case 32:
		/* 32-bit slots are only supported for IEC958 subframes */
		if (fmt->bit_fmt != SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE)
			return -EINVAL;
		fallthrough;
	case 24:
		len = ADV7511_I2S_SAMPLE_LEN_24;
		break;
	default:
		return -EINVAL;
	}

	switch (fmt->fmt) {
	case HDMI_I2S:
		audio_source = ADV7511_AUDIO_SOURCE_I2S;
		i2s_format = ADV7511_I2S_FORMAT_I2S;
		if (fmt->bit_fmt == SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE)
			i2s_format = ADV7511_I2S_IEC958_DIRECT;
		break;
	case HDMI_RIGHT_J:
		audio_source = ADV7511_AUDIO_SOURCE_I2S;
		i2s_format = ADV7511_I2S_FORMAT_RIGHT_J;
		break;
	case HDMI_LEFT_J:
		audio_source = ADV7511_AUDIO_SOURCE_I2S;
		i2s_format = ADV7511_I2S_FORMAT_LEFT_J;
		break;
	case HDMI_SPDIF:
		audio_source = ADV7511_AUDIO_SOURCE_SPDIF;
		break;
	default:
		return -EINVAL;
	}

	invert_clock = fmt->bit_clk_inv;

	regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_SOURCE, 0x70,
			   audio_source << 4);
	regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG, BIT(6),
			   invert_clock << 6);
	regmap_update_bits(adv7511->regmap, ADV7511_REG_I2S_CONFIG, 0x03,
			   i2s_format);

	/* cache for audio_startup()/audio_shutdown() and N/CTS updates */
	adv7511->audio_source = audio_source;
	adv7511->f_audio = hparms->sample_rate;

	adv7511_update_cts_n(adv7511);

	regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CFG3,
			   ADV7511_AUDIO_CFG3_LEN_MASK, len);
	regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG,
			   ADV7511_I2C_FREQ_ID_CFG_RATE_MASK, rate << 4);
	regmap_write(adv7511->regmap, 0x73, 0x1);

	return 0;
}

/*
 * hdmi-codec .audio_startup callback: enable the audio packet and
 * infoframe machinery and (for S/PDIF sources) the S/PDIF receiver.
 */
static int audio_startup(struct device *dev, void *data)
{
	struct adv7511 *adv7511 = dev_get_drvdata(dev);

	regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
				BIT(7), 0);
	/* hide Audio infoframe updates */
	regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
				BIT(5), BIT(5));
	/* enable N/CTS, enable Audio sample packets */
	regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
				BIT(5), BIT(5));
	/* enable N/CTS */
	regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
				BIT(6), BIT(6));
	/* not copyrighted */
	regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CFG1,
				BIT(5), BIT(5));
	/* enable audio infoframes */
	regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
				BIT(3), BIT(3));
	/* AV mute disable */
	regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(0),
				BIT(7) | BIT(6), BIT(7));
	/* use Audio infoframe updated info */
	regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(1),
				BIT(5), 0);
	/* enable SPDIF receiver */
	if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
		regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
				   BIT(7), BIT(7));

	return 0;
}

/* hdmi-codec .audio_shutdown callback: disable the S/PDIF receiver. */
static void audio_shutdown(struct device *dev, void *data)
{
	struct adv7511 *adv7511 = dev_get_drvdata(dev);

	if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
		regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
				   BIT(7), 0);
}

/* Map the OF-graph endpoint on port 2 to sound DAI 0. */
static int adv7511_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
					struct device_node *endpoint)
{
	struct of_endpoint of_ep;
	int ret;

	ret = of_graph_parse_endpoint(endpoint, &of_ep);
	if (ret < 0)
		return ret;

	/*
	 * HDMI sound should be located as reg = <2>
	 * Then, it is sound port 0
	 */
	if (of_ep.port == 2)
		return 0;

	return -EINVAL;
}

static const struct hdmi_codec_ops adv7511_codec_ops = {
	.hw_params	= adv7511_hdmi_hw_params,
	.audio_shutdown = audio_shutdown,
	.audio_startup	= audio_startup,
	.get_dai_id	= adv7511_hdmi_i2s_get_dai_id,
};

/* Platform data for the registered hdmi-codec device: stereo I2S + S/PDIF. */
static const struct hdmi_codec_pdata codec_data = {
	.ops = &adv7511_codec_ops,
	.max_i2s_channels = 2,
	.i2s = 1,
	.spdif = 1,
};

/* Register the hdmi-codec platform device that exposes the audio path. */
int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511)
{
	adv7511->audio_pdev = platform_device_register_data(dev,
					HDMI_CODEC_DRV_NAME,
					PLATFORM_DEVID_AUTO,
					&codec_data,
					sizeof(codec_data));
	return PTR_ERR_OR_ZERO(adv7511->audio_pdev);
}

/* Tear down the hdmi-codec platform device, if it was created. */
void adv7511_audio_exit(struct adv7511 *adv7511)
{
	if (adv7511->audio_pdev) {
		platform_device_unregister(adv7511->audio_pdev);
		adv7511->audio_pdev = NULL;
	}
}
linux-master
drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Analog Devices ADV7511 HDMI transmitter driver
 *
 * Copyright 2012 Analog Devices Inc.
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>

#include <media/cec.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "adv7511.h"

/* ADI recommended values for proper operation. */
static const struct reg_sequence adv7511_fixed_registers[] = {
	{ 0x98, 0x03 },
	{ 0x9a, 0xe0 },
	{ 0x9c, 0x30 },
	{ 0x9d, 0x61 },
	{ 0xa2, 0xa4 },
	{ 0xa3, 0xa4 },
	{ 0xe0, 0xd0 },
	{ 0xf9, 0x00 },
	{ 0x55, 0x02 },
};

/* -----------------------------------------------------------------------------
 * Register access
 */

/* Power-on register defaults used to seed the regmap cache (one row = 8 regs). */
static const uint8_t adv7511_register_defaults[] = {
	0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 00 */
	0x00, 0x00, 0x01, 0x0e, 0xbc, 0x18, 0x01, 0x13,
	0x25, 0x37, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 10 */
	0x46, 0x62, 0x04, 0xa8, 0x00, 0x00, 0x1c, 0x84,
	0x1c, 0xbf, 0x04, 0xa8, 0x1e, 0x70, 0x02, 0x1e, /* 20 */
	0x00, 0x00, 0x04, 0xa8, 0x08, 0x12, 0x1b, 0xac,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 30 */
	0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0xb0,
	0x00, 0x50, 0x90, 0x7e, 0x79, 0x70, 0x00, 0x00, /* 40 */
	0x00, 0xa8, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x02, 0x0d, 0x00, 0x00, 0x00, 0x00, /* 50 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 60 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 70 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 80 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 90 */
	0x0b, 0x02, 0x00, 0x18, 0x5a, 0x60, 0x00, 0x00,
	0x00, 0x00, 0x80, 0x80, 0x08, 0x04, 0x00, 0x00, /* a0 */
	0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x40, 0x14,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* b0 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* c0 */
	0x00, 0x03, 0x00, 0x00, 0x02, 0x00, 0x01, 0x04,
	0x30, 0xff, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00, /* d0 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x01,
	0x80, 0x75, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, /* e0 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x75, 0x11, 0x00, /* f0 */
	0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/*
 * Registers that must never be served from the regmap cache: status,
 * interrupt, HDCP/EDID handshake and chip-id registers.
 */
static bool adv7511_register_volatile(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case ADV7511_REG_CHIP_REVISION:
	case ADV7511_REG_SPDIF_FREQ:
	case ADV7511_REG_CTS_AUTOMATIC1:
	case ADV7511_REG_CTS_AUTOMATIC2:
	case ADV7511_REG_VIC_DETECTED:
	case ADV7511_REG_VIC_SEND:
	case ADV7511_REG_AUX_VIC_DETECTED:
	case ADV7511_REG_STATUS:
	case ADV7511_REG_GC(1):
	case ADV7511_REG_INT(0):
	case ADV7511_REG_INT(1):
	case ADV7511_REG_PLL_STATUS:
	case ADV7511_REG_AN(0):
	case ADV7511_REG_AN(1):
	case ADV7511_REG_AN(2):
	case ADV7511_REG_AN(3):
	case ADV7511_REG_AN(4):
	case ADV7511_REG_AN(5):
	case ADV7511_REG_AN(6):
	case ADV7511_REG_AN(7):
	case ADV7511_REG_HDCP_STATUS:
	case ADV7511_REG_BCAPS:
	case ADV7511_REG_BKSV(0):
	case ADV7511_REG_BKSV(1):
	case ADV7511_REG_BKSV(2):
	case ADV7511_REG_BKSV(3):
	case ADV7511_REG_BKSV(4):
	case ADV7511_REG_DDC_STATUS:
	case ADV7511_REG_EDID_READ_CTRL:
	case ADV7511_REG_BSTATUS(0):
	case ADV7511_REG_BSTATUS(1):
	case ADV7511_REG_CHIP_ID_HIGH:
	case ADV7511_REG_CHIP_ID_LOW:
		return true;
	}

	return false;
}

static const struct regmap_config adv7511_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,

	.max_register = 0xff,
	.cache_type = REGCACHE_RBTREE,
	.reg_defaults_raw = adv7511_register_defaults,
	.num_reg_defaults_raw = ARRAY_SIZE(adv7511_register_defaults),

	.volatile_reg = adv7511_register_volatile,
};

/* -----------------------------------------------------------------------------
 * Hardware configuration
 */

/*
 * Program (or bypass) the color space conversion matrix.  The CSC update
 * mode is held during programming so the hardware latches all 12
 * coefficients atomically.
 */
static void adv7511_set_colormap(struct adv7511 *adv7511, bool enable,
				 const uint16_t *coeff,
				 unsigned int scaling_factor)
{
	unsigned int i;

	regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(1),
			   ADV7511_CSC_UPDATE_MODE, ADV7511_CSC_UPDATE_MODE);

	if (enable) {
		for (i = 0; i < 12; ++i) {
			regmap_update_bits(adv7511->regmap,
					   ADV7511_REG_CSC_UPPER(i),
					   0x1f, coeff[i] >> 8);
			regmap_write(adv7511->regmap,
				     ADV7511_REG_CSC_LOWER(i),
				     coeff[i] & 0xff);
		}
	}

	if (enable)
		regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(0),
				   0xe0, 0x80 | (scaling_factor << 5));
	else
		regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(0),
				   0x80, 0x00);

	regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(1),
			   ADV7511_CSC_UPDATE_MODE, 0);
}

/* Set bits of a 16-bit packet-enable mask (low byte = ENABLE0, high = ENABLE1). */
static int adv7511_packet_enable(struct adv7511 *adv7511, unsigned int packet)
{
	if (packet & 0xff)
		regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE0,
				   packet, 0xff);

	if (packet & 0xff00) {
		packet >>= 8;
		regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
				   packet, 0xff);
	}

	return 0;
}

/* Clear bits of a 16-bit packet-enable mask (see adv7511_packet_enable). */
static int adv7511_packet_disable(struct adv7511 *adv7511, unsigned int packet)
{
	if (packet & 0xff)
		regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE0,
				   packet, 0x00);

	if (packet & 0xff00) {
		packet >>= 8;
		regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
				   packet, 0x00);
	}

	return 0;
}

/* Coefficients for adv7511 color space conversion */
static const uint16_t adv7511_csc_ycbcr_to_rgb[] = {
	0x0734, 0x04ad, 0x0000, 0x1c1b,
	0x1ddc, 0x04ad, 0x1f24, 0x0135,
	0x0000, 0x04ad, 0x087c, 0x1b77,
};

/*
 * Choose output colorspace/CSC based on input format and sink
 * capabilities, then program the CSC, output format bits, HDMI/DVI mode
 * and (re)send the AVI infoframe.
 */
static void adv7511_set_config_csc(struct adv7511 *adv7511,
				   struct drm_connector *connector,
				   bool rgb, bool hdmi_mode)
{
	struct adv7511_video_config config;
	bool output_format_422, output_format_ycbcr;
	unsigned int mode;
	uint8_t infoframe[17];

	config.hdmi_mode = hdmi_mode;

	hdmi_avi_infoframe_init(&config.avi_infoframe);

	config.avi_infoframe.scan_mode = HDMI_SCAN_MODE_UNDERSCAN;

	if (rgb) {
		config.csc_enable = false;
		config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
	} else {
		config.csc_scaling_factor = ADV7511_CSC_SCALING_4;
		config.csc_coefficents = adv7511_csc_ycbcr_to_rgb;

		/* pass YUV422 through only when the HDMI sink accepts it */
		if ((connector->display_info.color_formats &
		     DRM_COLOR_FORMAT_YCBCR422) &&
		    config.hdmi_mode) {
			config.csc_enable = false;
			config.avi_infoframe.colorspace =
				HDMI_COLORSPACE_YUV422;
		} else {
			config.csc_enable = true;
			config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
		}
	}

	if (config.hdmi_mode) {
		mode = ADV7511_HDMI_CFG_MODE_HDMI;

		switch (config.avi_infoframe.colorspace) {
		case HDMI_COLORSPACE_YUV444:
			output_format_422 = false;
			output_format_ycbcr = true;
			break;
		case HDMI_COLORSPACE_YUV422:
			output_format_422 = true;
			output_format_ycbcr = true;
			break;
		default:
			output_format_422 = false;
			output_format_ycbcr = false;
			break;
		}
	} else {
		mode = ADV7511_HDMI_CFG_MODE_DVI;
		output_format_422 = false;
		output_format_ycbcr = false;
	}

	adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);

	adv7511_set_colormap(adv7511, config.csc_enable,
			     config.csc_coefficents,
			     config.csc_scaling_factor);

	regmap_update_bits(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG1, 0x81,
			   (output_format_422 << 7) | output_format_ycbcr);

	regmap_update_bits(adv7511->regmap, ADV7511_REG_HDCP_HDMI_CFG,
			   ADV7511_HDMI_CFG_MODE_MASK, mode);

	hdmi_avi_infoframe_pack(&config.avi_infoframe, infoframe,
				sizeof(infoframe));

	/* The AVI infoframe id is not configurable */
	regmap_bulk_write(adv7511->regmap, ADV7511_REG_AVI_INFOFRAME_VERSION,
			  infoframe + 1, sizeof(infoframe) - 1);

	adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
}

/*
 * Apply the static input/link configuration (color depth, input style,
 * clock mode, sync handling) parsed from DT and cache the sync settings
 * for adv7511_mode_set().
 */
static void adv7511_set_link_config(struct adv7511 *adv7511,
				    const struct adv7511_link_config *config)
{
	/*
	 * The input style values documented in the datasheet don't match the
	 * hardware register field values :-(
	 */
	static const unsigned int input_styles[4] = { 0, 2, 1, 3 };
	unsigned int clock_delay;
	unsigned int color_depth;
	unsigned int input_id;

	clock_delay = (config->clock_delay + 1200) / 400;
	color_depth = config->input_color_depth == 8 ? 3
		    : (config->input_color_depth == 10 ? 1 : 2);

	/* TODO Support input ID 6 */
	if (config->input_colorspace != HDMI_COLORSPACE_YUV422)
		input_id = config->input_clock == ADV7511_INPUT_CLOCK_DDR
			 ? 5 : 0;
	else if (config->input_clock == ADV7511_INPUT_CLOCK_DDR)
		input_id = config->embedded_sync ? 8 : 7;
	else if (config->input_clock == ADV7511_INPUT_CLOCK_2X)
		input_id = config->embedded_sync ? 4 : 3;
	else
		input_id = config->embedded_sync ? 2 : 1;

	regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG, 0xf,
			   input_id);
	regmap_update_bits(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG1, 0x7e,
			   (color_depth << 4) |
			   (input_styles[config->input_style] << 2));
	regmap_write(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG2,
		     config->input_justification << 3);
	regmap_write(adv7511->regmap, ADV7511_REG_TIMING_GEN_SEQ,
		     config->sync_pulse << 2);

	regmap_write(adv7511->regmap, 0xba, clock_delay << 5);

	adv7511->embedded_sync = config->embedded_sync;
	adv7511->hsync_polarity = config->hsync_polarity;
	adv7511->vsync_polarity = config->vsync_polarity;
	adv7511->rgb = config->input_colorspace == HDMI_COLORSPACE_RGB;
}

/* Raw power-up (no regcache sync, no DSI bring-up) — also used for EDID reads. */
static void __adv7511_power_on(struct adv7511 *adv7511)
{
	adv7511->current_edid_segment = -1;

	regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
			   ADV7511_POWER_POWER_DOWN, 0);
	if (adv7511->i2c_main->irq) {
		/*
		 * Documentation says the INT_ENABLE registers are reset in
		 * POWER_DOWN mode. My 7511w preserved the bits, however.
		 * Still, let's be safe and stick to the documentation.
		 */
		regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
			     ADV7511_INT0_EDID_READY | ADV7511_INT0_HPD);
		regmap_update_bits(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
				   ADV7511_INT1_DDC_ERROR,
				   ADV7511_INT1_DDC_ERROR);
	}

	/*
	 * Per spec it is allowed to pulse the HPD signal to indicate that the
	 * EDID information has changed. Some monitors do this when they wakeup
	 * from standby or are enabled. When the HPD goes low the adv7511 is
	 * reset and the outputs are disabled which might cause the monitor to
	 * go to standby again. To avoid this we ignore the HPD pin for the
	 * first few seconds after enabling the output. On the other hand
	 * adv7535 require to enable HPD Override bit for proper HPD.
	 */
	if (adv7511->type == ADV7535)
		regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
				   ADV7535_REG_POWER2_HPD_OVERRIDE,
				   ADV7535_REG_POWER2_HPD_OVERRIDE);
	else
		regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
				   ADV7511_REG_POWER2_HPD_SRC_MASK,
				   ADV7511_REG_POWER2_HPD_SRC_NONE);
}

/* Full power-up: raw power-on, restore cached registers, start DSI if present. */
static void adv7511_power_on(struct adv7511 *adv7511)
{
	__adv7511_power_on(adv7511);

	/*
	 * Most of the registers are reset during power down or when HPD is low.
	 */
	regcache_sync(adv7511->regmap);

	if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
		adv7533_dsi_power_on(adv7511);
	adv7511->powered = true;
}

/* Raw power-down counterpart of __adv7511_power_on(). */
static void __adv7511_power_off(struct adv7511 *adv7511)
{
	/* TODO: setup additional power down modes */
	if (adv7511->type == ADV7535)
		regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
				   ADV7535_REG_POWER2_HPD_OVERRIDE, 0);

	regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
			   ADV7511_POWER_POWER_DOWN,
			   ADV7511_POWER_POWER_DOWN);
	regmap_update_bits(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
			   ADV7511_INT1_DDC_ERROR, 0);
	regcache_mark_dirty(adv7511->regmap);
}

/* Full power-down: raw power-off plus DSI shutdown for ADV7533/7535. */
static void adv7511_power_off(struct adv7511 *adv7511)
{
	__adv7511_power_off(adv7511);
	if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
		adv7533_dsi_power_off(adv7511);
	adv7511->powered = false;
}

/* -----------------------------------------------------------------------------
 * Interrupt and hotplug detection
 */

/* Check (and acknowledge) a pending HPD interrupt; true if one fired. */
static bool adv7511_hpd(struct adv7511 *adv7511)
{
	unsigned int irq0;
	int ret;

	ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
	if (ret < 0)
		return false;

	if (irq0 & ADV7511_INT0_HPD) {
		regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
			     ADV7511_INT0_HPD);
		return true;
	}

	return false;
}

/*
 * Deferred HPD handler: determine connect state, re-power the chip after
 * a re-plug, and propagate the status change to DRM (connector or bridge
 * notification, depending on who owns the connector).
 */
static void adv7511_hpd_work(struct work_struct *work)
{
	struct adv7511 *adv7511 = container_of(work, struct adv7511, hpd_work);
	enum drm_connector_status status;
	unsigned int val;
	int ret;

	ret = regmap_read(adv7511->regmap, ADV7511_REG_STATUS, &val);
	if (ret < 0)
		status = connector_status_disconnected;
	else if (val & ADV7511_STATUS_HPD)
		status = connector_status_connected;
	else
		status = connector_status_disconnected;

	/*
	 * The bridge resets its registers on unplug. So when we get a plug
	 * event and we're already supposed to be powered, cycle the bridge to
	 * restore its state.
	 */
	if (status == connector_status_connected &&
	    adv7511->connector.status == connector_status_disconnected &&
	    adv7511->powered) {
		regcache_mark_dirty(adv7511->regmap);
		adv7511_power_on(adv7511);
	}

	if (adv7511->connector.status != status) {
		adv7511->connector.status = status;

		if (adv7511->connector.dev) {
			if (status == connector_status_disconnected)
				cec_phys_addr_invalidate(adv7511->cec_adap);
			drm_kms_helper_hotplug_event(adv7511->connector.dev);
		} else {
			drm_bridge_hpd_notify(&adv7511->bridge, status);
		}
	}
}

/*
 * Read and acknowledge both interrupt registers, dispatching HPD work
 * (if requested), EDID-ready wakeups and CEC events.
 */
static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd)
{
	unsigned int irq0, irq1;
	int ret;

	ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
	if (ret < 0)
		return ret;

	ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(1), &irq1);
	if (ret < 0)
		return ret;

	/* write-1-to-clear acknowledge */
	regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
	regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);

	if (process_hpd && irq0 & ADV7511_INT0_HPD && adv7511->bridge.encoder)
		schedule_work(&adv7511->hpd_work);

	if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
		adv7511->edid_read = true;

		if (adv7511->i2c_main->irq)
			wake_up_all(&adv7511->wq);
	}

#ifdef CONFIG_DRM_I2C_ADV7511_CEC
	adv7511_cec_irq_process(adv7511, irq1);
#endif

	return 0;
}

/* Threaded IRQ entry point. */
static irqreturn_t adv7511_irq_handler(int irq, void *devid)
{
	struct adv7511 *adv7511 = devid;
	int ret;

	ret = adv7511_irq_process(adv7511, true);
	return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
}

/* -----------------------------------------------------------------------------
 * EDID retrieval
 */

/*
 * Wait (IRQ-driven or by polling every 25 ms) until the chip signals that
 * an EDID segment has been read; timeout is in milliseconds.
 */
static int adv7511_wait_for_edid(struct adv7511 *adv7511, int timeout)
{
	int ret;

	if (adv7511->i2c_main->irq) {
		ret = wait_event_interruptible_timeout(adv7511->wq,
				adv7511->edid_read, msecs_to_jiffies(timeout));
	} else {
		for (; timeout > 0; timeout -= 25) {
			ret = adv7511_irq_process(adv7511, false);
			if (ret < 0)
				break;

			if (adv7511->edid_read)
				break;

			msleep(25);
		}
	}

	return adv7511->edid_read ? 0 : -EIO;
}

/*
 * drm_do_get_edid() block-read callback: request the 256-byte EDID
 * segment containing 'block' from the chip's EDID buffer, then copy the
 * requested 128-byte block out of the cached segment.
 */
static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
				  size_t len)
{
	struct adv7511 *adv7511 = data;
	struct i2c_msg xfer[2];
	uint8_t offset;
	unsigned int i;
	int ret;

	if (len > 128)
		return -EINVAL;

	if (adv7511->current_edid_segment != block / 2) {
		unsigned int status;

		ret = regmap_read(adv7511->regmap, ADV7511_REG_DDC_STATUS,
				  &status);
		if (ret < 0)
			return ret;

		if (status != 2) {
			adv7511->edid_read = false;
			regmap_write(adv7511->regmap, ADV7511_REG_EDID_SEGMENT,
				     block);
			ret = adv7511_wait_for_edid(adv7511, 200);
			if (ret < 0)
				return ret;
		}

		/* Break this apart, hopefully more I2C controllers will
		 * support 64 byte transfers than 256 byte transfers
		 */

		xfer[0].addr = adv7511->i2c_edid->addr;
		xfer[0].flags = 0;
		xfer[0].len = 1;
		xfer[0].buf = &offset;
		xfer[1].addr = adv7511->i2c_edid->addr;
		xfer[1].flags = I2C_M_RD;
		xfer[1].len = 64;
		xfer[1].buf = adv7511->edid_buf;

		offset = 0;

		for (i = 0; i < 4; ++i) {
			ret = i2c_transfer(adv7511->i2c_edid->adapter, xfer,
					   ARRAY_SIZE(xfer));
			if (ret < 0)
				return ret;
			else if (ret != 2)
				return -EIO;

			xfer[1].buf += 64;
			offset += 64;
		}

		adv7511->current_edid_segment = block / 2;
	}

	if (block % 2 == 0)
		memcpy(buf, adv7511->edid_buf, len);
	else
		memcpy(buf, adv7511->edid_buf + 128, len);

	return 0;
}

/* -----------------------------------------------------------------------------
 * ADV75xx helpers
 */

/*
 * Fetch the sink's EDID (temporarily powering the chip if needed), then
 * reconfigure CSC/HDMI mode from it and update the CEC physical address.
 * The returned EDID is kmalloc'ed; caller frees it.
 */
static struct edid *adv7511_get_edid(struct adv7511 *adv7511,
				     struct drm_connector *connector)
{
	struct edid *edid;

	/* Reading the EDID only works if the device is powered */
	if (!adv7511->powered) {
		unsigned int edid_i2c_addr =
					(adv7511->i2c_edid->addr << 1);

		__adv7511_power_on(adv7511);

		/* Reset the EDID_I2C_ADDR register as it might be cleared */
		regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR,
			     edid_i2c_addr);
	}

	edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511);

	if (!adv7511->powered)
		__adv7511_power_off(adv7511);

	adv7511_set_config_csc(adv7511, connector, adv7511->rgb,
			       drm_detect_hdmi_monitor(edid));

	cec_s_phys_addr_from_edid(adv7511->cec_adap, edid);

	return edid;
}

/* Populate the connector's mode list from a fresh EDID read. */
static int adv7511_get_modes(struct adv7511 *adv7511,
			     struct drm_connector *connector)
{
	struct edid *edid;
	unsigned int count;

	edid = adv7511_get_edid(adv7511, connector);

	drm_connector_update_edid_property(connector, edid);
	count = drm_add_edid_modes(connector, edid);

	kfree(edid);

	return count;
}

/*
 * Detect sink presence via the STATUS register, re-initializing the chip
 * after a plug transition (the chip resets on unplug).  'connector' may
 * be NULL when called through the bridge .detect path.
 */
static enum drm_connector_status
adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
{
	enum drm_connector_status status;
	unsigned int val;
	bool hpd;
	int ret;

	ret = regmap_read(adv7511->regmap, ADV7511_REG_STATUS, &val);
	if (ret < 0)
		return connector_status_disconnected;

	if (val & ADV7511_STATUS_HPD)
		status = connector_status_connected;
	else
		status = connector_status_disconnected;

	hpd = adv7511_hpd(adv7511);

	/* The chip resets itself when the cable is disconnected, so in case
	 * there is a pending HPD interrupt and the cable is connected there was
	 * at least one transition from disconnected to connected and the chip
	 * has to be reinitialized. */
	if (status == connector_status_connected && hpd && adv7511->powered) {
		regcache_mark_dirty(adv7511->regmap);
		adv7511_power_on(adv7511);
		if (connector)
			adv7511_get_modes(adv7511, connector);
		/*
		 * report one disconnect first so userspace sees the
		 * disconnect->connect cycle behind the pending HPD pulse
		 */
		if (adv7511->status == connector_status_connected)
			status = connector_status_disconnected;
	} else {
		/* Re-enable HPD sensing */
		if (adv7511->type == ADV7535)
			regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
					   ADV7535_REG_POWER2_HPD_OVERRIDE,
					   ADV7535_REG_POWER2_HPD_OVERRIDE);
		else
			regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
					   ADV7511_REG_POWER2_HPD_SRC_MASK,
					   ADV7511_REG_POWER2_HPD_SRC_BOTH);
	}

	adv7511->status = status;
	return status;
}

/* Reject modes above the ADV7511's 165 MHz pixel clock limit. */
static enum drm_mode_status adv7511_mode_valid(struct adv7511 *adv7511,
					       const struct drm_display_mode *mode)
{
	if (mode->clock > 165000)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

/*
 * Program mode-dependent settings: embedded-sync timing extraction or
 * sync-polarity inversion, the low-refresh-rate field, and cache the mode
 * and TMDS clock for later audio N/CTS computation.
 */
static void adv7511_mode_set(struct adv7511 *adv7511,
			     const struct drm_display_mode *mode,
			     const struct drm_display_mode *adj_mode)
{
	unsigned int low_refresh_rate;
	unsigned int hsync_polarity = 0;
	unsigned int vsync_polarity = 0;

	if (adv7511->embedded_sync) {
		unsigned int hsync_offset, hsync_len;
		unsigned int vsync_offset, vsync_len;

		hsync_offset = adj_mode->crtc_hsync_start -
			       adj_mode->crtc_hdisplay;
		vsync_offset = adj_mode->crtc_vsync_start -
			       adj_mode->crtc_vdisplay;
		hsync_len = adj_mode->crtc_hsync_end -
			    adj_mode->crtc_hsync_start;
		vsync_len = adj_mode->crtc_vsync_end -
			    adj_mode->crtc_vsync_start;

		/* The hardware vsync generator has a off-by-one bug */
		vsync_offset += 1;

		regmap_write(adv7511->regmap, ADV7511_REG_HSYNC_PLACEMENT_MSB,
			     ((hsync_offset >> 10) & 0x7) << 5);
		regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(0),
			     (hsync_offset >> 2) & 0xff);
		regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(1),
			     ((hsync_offset & 0x3) << 6) |
			     ((hsync_len >> 4) & 0x3f));
		regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(2),
			     ((hsync_len & 0xf) << 4) |
			     ((vsync_offset >> 6) & 0xf));
		regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(3),
			     ((vsync_offset & 0x3f) << 2) |
			     ((vsync_len >> 8) & 0x3));
		regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(4),
			     vsync_len & 0xff);

		hsync_polarity = !(adj_mode->flags & DRM_MODE_FLAG_PHSYNC);
		vsync_polarity = !(adj_mode->flags & DRM_MODE_FLAG_PVSYNC);
	} else {
		enum adv7511_sync_polarity mode_hsync_polarity;
		enum adv7511_sync_polarity mode_vsync_polarity;

		/**
		 * If the input signal is always low or always high we want to
		 * invert or let it passthrough depending on the polarity of the
		 * current mode.
		 **/
		if (adj_mode->flags & DRM_MODE_FLAG_NHSYNC)
			mode_hsync_polarity = ADV7511_SYNC_POLARITY_LOW;
		else
			mode_hsync_polarity = ADV7511_SYNC_POLARITY_HIGH;

		if (adj_mode->flags & DRM_MODE_FLAG_NVSYNC)
			mode_vsync_polarity = ADV7511_SYNC_POLARITY_LOW;
		else
			mode_vsync_polarity = ADV7511_SYNC_POLARITY_HIGH;

		if (adv7511->hsync_polarity != mode_hsync_polarity &&
		    adv7511->hsync_polarity !=
		    ADV7511_SYNC_POLARITY_PASSTHROUGH)
			hsync_polarity = 1;

		if (adv7511->vsync_polarity != mode_vsync_polarity &&
		    adv7511->vsync_polarity !=
		    ADV7511_SYNC_POLARITY_PASSTHROUGH)
			vsync_polarity = 1;
	}

	if (drm_mode_vrefresh(mode) <= 24)
		low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ;
	else if (drm_mode_vrefresh(mode) <= 25)
		low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ;
	else if (drm_mode_vrefresh(mode) <= 30)
		low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ;
	else
		low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;

	/* the low-refresh-rate field lives at different registers per chip */
	if (adv7511->type == ADV7511)
		regmap_update_bits(adv7511->regmap, 0xfb, 0x6,
				   low_refresh_rate << 1);
	else
		regmap_update_bits(adv7511->regmap, 0x4a, 0xc,
				   low_refresh_rate << 2);

	regmap_update_bits(adv7511->regmap, 0x17, 0x60,
			   (vsync_polarity << 6) | (hsync_polarity << 5));

	drm_mode_copy(&adv7511->curr_mode, adj_mode);

	/*
	 * TODO Test first order 4:2:2 to 4:4:4 up conversion method, which is
	 * supposed to give better results.
	 */

	adv7511->f_tmds = mode->clock;
}

/* -----------------------------------------------------------------------------
 * DRM Connector Operations
 */

static struct adv7511 *connector_to_adv7511(struct drm_connector *connector)
{
	return container_of(connector, struct adv7511, connector);
}

static int adv7511_connector_get_modes(struct drm_connector *connector)
{
	struct adv7511 *adv = connector_to_adv7511(connector);

	return adv7511_get_modes(adv, connector);
}

static enum drm_mode_status
adv7511_connector_mode_valid(struct drm_connector *connector,
			     struct drm_display_mode *mode)
{
	struct adv7511 *adv = connector_to_adv7511(connector);

	return adv7511_mode_valid(adv, mode);
}

static struct drm_connector_helper_funcs adv7511_connector_helper_funcs = {
	.get_modes = adv7511_connector_get_modes,
	.mode_valid = adv7511_connector_mode_valid,
};

static enum drm_connector_status
adv7511_connector_detect(struct drm_connector *connector, bool force)
{
	struct adv7511 *adv = connector_to_adv7511(connector);

	return adv7511_detect(adv, connector);
}

static const struct drm_connector_funcs adv7511_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.detect = adv7511_connector_detect,
	.destroy = drm_connector_cleanup,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

/*
 * Create and attach the driver-owned HDMI connector (used when the DRM
 * core did not ask for DRM_BRIDGE_ATTACH_NO_CONNECTOR).
 */
static int adv7511_connector_init(struct adv7511 *adv)
{
	struct drm_bridge *bridge = &adv->bridge;
	int ret;

	if (!bridge->encoder) {
		DRM_ERROR("Parent encoder object not found");
		return -ENODEV;
	}

	/* without a wired IRQ, fall back to connection polling */
	if (adv->i2c_main->irq)
		adv->connector.polled = DRM_CONNECTOR_POLL_HPD;
	else
		adv->connector.polled = DRM_CONNECTOR_POLL_CONNECT |
				DRM_CONNECTOR_POLL_DISCONNECT;

	ret = drm_connector_init(bridge->dev, &adv->connector,
				 &adv7511_connector_funcs,
				 DRM_MODE_CONNECTOR_HDMIA);
	if (ret < 0) {
		DRM_ERROR("Failed to initialize connector with drm\n");
		return ret;
	}
	drm_connector_helper_add(&adv->connector,
				 &adv7511_connector_helper_funcs);
	drm_connector_attach_encoder(&adv->connector, bridge->encoder);

	return 0;
}

/* -----------------------------------------------------------------------------
 * DRM Bridge Operations
 */

static struct adv7511 *bridge_to_adv7511(struct drm_bridge *bridge)
{
	return container_of(bridge, struct adv7511, bridge);
}

static void adv7511_bridge_enable(struct drm_bridge *bridge)
{
	struct adv7511 *adv = bridge_to_adv7511(bridge);

	adv7511_power_on(adv);
}

static void adv7511_bridge_disable(struct drm_bridge *bridge)
{
	struct adv7511 *adv = bridge_to_adv7511(bridge);

	adv7511_power_off(adv);
}

static void adv7511_bridge_mode_set(struct drm_bridge *bridge,
				    const struct drm_display_mode *mode,
				    const struct drm_display_mode *adj_mode)
{
	struct adv7511 *adv = bridge_to_adv7511(bridge);

	adv7511_mode_set(adv, mode, adj_mode);
}

static enum drm_mode_status adv7511_bridge_mode_valid(struct drm_bridge *bridge,
						      const struct drm_display_info *info,
						      const struct drm_display_mode *mode)
{
	struct adv7511 *adv = bridge_to_adv7511(bridge);

	/* DSI-fed variants have their own (stricter) mode constraints */
	if (adv->type == ADV7533 || adv->type == ADV7535)
		return adv7533_mode_valid(adv, mode);
	else
		return adv7511_mode_valid(adv, mode);
}

static int adv7511_bridge_attach(struct drm_bridge *bridge,
				 enum drm_bridge_attach_flags flags)
{
	struct adv7511 *adv = bridge_to_adv7511(bridge);
	int ret = 0;

	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
		ret = adv7511_connector_init(adv);
		if (ret < 0)
			return ret;
	}

	if (adv->i2c_main->irq)
		regmap_write(adv->regmap, ADV7511_REG_INT_ENABLE(0),
			     ADV7511_INT0_HPD);

	return ret;
}

static enum drm_connector_status adv7511_bridge_detect(struct drm_bridge *bridge)
{
	struct adv7511 *adv = bridge_to_adv7511(bridge);

	return adv7511_detect(adv, NULL);
}

static struct edid *adv7511_bridge_get_edid(struct drm_bridge *bridge,
					    struct drm_connector *connector)
{
	struct adv7511 *adv = bridge_to_adv7511(bridge);

	return adv7511_get_edid(adv, connector);
}

static void adv7511_bridge_hpd_notify(struct drm_bridge *bridge,
				      enum drm_connector_status status)
{
	struct adv7511 *adv = bridge_to_adv7511(bridge);

	if (status == connector_status_disconnected)
		cec_phys_addr_invalidate(adv->cec_adap);
}

static const struct drm_bridge_funcs adv7511_bridge_funcs = {
	.enable = adv7511_bridge_enable,
	.disable = adv7511_bridge_disable,
	.mode_set = adv7511_bridge_mode_set,
	.mode_valid = adv7511_bridge_mode_valid,
	.attach = adv7511_bridge_attach,
	.detect = adv7511_bridge_detect,
	.get_edid = adv7511_bridge_get_edid,
	.hpd_notify = adv7511_bridge_hpd_notify,
};

/* -----------------------------------------------------------------------------
 * Probe & remove
 */

static const char * const adv7511_supply_names[] = {
	"avdd",
	"dvdd",
	"pvdd",
	"bgvdd",
	"dvdd-3v",
};

static const char * const adv7533_supply_names[] = {
	"avdd",
	"dvdd",
	"pvdd",
	"a2vdd",
	"v3p3",
	"v1p2",
};

/* Acquire and enable the chip-variant-specific set of supply regulators. */
static int adv7511_init_regulators(struct adv7511 *adv)
{
	struct device *dev = &adv->i2c_main->dev;
	const char * const *supply_names;
	unsigned int i;
	int ret;

	if (adv->type == ADV7511) {
		supply_names = adv7511_supply_names;
		adv->num_supplies = ARRAY_SIZE(adv7511_supply_names);
	} else {
		supply_names = adv7533_supply_names;
		adv->num_supplies = ARRAY_SIZE(adv7533_supply_names);
	}

	adv->supplies = devm_kcalloc(dev, adv->num_supplies,
				     sizeof(*adv->supplies), GFP_KERNEL);
	if (!adv->supplies)
		return -ENOMEM;

	for (i = 0; i < adv->num_supplies; i++)
		adv->supplies[i].supply = supply_names[i];

	ret = devm_regulator_bulk_get(dev, adv->num_supplies, adv->supplies);
	if (ret)
		return ret;

	return regulator_bulk_enable(adv->num_supplies, adv->supplies);
}

static void adv7511_uninit_regulators(struct adv7511 *adv)
{
	regulator_bulk_disable(adv->num_supplies, adv->supplies);
}

/*
 * Volatile-register predicate for the CEC map: RX frame buffers and
 * status registers, after removing the per-chip CEC register offset.
 */
static bool adv7511_cec_register_volatile(struct device *dev, unsigned int reg)
{
	struct i2c_client *i2c = to_i2c_client(dev);
	struct adv7511 *adv7511 = i2c_get_clientdata(i2c);

	reg -= adv7511->reg_cec_offset;

	switch (reg) {
	case ADV7511_REG_CEC_RX1_FRAME_HDR:
	case ADV7511_REG_CEC_RX1_FRAME_DATA0 ...
		ADV7511_REG_CEC_RX1_FRAME_DATA0 + 14:
	case ADV7511_REG_CEC_RX1_FRAME_LEN:
	case ADV7511_REG_CEC_RX2_FRAME_HDR:
	case ADV7511_REG_CEC_RX2_FRAME_DATA0 ...
		ADV7511_REG_CEC_RX2_FRAME_DATA0 + 14:
	case ADV7511_REG_CEC_RX2_FRAME_LEN:
	case ADV7511_REG_CEC_RX3_FRAME_HDR:
	case ADV7511_REG_CEC_RX3_FRAME_DATA0 ...
		ADV7511_REG_CEC_RX3_FRAME_DATA0 + 14:
	case ADV7511_REG_CEC_RX3_FRAME_LEN:
	case ADV7511_REG_CEC_RX_STATUS:
	case ADV7511_REG_CEC_RX_BUFFERS:
	case ADV7511_REG_CEC_TX_LOW_DRV_CNT:
		return true;
	}

	return false;
}

static const struct regmap_config adv7511_cec_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,

	.max_register = 0xff,
	.cache_type = REGCACHE_RBTREE,
	.volatile_reg = adv7511_cec_register_volatile,
};

/*
 * Create the ancillary CEC I2C client and its regmap; apply the
 * ADV7533/7535 register patching and offset when needed.  Unregisters
 * the ancillary device on failure.
 */
static int adv7511_init_cec_regmap(struct adv7511 *adv)
{
	int ret;

	adv->i2c_cec = i2c_new_ancillary_device(adv->i2c_main, "cec",
						ADV7511_CEC_I2C_ADDR_DEFAULT);
	if (IS_ERR(adv->i2c_cec))
		return PTR_ERR(adv->i2c_cec);

	regmap_write(adv->regmap, ADV7511_REG_CEC_I2C_ADDR,
		     adv->i2c_cec->addr << 1);

	i2c_set_clientdata(adv->i2c_cec, adv);

	adv->regmap_cec = devm_regmap_init_i2c(adv->i2c_cec,
					&adv7511_cec_regmap_config);
	if (IS_ERR(adv->regmap_cec)) {
		ret = PTR_ERR(adv->regmap_cec);
		goto err;
	}

	if (adv->type == ADV7533 || adv->type == ADV7535) {
		ret = adv7533_patch_cec_registers(adv);
		if (ret)
			goto err;

		adv->reg_cec_offset = ADV7533_REG_CEC_OFFSET;
	}

	return 0;
err:
	i2c_unregister_device(adv->i2c_cec);
	return ret;
}

/*
 * Parse the DT link configuration ("adi,input-depth",
 * "adi,input-colorspace", ...).  NOTE(review): definition continues past
 * the end of this chunk; the tail below is intentionally truncated.
 */
static int adv7511_parse_dt(struct device_node *np,
			    struct adv7511_link_config *config)
{
	const char *str;
	int ret;

	of_property_read_u32(np, "adi,input-depth", &config->input_color_depth);
	if (config->input_color_depth != 8 && config->input_color_depth != 10 &&
	    config->input_color_depth != 12)
		return -EINVAL;

	ret = of_property_read_string(np, "adi,input-colorspace", &str);
	if (ret < 0)
		return ret;

	if (!strcmp(str, "rgb"))
		config->input_colorspace = HDMI_COLORSPACE_RGB;
	else if (!strcmp(str, "yuv422"))
config->input_colorspace = HDMI_COLORSPACE_YUV422; else if (!strcmp(str, "yuv444")) config->input_colorspace = HDMI_COLORSPACE_YUV444; else return -EINVAL; ret = of_property_read_string(np, "adi,input-clock", &str); if (ret < 0) return ret; if (!strcmp(str, "1x")) config->input_clock = ADV7511_INPUT_CLOCK_1X; else if (!strcmp(str, "2x")) config->input_clock = ADV7511_INPUT_CLOCK_2X; else if (!strcmp(str, "ddr")) config->input_clock = ADV7511_INPUT_CLOCK_DDR; else return -EINVAL; if (config->input_colorspace == HDMI_COLORSPACE_YUV422 || config->input_clock != ADV7511_INPUT_CLOCK_1X) { ret = of_property_read_u32(np, "adi,input-style", &config->input_style); if (ret) return ret; if (config->input_style < 1 || config->input_style > 3) return -EINVAL; ret = of_property_read_string(np, "adi,input-justification", &str); if (ret < 0) return ret; if (!strcmp(str, "left")) config->input_justification = ADV7511_INPUT_JUSTIFICATION_LEFT; else if (!strcmp(str, "evenly")) config->input_justification = ADV7511_INPUT_JUSTIFICATION_EVENLY; else if (!strcmp(str, "right")) config->input_justification = ADV7511_INPUT_JUSTIFICATION_RIGHT; else return -EINVAL; } else { config->input_style = 1; config->input_justification = ADV7511_INPUT_JUSTIFICATION_LEFT; } of_property_read_u32(np, "adi,clock-delay", &config->clock_delay); if (config->clock_delay < -1200 || config->clock_delay > 1600) return -EINVAL; config->embedded_sync = of_property_read_bool(np, "adi,embedded-sync"); /* Hardcode the sync pulse configurations for now. 
*/ config->sync_pulse = ADV7511_INPUT_SYNC_PULSE_NONE; config->vsync_polarity = ADV7511_SYNC_POLARITY_PASSTHROUGH; config->hsync_polarity = ADV7511_SYNC_POLARITY_PASSTHROUGH; return 0; } static int adv7511_probe(struct i2c_client *i2c) { const struct i2c_device_id *id = i2c_client_get_device_id(i2c); struct adv7511_link_config link_config; struct adv7511 *adv7511; struct device *dev = &i2c->dev; unsigned int val; int ret; if (!dev->of_node) return -EINVAL; adv7511 = devm_kzalloc(dev, sizeof(*adv7511), GFP_KERNEL); if (!adv7511) return -ENOMEM; adv7511->i2c_main = i2c; adv7511->powered = false; adv7511->status = connector_status_disconnected; if (dev->of_node) adv7511->type = (enum adv7511_type)of_device_get_match_data(dev); else adv7511->type = id->driver_data; memset(&link_config, 0, sizeof(link_config)); if (adv7511->type == ADV7511) ret = adv7511_parse_dt(dev->of_node, &link_config); else ret = adv7533_parse_dt(dev->of_node, adv7511); if (ret) return ret; ret = adv7511_init_regulators(adv7511); if (ret) return dev_err_probe(dev, ret, "failed to init regulators\n"); /* * The power down GPIO is optional. If present, toggle it from active to * inactive to wake up the encoder. */ adv7511->gpio_pd = devm_gpiod_get_optional(dev, "pd", GPIOD_OUT_HIGH); if (IS_ERR(adv7511->gpio_pd)) { ret = PTR_ERR(adv7511->gpio_pd); goto uninit_regulators; } if (adv7511->gpio_pd) { usleep_range(5000, 6000); gpiod_set_value_cansleep(adv7511->gpio_pd, 0); } adv7511->regmap = devm_regmap_init_i2c(i2c, &adv7511_regmap_config); if (IS_ERR(adv7511->regmap)) { ret = PTR_ERR(adv7511->regmap); goto uninit_regulators; } ret = regmap_read(adv7511->regmap, ADV7511_REG_CHIP_REVISION, &val); if (ret) goto uninit_regulators; dev_dbg(dev, "Rev. 
%d\n", val); if (adv7511->type == ADV7511) ret = regmap_register_patch(adv7511->regmap, adv7511_fixed_registers, ARRAY_SIZE(adv7511_fixed_registers)); else ret = adv7533_patch_registers(adv7511); if (ret) goto uninit_regulators; adv7511_packet_disable(adv7511, 0xffff); adv7511->i2c_edid = i2c_new_ancillary_device(i2c, "edid", ADV7511_EDID_I2C_ADDR_DEFAULT); if (IS_ERR(adv7511->i2c_edid)) { ret = PTR_ERR(adv7511->i2c_edid); goto uninit_regulators; } regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR, adv7511->i2c_edid->addr << 1); adv7511->i2c_packet = i2c_new_ancillary_device(i2c, "packet", ADV7511_PACKET_I2C_ADDR_DEFAULT); if (IS_ERR(adv7511->i2c_packet)) { ret = PTR_ERR(adv7511->i2c_packet); goto err_i2c_unregister_edid; } regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR, adv7511->i2c_packet->addr << 1); ret = adv7511_init_cec_regmap(adv7511); if (ret) goto err_i2c_unregister_packet; INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work); if (i2c->irq) { init_waitqueue_head(&adv7511->wq); ret = devm_request_threaded_irq(dev, i2c->irq, NULL, adv7511_irq_handler, IRQF_ONESHOT, dev_name(dev), adv7511); if (ret) goto err_unregister_cec; } adv7511_power_off(adv7511); i2c_set_clientdata(i2c, adv7511); if (adv7511->type == ADV7511) adv7511_set_link_config(adv7511, &link_config); ret = adv7511_cec_init(dev, adv7511); if (ret) goto err_unregister_cec; adv7511->bridge.funcs = &adv7511_bridge_funcs; adv7511->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID; if (adv7511->i2c_main->irq) adv7511->bridge.ops |= DRM_BRIDGE_OP_HPD; adv7511->bridge.of_node = dev->of_node; adv7511->bridge.type = DRM_MODE_CONNECTOR_HDMIA; drm_bridge_add(&adv7511->bridge); adv7511_audio_init(dev, adv7511); if (adv7511->type == ADV7533 || adv7511->type == ADV7535) { ret = adv7533_attach_dsi(adv7511); if (ret) goto err_unregister_audio; } return 0; err_unregister_audio: adv7511_audio_exit(adv7511); drm_bridge_remove(&adv7511->bridge); err_unregister_cec: 
cec_unregister_adapter(adv7511->cec_adap); i2c_unregister_device(adv7511->i2c_cec); clk_disable_unprepare(adv7511->cec_clk); err_i2c_unregister_packet: i2c_unregister_device(adv7511->i2c_packet); err_i2c_unregister_edid: i2c_unregister_device(adv7511->i2c_edid); uninit_regulators: adv7511_uninit_regulators(adv7511); return ret; } static void adv7511_remove(struct i2c_client *i2c) { struct adv7511 *adv7511 = i2c_get_clientdata(i2c); adv7511_uninit_regulators(adv7511); drm_bridge_remove(&adv7511->bridge); adv7511_audio_exit(adv7511); cec_unregister_adapter(adv7511->cec_adap); i2c_unregister_device(adv7511->i2c_cec); clk_disable_unprepare(adv7511->cec_clk); i2c_unregister_device(adv7511->i2c_packet); i2c_unregister_device(adv7511->i2c_edid); } static const struct i2c_device_id adv7511_i2c_ids[] = { { "adv7511", ADV7511 }, { "adv7511w", ADV7511 }, { "adv7513", ADV7511 }, { "adv7533", ADV7533 }, { "adv7535", ADV7535 }, { } }; MODULE_DEVICE_TABLE(i2c, adv7511_i2c_ids); static const struct of_device_id adv7511_of_ids[] = { { .compatible = "adi,adv7511", .data = (void *)ADV7511 }, { .compatible = "adi,adv7511w", .data = (void *)ADV7511 }, { .compatible = "adi,adv7513", .data = (void *)ADV7511 }, { .compatible = "adi,adv7533", .data = (void *)ADV7533 }, { .compatible = "adi,adv7535", .data = (void *)ADV7535 }, { } }; MODULE_DEVICE_TABLE(of, adv7511_of_ids); static struct mipi_dsi_driver adv7533_dsi_driver = { .driver.name = "adv7533", }; static struct i2c_driver adv7511_driver = { .driver = { .name = "adv7511", .of_match_table = adv7511_of_ids, }, .id_table = adv7511_i2c_ids, .probe = adv7511_probe, .remove = adv7511_remove, }; static int __init adv7511_init(void) { int ret; if (IS_ENABLED(CONFIG_DRM_MIPI_DSI)) { ret = mipi_dsi_driver_register(&adv7533_dsi_driver); if (ret) return ret; } ret = i2c_add_driver(&adv7511_driver); if (ret) { if (IS_ENABLED(CONFIG_DRM_MIPI_DSI)) mipi_dsi_driver_unregister(&adv7533_dsi_driver); } return ret; } module_init(adv7511_init); static 
void __exit adv7511_exit(void) { i2c_del_driver(&adv7511_driver); if (IS_ENABLED(CONFIG_DRM_MIPI_DSI)) mipi_dsi_driver_unregister(&adv7533_dsi_driver); } module_exit(adv7511_exit); MODULE_AUTHOR("Lars-Peter Clausen <[email protected]>"); MODULE_DESCRIPTION("ADV7511 HDMI transmitter driver"); MODULE_LICENSE("GPL");
linux-master
drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2016, The Linux Foundation. All rights reserved. */ #include <linux/of_graph.h> #include "adv7511.h" static const struct reg_sequence adv7533_fixed_registers[] = { { 0x16, 0x20 }, { 0x9a, 0xe0 }, { 0xba, 0x70 }, { 0xde, 0x82 }, { 0xe4, 0x40 }, { 0xe5, 0x80 }, }; static const struct reg_sequence adv7533_cec_fixed_registers[] = { { 0x15, 0xd0 }, { 0x17, 0xd0 }, { 0x24, 0x20 }, { 0x57, 0x11 }, { 0x05, 0xc8 }, }; static void adv7511_dsi_config_timing_gen(struct adv7511 *adv) { struct mipi_dsi_device *dsi = adv->dsi; struct drm_display_mode *mode = &adv->curr_mode; unsigned int hsw, hfp, hbp, vsw, vfp, vbp; static const u8 clock_div_by_lanes[] = { 6, 4, 3 }; /* 2, 3, 4 lanes */ hsw = mode->hsync_end - mode->hsync_start; hfp = mode->hsync_start - mode->hdisplay; hbp = mode->htotal - mode->hsync_end; vsw = mode->vsync_end - mode->vsync_start; vfp = mode->vsync_start - mode->vdisplay; vbp = mode->vtotal - mode->vsync_end; /* set pixel clock divider mode */ regmap_write(adv->regmap_cec, 0x16, clock_div_by_lanes[dsi->lanes - 2] << 3); /* horizontal porch params */ regmap_write(adv->regmap_cec, 0x28, mode->htotal >> 4); regmap_write(adv->regmap_cec, 0x29, (mode->htotal << 4) & 0xff); regmap_write(adv->regmap_cec, 0x2a, hsw >> 4); regmap_write(adv->regmap_cec, 0x2b, (hsw << 4) & 0xff); regmap_write(adv->regmap_cec, 0x2c, hfp >> 4); regmap_write(adv->regmap_cec, 0x2d, (hfp << 4) & 0xff); regmap_write(adv->regmap_cec, 0x2e, hbp >> 4); regmap_write(adv->regmap_cec, 0x2f, (hbp << 4) & 0xff); /* vertical porch params */ regmap_write(adv->regmap_cec, 0x30, mode->vtotal >> 4); regmap_write(adv->regmap_cec, 0x31, (mode->vtotal << 4) & 0xff); regmap_write(adv->regmap_cec, 0x32, vsw >> 4); regmap_write(adv->regmap_cec, 0x33, (vsw << 4) & 0xff); regmap_write(adv->regmap_cec, 0x34, vfp >> 4); regmap_write(adv->regmap_cec, 0x35, (vfp << 4) & 0xff); regmap_write(adv->regmap_cec, 0x36, vbp >> 4); regmap_write(adv->regmap_cec, 0x37, 
(vbp << 4) & 0xff); } void adv7533_dsi_power_on(struct adv7511 *adv) { struct mipi_dsi_device *dsi = adv->dsi; if (adv->use_timing_gen) adv7511_dsi_config_timing_gen(adv); /* set number of dsi lanes */ regmap_write(adv->regmap_cec, 0x1c, dsi->lanes << 4); if (adv->use_timing_gen) { /* reset internal timing generator */ regmap_write(adv->regmap_cec, 0x27, 0xcb); regmap_write(adv->regmap_cec, 0x27, 0x8b); regmap_write(adv->regmap_cec, 0x27, 0xcb); } else { /* disable internal timing generator */ regmap_write(adv->regmap_cec, 0x27, 0x0b); } /* enable hdmi */ regmap_write(adv->regmap_cec, 0x03, 0x89); /* disable test mode */ regmap_write(adv->regmap_cec, 0x55, 0x00); regmap_register_patch(adv->regmap_cec, adv7533_cec_fixed_registers, ARRAY_SIZE(adv7533_cec_fixed_registers)); } void adv7533_dsi_power_off(struct adv7511 *adv) { /* disable hdmi */ regmap_write(adv->regmap_cec, 0x03, 0x0b); /* disable internal timing generator */ regmap_write(adv->regmap_cec, 0x27, 0x0b); } enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv, const struct drm_display_mode *mode) { unsigned long max_lane_freq; struct mipi_dsi_device *dsi = adv->dsi; u8 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format); /* Check max clock for either 7533 or 7535 */ if (mode->clock > (adv->type == ADV7533 ? 80000 : 148500)) return MODE_CLOCK_HIGH; /* Check max clock for each lane */ max_lane_freq = (adv->type == ADV7533 ? 
800000 : 891000); if (mode->clock * bpp > max_lane_freq * adv->num_dsi_lanes) return MODE_CLOCK_HIGH; return MODE_OK; } int adv7533_patch_registers(struct adv7511 *adv) { return regmap_register_patch(adv->regmap, adv7533_fixed_registers, ARRAY_SIZE(adv7533_fixed_registers)); } int adv7533_patch_cec_registers(struct adv7511 *adv) { return regmap_register_patch(adv->regmap_cec, adv7533_cec_fixed_registers, ARRAY_SIZE(adv7533_cec_fixed_registers)); } int adv7533_attach_dsi(struct adv7511 *adv) { struct device *dev = &adv->i2c_main->dev; struct mipi_dsi_host *host; struct mipi_dsi_device *dsi; int ret = 0; const struct mipi_dsi_device_info info = { .type = "adv7533", .channel = 0, .node = NULL, }; host = of_find_mipi_dsi_host_by_node(adv->host_node); if (!host) return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n"); dsi = devm_mipi_dsi_device_register_full(dev, host, &info); if (IS_ERR(dsi)) return dev_err_probe(dev, PTR_ERR(dsi), "failed to create dsi device\n"); adv->dsi = dsi; dsi->lanes = adv->num_dsi_lanes; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_MODE_VIDEO_HSE; ret = devm_mipi_dsi_attach(dev, dsi); if (ret < 0) return dev_err_probe(dev, ret, "failed to attach dsi to host\n"); return 0; } int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv) { u32 num_lanes; of_property_read_u32(np, "adi,dsi-lanes", &num_lanes); if (num_lanes < 1 || num_lanes > 4) return -EINVAL; adv->num_dsi_lanes = num_lanes; adv->host_node = of_graph_get_remote_node(np, 0, 0); if (!adv->host_node) return -ENODEV; of_node_put(adv->host_node); adv->use_timing_gen = !of_property_read_bool(np, "adi,disable-timing-generator"); /* TODO: Check if these need to be parsed by DT or not */ adv->rgb = true; adv->embedded_sync = false; return 0; }
linux-master
drivers/gpu/drm/bridge/adv7511/adv7533.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2020 NXP
 */

#include <linux/firmware/imx/svc/misc.h>
#include <linux/media-bus-format.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>

#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>

#include <dt-bindings/firmware/imx/rsrc.h>

/* Single control register of the PXL2DPI block (accessed via syscon regmap). */
#define PXL2DPI_CTRL		0x40
#define  CFG1_16BIT		0x0
#define  CFG2_16BIT		0x1
#define  CFG3_16BIT		0x2
#define  CFG1_18BIT		0x3
#define  CFG2_18BIT		0x4
#define  CFG_24BIT		0x5

#define DRIVER_NAME		"imx8qxp-pxl2dpi"

/* Per-instance state; one PXL2DPI may pair with a companion for dual-link. */
struct imx8qxp_pxl2dpi {
	struct regmap *regmap;
	struct drm_bridge bridge;
	struct drm_bridge *next_bridge;
	struct drm_bridge *companion;
	struct device *dev;
	struct imx_sc_ipc *ipc_handle;
	u32 sc_resource;
	u32 in_bus_format;
	u32 out_bus_format;
	u32 pl_sel;		/* pixel link selection, from port@0 endpoint id */
};

#define bridge_to_p2d(b)	container_of(b, struct imx8qxp_pxl2dpi, bridge)

/*
 * Attach to the next bridge in the chain.  This bridge cannot create a
 * drm_connector itself, so DRM_BRIDGE_ATTACH_NO_CONNECTOR is mandatory.
 */
static int imx8qxp_pxl2dpi_bridge_attach(struct drm_bridge *bridge,
					 enum drm_bridge_attach_flags flags)
{
	struct imx8qxp_pxl2dpi *p2d = bridge->driver_private;

	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
		DRM_DEV_ERROR(p2d->dev,
			      "do not support creating a drm_connector\n");
		return -EINVAL;
	}

	if (!bridge->encoder) {
		DRM_DEV_ERROR(p2d->dev, "missing encoder\n");
		return -ENODEV;
	}

	return drm_bridge_attach(bridge->encoder,
				 p2d->next_bridge, bridge,
				 DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}

/* Latch the negotiated input/output bus formats for use in mode_set. */
static int
imx8qxp_pxl2dpi_bridge_atomic_check(struct drm_bridge *bridge,
				    struct drm_bridge_state *bridge_state,
				    struct drm_crtc_state *crtc_state,
				    struct drm_connector_state *conn_state)
{
	struct imx8qxp_pxl2dpi *p2d = bridge->driver_private;

	p2d->in_bus_format = bridge_state->input_bus_cfg.format;
	p2d->out_bus_format = bridge_state->output_bus_cfg.format;

	return 0;
}

/*
 * Power up, select the pixel link, program the output format, and
 * propagate the mode to the companion PXL2DPI in dual-link setups.
 * Note the pm_runtime reference taken here is dropped in atomic_disable.
 */
static void
imx8qxp_pxl2dpi_bridge_mode_set(struct drm_bridge *bridge,
				const struct drm_display_mode *mode,
				const struct drm_display_mode *adjusted_mode)
{
	struct imx8qxp_pxl2dpi *p2d = bridge->driver_private;
	struct imx8qxp_pxl2dpi *companion_p2d;
	int ret;

	ret = pm_runtime_get_sync(p2d->dev);
	if (ret < 0)
		DRM_DEV_ERROR(p2d->dev,
			      "failed to get runtime PM sync: %d\n", ret);

	/* Route the chosen display pixel link to this PXL2DPI via SCU. */
	ret = imx_sc_misc_set_control(p2d->ipc_handle, p2d->sc_resource,
				      IMX_SC_C_PXL_LINK_SEL, p2d->pl_sel);
	if (ret)
		DRM_DEV_ERROR(p2d->dev,
			      "failed to set pixel link selection(%u): %d\n",
			      p2d->pl_sel, ret);

	switch (p2d->out_bus_format) {
	case MEDIA_BUS_FMT_RGB888_1X24:
		regmap_write(p2d->regmap, PXL2DPI_CTRL, CFG_24BIT);
		break;
	case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
		regmap_write(p2d->regmap, PXL2DPI_CTRL, CFG2_18BIT);
		break;
	default:
		DRM_DEV_ERROR(p2d->dev,
			      "unsupported output bus format 0x%08x\n",
			      p2d->out_bus_format);
	}

	if (p2d->companion) {
		companion_p2d = bridge_to_p2d(p2d->companion);

		/* Companion mirrors this instance's format negotiation. */
		companion_p2d->in_bus_format = p2d->in_bus_format;
		companion_p2d->out_bus_format = p2d->out_bus_format;

		p2d->companion->funcs->mode_set(p2d->companion, mode,
						adjusted_mode);
	}
}

/* Drop the runtime-PM reference taken in mode_set; cascade to companion. */
static void
imx8qxp_pxl2dpi_bridge_atomic_disable(struct drm_bridge *bridge,
				      struct drm_bridge_state *old_bridge_state)
{
	struct imx8qxp_pxl2dpi *p2d = bridge->driver_private;
	int ret;

	ret = pm_runtime_put(p2d->dev);
	if (ret < 0)
		DRM_DEV_ERROR(p2d->dev, "failed to put runtime PM: %d\n", ret);

	if (p2d->companion)
		p2d->companion->funcs->atomic_disable(p2d->companion,
						      old_bridge_state);
}

static const u32 imx8qxp_pxl2dpi_bus_output_fmts[] = {
	MEDIA_BUS_FMT_RGB888_1X24,
	MEDIA_BUS_FMT_RGB666_1X24_CPADHI,
};

/* True if fmt is one of the two supported output bus formats. */
static bool imx8qxp_pxl2dpi_bus_output_fmt_supported(u32 fmt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(imx8qxp_pxl2dpi_bus_output_fmts); i++) {
		if (imx8qxp_pxl2dpi_bus_output_fmts[i] == fmt)
			return true;
	}

	return false;
}

/*
 * Map the requested output format to the single matching 36-bit CPADLO
 * input format.  Caller (DRM core) frees the returned kmalloc'd array.
 */
static u32 *
imx8qxp_pxl2dpi_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
						 struct drm_bridge_state *bridge_state,
						 struct drm_crtc_state *crtc_state,
						 struct drm_connector_state *conn_state,
						 u32 output_fmt,
						 unsigned int *num_input_fmts)
{
	u32 *input_fmts;

	if (!imx8qxp_pxl2dpi_bus_output_fmt_supported(output_fmt))
		return NULL;

	*num_input_fmts = 1;

	input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
	if (!input_fmts)
		return NULL;

	switch (output_fmt) {
	case MEDIA_BUS_FMT_RGB888_1X24:
		input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X36_CPADLO;
		break;
	case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
		input_fmts[0] = MEDIA_BUS_FMT_RGB666_1X36_CPADLO;
		break;
	default:
		kfree(input_fmts);
		input_fmts = NULL;
		break;
	}

	return input_fmts;
}

/* Advertise both supported output formats; DRM core frees the copy. */
static u32 *
imx8qxp_pxl2dpi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
						  struct drm_bridge_state *bridge_state,
						  struct drm_crtc_state *crtc_state,
						  struct drm_connector_state *conn_state,
						  unsigned int *num_output_fmts)
{
	*num_output_fmts = ARRAY_SIZE(imx8qxp_pxl2dpi_bus_output_fmts);

	return kmemdup(imx8qxp_pxl2dpi_bus_output_fmts,
		       sizeof(imx8qxp_pxl2dpi_bus_output_fmts), GFP_KERNEL);
}

static const struct drm_bridge_funcs imx8qxp_pxl2dpi_bridge_funcs = {
	.atomic_duplicate_state	= drm_atomic_helper_bridge_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_bridge_destroy_state,
	.atomic_reset		= drm_atomic_helper_bridge_reset,
	.attach			= imx8qxp_pxl2dpi_bridge_attach,
	.atomic_check		= imx8qxp_pxl2dpi_bridge_atomic_check,
	.mode_set		= imx8qxp_pxl2dpi_bridge_mode_set,
	.atomic_disable		= imx8qxp_pxl2dpi_bridge_atomic_disable,
	.atomic_get_input_bus_fmts = imx8qxp_pxl2dpi_bridge_atomic_get_input_bus_fmts,
	.atomic_get_output_bus_fmts = imx8qxp_pxl2dpi_bridge_atomic_get_output_bus_fmts,
};

/*
 * Return the single available endpoint node of port@port_id, or an
 * ERR_PTR.  Exactly one endpoint must be present.  Caller must
 * of_node_put() the returned node.
 */
static struct device_node *
imx8qxp_pxl2dpi_get_available_ep_from_port(struct imx8qxp_pxl2dpi *p2d,
					   u32 port_id)
{
	struct device_node *port, *ep;
	int ep_cnt;

	port = of_graph_get_port_by_id(p2d->dev->of_node, port_id);
	if (!port) {
		DRM_DEV_ERROR(p2d->dev, "failed to get port@%u\n", port_id);
		return ERR_PTR(-ENODEV);
	}

	ep_cnt = of_get_available_child_count(port);
	if (ep_cnt == 0) {
		DRM_DEV_ERROR(p2d->dev, "no available endpoints of port@%u\n",
			      port_id);
		ep = ERR_PTR(-ENODEV);
		goto out;
	} else if (ep_cnt > 1) {
		DRM_DEV_ERROR(p2d->dev,
			      "invalid available endpoints of port@%u\n",
			      port_id);
		ep = ERR_PTR(-EINVAL);
		goto out;
	}

	ep = of_get_next_available_child(port, NULL);
	if (!ep) {
		DRM_DEV_ERROR(p2d->dev,
			      "failed to get available endpoint of port@%u\n",
			      port_id);
		ep = ERR_PTR(-ENODEV);
		goto out;
	}
out:
	of_node_put(port);
	return ep;
}

/*
 * Find the bridge connected downstream of our port@1, or ERR_PTR
 * (-EPROBE_DEFER if the remote device has not registered a bridge yet).
 */
static struct drm_bridge *
imx8qxp_pxl2dpi_find_next_bridge(struct imx8qxp_pxl2dpi *p2d)
{
	struct device_node *ep, *remote;
	struct drm_bridge *next_bridge;
	int ret;

	ep = imx8qxp_pxl2dpi_get_available_ep_from_port(p2d, 1);
	if (IS_ERR(ep)) {
		ret = PTR_ERR(ep);
		return ERR_PTR(ret);
	}

	remote = of_graph_get_remote_port_parent(ep);
	if (!remote || !of_device_is_available(remote)) {
		DRM_DEV_ERROR(p2d->dev, "no available remote\n");
		next_bridge = ERR_PTR(-ENODEV);
		goto out;
	} else if (!of_device_is_available(remote->parent)) {
		DRM_DEV_ERROR(p2d->dev, "remote parent is not available\n");
		next_bridge = ERR_PTR(-ENODEV);
		goto out;
	}

	next_bridge = of_drm_find_bridge(remote);
	if (!next_bridge) {
		next_bridge = ERR_PTR(-EPROBE_DEFER);
		goto out;
	}
out:
	of_node_put(remote);
	of_node_put(ep);

	return next_bridge;
}

/* Derive the pixel link selection from the endpoint id of port@0. */
static int imx8qxp_pxl2dpi_set_pixel_link_sel(struct imx8qxp_pxl2dpi *p2d)
{
	struct device_node *ep;
	struct of_endpoint endpoint;
	int ret;

	ep = imx8qxp_pxl2dpi_get_available_ep_from_port(p2d, 0);
	if (IS_ERR(ep))
		return PTR_ERR(ep);

	ret = of_graph_parse_endpoint(ep, &endpoint);
	if (ret) {
		DRM_DEV_ERROR(p2d->dev,
			      "failed to parse endpoint of port@0: %d\n", ret);
		goto out;
	}

	p2d->pl_sel = endpoint.id;
out:
	of_node_put(ep);

	return ret;
}

/*
 * Resolve the optional companion PXL2DPI (dual-link LVDS) from the
 * "fsl,companion-pxl2dpi" phandle and validate the dual-link pixel
 * order of the two downstream bridges.  Returns 0 when no companion is
 * configured, -EPROBE_DEFER if it exists but has not registered yet.
 */
static int imx8qxp_pxl2dpi_parse_dt_companion(struct imx8qxp_pxl2dpi *p2d)
{
	struct imx8qxp_pxl2dpi *companion_p2d;
	struct device *dev = p2d->dev;
	struct device_node *companion;
	struct device_node *port1, *port2;
	const struct of_device_id *match;
	int dual_link;
	int ret = 0;

	/* Locate the companion PXL2DPI for dual-link operation, if any. */
	companion = of_parse_phandle(dev->of_node, "fsl,companion-pxl2dpi", 0);
	if (!companion)
		return 0;

	if (!of_device_is_available(companion)) {
		DRM_DEV_ERROR(dev, "companion PXL2DPI is not available\n");
		ret = -ENODEV;
		goto out;
	}

	/*
	 * Sanity check: the companion bridge must have the same compatible
	 * string.
	 */
	match = of_match_device(dev->driver->of_match_table, dev);
	if (!of_device_is_compatible(companion, match->compatible)) {
		DRM_DEV_ERROR(dev, "companion PXL2DPI is incompatible\n");
		ret = -ENXIO;
		goto out;
	}

	p2d->companion = of_drm_find_bridge(companion);
	if (!p2d->companion) {
		ret = -EPROBE_DEFER;
		DRM_DEV_DEBUG_DRIVER(p2d->dev,
				     "failed to find companion bridge: %d\n",
				     ret);
		goto out;
	}

	companion_p2d = bridge_to_p2d(p2d->companion);

	/*
	 * We need to work out if the sink is expecting us to function in
	 * dual-link mode.  We do this by looking at the DT port nodes that
	 * the next bridges are connected to.  If they are marked as expecting
	 * even pixels and odd pixels than we need to use the companion PXL2DPI.
	 */
	port1 = of_graph_get_port_by_id(p2d->next_bridge->of_node, 1);
	port2 = of_graph_get_port_by_id(companion_p2d->next_bridge->of_node, 1);
	dual_link = drm_of_lvds_get_dual_link_pixel_order(port1, port2);
	of_node_put(port1);
	of_node_put(port2);

	if (dual_link < 0) {
		ret = dual_link;
		DRM_DEV_ERROR(dev, "failed to get dual link pixel order: %d\n",
			      ret);
		goto out;
	}

	DRM_DEV_DEBUG_DRIVER(dev,
			     "dual-link configuration detected (companion bridge %pOF)\n",
			     companion);
out:
	of_node_put(companion);

	return ret;
}

/*
 * Probe: acquire syscon regmap and SCU IPC handle, resolve downstream
 * bridge, pixel-link selection and optional companion, then register
 * this bridge.  Errors other than -EPROBE_DEFER are logged.
 */
static int imx8qxp_pxl2dpi_bridge_probe(struct platform_device *pdev)
{
	struct imx8qxp_pxl2dpi *p2d;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	int ret;

	p2d = devm_kzalloc(dev, sizeof(*p2d), GFP_KERNEL);
	if (!p2d)
		return -ENOMEM;

	/* The control register lives in the parent syscon block. */
	p2d->regmap = syscon_node_to_regmap(np->parent);
	if (IS_ERR(p2d->regmap)) {
		ret = PTR_ERR(p2d->regmap);
		if (ret != -EPROBE_DEFER)
			DRM_DEV_ERROR(dev, "failed to get regmap: %d\n", ret);
		return ret;
	}

	ret = imx_scu_get_handle(&p2d->ipc_handle);
	if (ret) {
		if (ret != -EPROBE_DEFER)
			DRM_DEV_ERROR(dev, "failed to get SCU ipc handle: %d\n",
				      ret);
		return ret;
	}

	p2d->dev = dev;

	ret = of_property_read_u32(np, "fsl,sc-resource", &p2d->sc_resource);
	if (ret) {
		DRM_DEV_ERROR(dev, "failed to get SC resource %d\n", ret);
		return ret;
	}

	p2d->next_bridge = imx8qxp_pxl2dpi_find_next_bridge(p2d);
	if (IS_ERR(p2d->next_bridge)) {
		ret = PTR_ERR(p2d->next_bridge);
		if (ret != -EPROBE_DEFER)
			DRM_DEV_ERROR(dev, "failed to find next bridge: %d\n",
				      ret);
		return ret;
	}

	ret = imx8qxp_pxl2dpi_set_pixel_link_sel(p2d);
	if (ret)
		return ret;

	ret = imx8qxp_pxl2dpi_parse_dt_companion(p2d);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, p2d);
	pm_runtime_enable(dev);

	p2d->bridge.driver_private = p2d;
	p2d->bridge.funcs = &imx8qxp_pxl2dpi_bridge_funcs;
	p2d->bridge.of_node = np;

	drm_bridge_add(&p2d->bridge);

	return ret;
}

/* Unregister the bridge and undo pm_runtime_enable() from probe. */
static void imx8qxp_pxl2dpi_bridge_remove(struct platform_device *pdev)
{
	struct imx8qxp_pxl2dpi *p2d = platform_get_drvdata(pdev);

	drm_bridge_remove(&p2d->bridge);

	pm_runtime_disable(&pdev->dev);
}

static const struct of_device_id imx8qxp_pxl2dpi_dt_ids[] = {
	{ .compatible = "fsl,imx8qxp-pxl2dpi", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx8qxp_pxl2dpi_dt_ids);

static struct platform_driver imx8qxp_pxl2dpi_bridge_driver = {
	.probe	= imx8qxp_pxl2dpi_bridge_probe,
	.remove_new = imx8qxp_pxl2dpi_bridge_remove,
	.driver	= {
		.of_match_table = imx8qxp_pxl2dpi_dt_ids,
		.name = DRIVER_NAME,
	},
};
module_platform_driver(imx8qxp_pxl2dpi_bridge_driver);

MODULE_DESCRIPTION("i.MX8QXP pixel link to DPI bridge driver");
MODULE_AUTHOR("Liu Ying <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
linux-master
drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2020 NXP
 *
 * i.MX8QM/QXP pixel combiner bridge driver.  The pixel combiner sits
 * between the display controller and the pixel link and carries up to
 * two display streams; one drm_bridge is registered per stream.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_print.h>

/* Control register; (n) selects display stream 0 or 1 where parameterized. */
#define PC_CTRL_REG			0x0
#define  PC_COMBINE_ENABLE		BIT(0)
#define  PC_DISP_BYPASS(n)		BIT(1 + 21 * (n))
#define  PC_DISP_HSYNC_POLARITY(n)	BIT(2 + 11 * (n))
/*
 * NOTE(review): the three *_POS macros below expand to identifiers that
 * lack the PC_ prefix (e.g. DISP_HSYNC_POLARITY).  They are not referenced
 * anywhere in this file, so the broken expansion never compiles — confirm
 * intent before using them.
 */
#define  PC_DISP_HSYNC_POLARITY_POS(n)	DISP_HSYNC_POLARITY(n)
#define  PC_DISP_VSYNC_POLARITY(n)	BIT(3 + 11 * (n))
#define  PC_DISP_VSYNC_POLARITY_POS(n)	DISP_VSYNC_POLARITY(n)
#define  PC_DISP_DVALID_POLARITY(n)	BIT(4 + 11 * (n))
#define  PC_DISP_DVALID_POLARITY_POS(n)	DISP_DVALID_POLARITY(n)
#define  PC_VSYNC_MASK_ENABLE		BIT(5)
#define  PC_SKIP_MODE			BIT(6)
#define  PC_SKIP_NUMBER_MASK		GENMASK(12, 7)
#define  PC_SKIP_NUMBER(n)		FIELD_PREP(PC_SKIP_NUMBER_MASK, (n))
#define  PC_DISP0_PIX_DATA_FORMAT_MASK	GENMASK(18, 16)
#define  PC_DISP0_PIX_DATA_FORMAT(fmt)	\
			FIELD_PREP(PC_DISP0_PIX_DATA_FORMAT_MASK, (fmt))
#define  PC_DISP1_PIX_DATA_FORMAT_MASK	GENMASK(21, 19)
#define  PC_DISP1_PIX_DATA_FORMAT(fmt)	\
			FIELD_PREP(PC_DISP1_PIX_DATA_FORMAT_MASK, (fmt))

/* Software reset register: bits are active-low reset releases. */
#define PC_SW_RESET_REG		0x20
#define  PC_SW_RESET_N		BIT(0)
#define  PC_DISP_SW_RESET_N(n)	BIT(1 + (n))
#define  PC_FULL_RESET_N	(PC_SW_RESET_N |	\
				 PC_DISP_SW_RESET_N(0) |	\
				 PC_DISP_SW_RESET_N(1))

/* Offsets of the write-1-to-set / write-1-to-clear aliases of a register. */
#define PC_REG_SET	0x4
#define PC_REG_CLR	0x8

#define DRIVER_NAME	"imx8qxp-pixel-combiner"

/* Pixel data format field values for PC_DISPn_PIX_DATA_FORMAT. */
enum imx8qxp_pc_pix_data_format {
	RGB,
	YUV444,
	YUV422,
	SPLIT_RGB,
};

/* Per-stream state; one drm_bridge is exposed per display stream. */
struct imx8qxp_pc_channel {
	struct drm_bridge bridge;
	struct drm_bridge *next_bridge;	/* downstream bridge from OF graph */
	struct imx8qxp_pc *pc;		/* owning device instance */
	unsigned int stream_id;		/* 0 or 1, from the child "reg" */
	bool is_available;		/* set once the bridge is registered */
};

/* Device instance covering both streams. */
struct imx8qxp_pc {
	struct device *dev;
	struct imx8qxp_pc_channel ch[2];
	struct clk *clk_apb;		/* APB clock gating register access */
	void __iomem *base;
};

/* Plain 32-bit MMIO read of a pixel combiner register. */
static inline u32 imx8qxp_pc_read(struct imx8qxp_pc *pc, unsigned int offset)
{
	return readl(pc->base + offset);
}

/* Plain 32-bit MMIO write of a pixel combiner register. */
static inline void imx8qxp_pc_write(struct imx8qxp_pc *pc, unsigned int offset,
				    u32 value)
{
	writel(value, pc->base + offset);
}

/* Set bits via the register's write-1-to-set alias. */
static inline void imx8qxp_pc_write_set(struct imx8qxp_pc *pc,
					unsigned int offset, u32 value)
{
	imx8qxp_pc_write(pc, offset + PC_REG_SET, value);
}

/* Clear bits via the register's write-1-to-clear alias. */
static inline void imx8qxp_pc_write_clr(struct imx8qxp_pc *pc,
					unsigned int offset, u32 value)
{
	imx8qxp_pc_write(pc, offset + PC_REG_CLR, value);
}

/* Reject modes wider than the hardware can carry per stream. */
static enum drm_mode_status
imx8qxp_pc_bridge_mode_valid(struct drm_bridge *bridge,
			     const struct drm_display_info *info,
			     const struct drm_display_mode *mode)
{
	if (mode->hdisplay > 2560)
		return MODE_BAD_HVALUE;

	return MODE_OK;
}

/*
 * Attach the next bridge in the chain.  Connector creation is not
 * supported; callers must pass DRM_BRIDGE_ATTACH_NO_CONNECTOR.
 */
static int imx8qxp_pc_bridge_attach(struct drm_bridge *bridge,
				    enum drm_bridge_attach_flags flags)
{
	struct imx8qxp_pc_channel *ch = bridge->driver_private;
	struct imx8qxp_pc *pc = ch->pc;

	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
		DRM_DEV_ERROR(pc->dev,
			      "do not support creating a drm_connector\n");
		return -EINVAL;
	}

	if (!bridge->encoder) {
		DRM_DEV_ERROR(pc->dev, "missing encoder\n");
		return -ENODEV;
	}

	return drm_bridge_attach(bridge->encoder,
				 ch->next_bridge, bridge,
				 DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}

/*
 * Program the per-stream polarities and data format for the new mode.
 * Takes a runtime-PM reference which is dropped in atomic_disable.
 */
static void imx8qxp_pc_bridge_mode_set(struct drm_bridge *bridge,
				       const struct drm_display_mode *mode,
				       const struct drm_display_mode *adjusted_mode)
{
	struct imx8qxp_pc_channel *ch = bridge->driver_private;
	struct imx8qxp_pc *pc = ch->pc;
	u32 val;
	int ret;

	ret = pm_runtime_get_sync(pc->dev);
	if (ret < 0)
		DRM_DEV_ERROR(pc->dev,
			      "failed to get runtime PM sync: %d\n", ret);

	ret = clk_prepare_enable(pc->clk_apb);
	if (ret)
		DRM_DEV_ERROR(pc->dev, "%s: failed to enable apb clock: %d\n",
			      __func__, ret);

	/* HSYNC to pixel link is active low. */
	imx8qxp_pc_write_clr(pc, PC_CTRL_REG,
			     PC_DISP_HSYNC_POLARITY(ch->stream_id));

	/* VSYNC to pixel link is active low. */
	imx8qxp_pc_write_clr(pc, PC_CTRL_REG,
			     PC_DISP_VSYNC_POLARITY(ch->stream_id));

	/* Data enable to pixel link is active high. */
	imx8qxp_pc_write_set(pc, PC_CTRL_REG,
			     PC_DISP_DVALID_POLARITY(ch->stream_id));

	/* Mask the first frame output which may be incomplete. */
	imx8qxp_pc_write_set(pc, PC_CTRL_REG, PC_VSYNC_MASK_ENABLE);

	/* Only support RGB currently. */
	val = imx8qxp_pc_read(pc, PC_CTRL_REG);
	if (ch->stream_id == 0) {
		val &= ~PC_DISP0_PIX_DATA_FORMAT_MASK;
		val |= PC_DISP0_PIX_DATA_FORMAT(RGB);
	} else {
		val &= ~PC_DISP1_PIX_DATA_FORMAT_MASK;
		val |= PC_DISP1_PIX_DATA_FORMAT(RGB);
	}
	imx8qxp_pc_write(pc, PC_CTRL_REG, val);

	/* Only support bypass mode currently. */
	imx8qxp_pc_write_set(pc, PC_CTRL_REG, PC_DISP_BYPASS(ch->stream_id));

	clk_disable_unprepare(pc->clk_apb);
}

/* Drop the runtime-PM reference taken in mode_set. */
static void
imx8qxp_pc_bridge_atomic_disable(struct drm_bridge *bridge,
				 struct drm_bridge_state *old_bridge_state)
{
	struct imx8qxp_pc_channel *ch = bridge->driver_private;
	struct imx8qxp_pc *pc = ch->pc;
	int ret;

	ret = pm_runtime_put(pc->dev);
	if (ret < 0)
		DRM_DEV_ERROR(pc->dev, "failed to put runtime PM: %d\n", ret);
}

/* Output bus formats offered to the next bridge. */
static const u32 imx8qxp_pc_bus_output_fmts[] = {
	MEDIA_BUS_FMT_RGB888_1X36_CPADLO,
	MEDIA_BUS_FMT_RGB666_1X36_CPADLO,
};

/* Linear scan of the supported-output-format table. */
static bool imx8qxp_pc_bus_output_fmt_supported(u32 fmt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(imx8qxp_pc_bus_output_fmts); i++) {
		if (imx8qxp_pc_bus_output_fmts[i] == fmt)
			return true;
	}

	return false;
}

/*
 * Map an output format (36-bit, padded-low) to the single matching
 * 30-bit input format.  Caller frees the returned array.
 */
static u32 *
imx8qxp_pc_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
					    struct drm_bridge_state *bridge_state,
					    struct drm_crtc_state *crtc_state,
					    struct drm_connector_state *conn_state,
					    u32 output_fmt,
					    unsigned int *num_input_fmts)
{
	u32 *input_fmts;

	if (!imx8qxp_pc_bus_output_fmt_supported(output_fmt))
		return NULL;

	*num_input_fmts = 1;

	input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
	if (!input_fmts)
		return NULL;

	switch (output_fmt) {
	case MEDIA_BUS_FMT_RGB888_1X36_CPADLO:
		input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X30_CPADLO;
		break;
	case MEDIA_BUS_FMT_RGB666_1X36_CPADLO:
		input_fmts[0] = MEDIA_BUS_FMT_RGB666_1X30_CPADLO;
		break;
	default:
		kfree(input_fmts);
		input_fmts = NULL;
		break;
	}

	return input_fmts;
}

/* Return a copy of the whole supported-output-format table. */
static u32 *
imx8qxp_pc_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
					     struct drm_bridge_state *bridge_state,
					     struct drm_crtc_state *crtc_state,
					     struct drm_connector_state *conn_state,
					     unsigned int *num_output_fmts)
{
	*num_output_fmts = ARRAY_SIZE(imx8qxp_pc_bus_output_fmts);
	return kmemdup(imx8qxp_pc_bus_output_fmts,
			sizeof(imx8qxp_pc_bus_output_fmts), GFP_KERNEL);
}

static const struct drm_bridge_funcs imx8qxp_pc_bridge_funcs = {
	.atomic_duplicate_state	= drm_atomic_helper_bridge_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_bridge_destroy_state,
	.atomic_reset		= drm_atomic_helper_bridge_reset,
	.mode_valid		= imx8qxp_pc_bridge_mode_valid,
	.attach			= imx8qxp_pc_bridge_attach,
	.mode_set		= imx8qxp_pc_bridge_mode_set,
	.atomic_disable		= imx8qxp_pc_bridge_atomic_disable,
	.atomic_get_input_bus_fmts =
			imx8qxp_pc_bridge_atomic_get_input_bus_fmts,
	.atomic_get_output_bus_fmts =
			imx8qxp_pc_bridge_atomic_get_output_bus_fmts,
};

/*
 * Probe: map registers, get the APB clock, then walk the available child
 * nodes ("reg" = stream id 0/1), resolve each channel's downstream bridge
 * from OF-graph port 1 endpoint 0, and register one drm_bridge per channel.
 */
static int imx8qxp_pc_bridge_probe(struct platform_device *pdev)
{
	struct imx8qxp_pc *pc;
	struct imx8qxp_pc_channel *ch;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct device_node *child, *remote;
	u32 i;
	int ret;

	pc = devm_kzalloc(dev, sizeof(*pc), GFP_KERNEL);
	if (!pc)
		return -ENOMEM;

	pc->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pc->base))
		return PTR_ERR(pc->base);

	pc->dev = dev;

	pc->clk_apb = devm_clk_get(dev, "apb");
	if (IS_ERR(pc->clk_apb)) {
		ret = PTR_ERR(pc->clk_apb);
		/* Probe deferral is expected; only log real failures. */
		if (ret != -EPROBE_DEFER)
			DRM_DEV_ERROR(dev, "failed to get apb clock: %d\n",
				      ret);
		return ret;
	}

	platform_set_drvdata(pdev, pc);
	pm_runtime_enable(dev);

	for_each_available_child_of_node(np, child) {
		/*
		 * NOTE(review): if of_property_read_u32() fails, "i" is
		 * printed (and later tested at free_child) while still
		 * uninitialized — confirm whether this needs hardening.
		 */
		ret = of_property_read_u32(child, "reg", &i);
		if (ret || i > 1) {
			ret = -EINVAL;
			DRM_DEV_ERROR(dev,
				      "invalid channel(%u) node address\n", i);
			goto free_child;
		}

		ch = &pc->ch[i];
		ch->pc = pc;
		ch->stream_id = i;

		remote = of_graph_get_remote_node(child, 1, 0);
		if (!remote) {
			ret = -ENODEV;
			DRM_DEV_ERROR(dev,
				      "channel%u failed to get port1's remote node: %d\n",
				      i, ret);
			goto free_child;
		}

		ch->next_bridge = of_drm_find_bridge(remote);
		if (!ch->next_bridge) {
			of_node_put(remote);
			/* Downstream bridge not bound yet: defer probing. */
			ret = -EPROBE_DEFER;
			DRM_DEV_DEBUG_DRIVER(dev,
					     "channel%u failed to find next bridge: %d\n",
					     i, ret);
			goto free_child;
		}

		of_node_put(remote);

		ch->bridge.driver_private = ch;
		ch->bridge.funcs = &imx8qxp_pc_bridge_funcs;
		ch->bridge.of_node = child;
		ch->is_available = true;

		drm_bridge_add(&ch->bridge);
	}

	return 0;

free_child:
	of_node_put(child);

	/* Unwind channel 0's bridge if it was already registered. */
	if (i == 1 && pc->ch[0].next_bridge)
		drm_bridge_remove(&pc->ch[0].bridge);

	pm_runtime_disable(dev);
	return ret;
}

/* Remove: unregister every registered channel bridge and disable PM. */
static void imx8qxp_pc_bridge_remove(struct platform_device *pdev)
{
	struct imx8qxp_pc *pc = platform_get_drvdata(pdev);
	struct imx8qxp_pc_channel *ch;
	int i;

	for (i = 0; i < 2; i++) {
		ch = &pc->ch[i];

		if (!ch->is_available)
			continue;

		drm_bridge_remove(&ch->bridge);
		ch->is_available = false;
	}

	pm_runtime_disable(&pdev->dev);
}

/* Runtime suspend: hold the block in full (active-low) software reset. */
static int __maybe_unused imx8qxp_pc_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct imx8qxp_pc *pc = platform_get_drvdata(pdev);
	int ret;

	ret = clk_prepare_enable(pc->clk_apb);
	if (ret)
		DRM_DEV_ERROR(pc->dev, "%s: failed to enable apb clock: %d\n",
			      __func__, ret);

	/* Disable pixel combiner by full reset. */
	imx8qxp_pc_write_clr(pc, PC_SW_RESET_REG, PC_FULL_RESET_N);

	clk_disable_unprepare(pc->clk_apb);

	/* Ensure the reset takes effect. */
	usleep_range(10, 20);

	return ret;
}

/* Runtime resume: release the software reset. */
static int __maybe_unused imx8qxp_pc_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct imx8qxp_pc *pc = platform_get_drvdata(pdev);
	int ret;

	ret = clk_prepare_enable(pc->clk_apb);
	if (ret) {
		DRM_DEV_ERROR(pc->dev, "%s: failed to enable apb clock: %d\n",
			      __func__, ret);
		return ret;
	}

	/* out of reset */
	imx8qxp_pc_write_set(pc, PC_SW_RESET_REG, PC_FULL_RESET_N);

	clk_disable_unprepare(pc->clk_apb);

	return ret;
}

static const struct dev_pm_ops imx8qxp_pc_pm_ops = {
	SET_RUNTIME_PM_OPS(imx8qxp_pc_runtime_suspend,
			   imx8qxp_pc_runtime_resume, NULL)
};

static const struct of_device_id imx8qxp_pc_dt_ids[] = {
	{ .compatible = "fsl,imx8qm-pixel-combiner", },
	{ .compatible = "fsl,imx8qxp-pixel-combiner", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx8qxp_pc_dt_ids);

static struct platform_driver imx8qxp_pc_bridge_driver = {
	.probe	= imx8qxp_pc_bridge_probe,
	.remove_new = imx8qxp_pc_bridge_remove,
	.driver	= {
		.pm = &imx8qxp_pc_pm_ops,
		.name = DRIVER_NAME,
		.of_match_table = imx8qxp_pc_dt_ids,
	},
};
module_platform_driver(imx8qxp_pc_bridge_driver);

MODULE_DESCRIPTION("i.MX8QM/QXP pixel combiner bridge driver");
MODULE_AUTHOR("Liu Ying <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
linux-master
drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2020 NXP
 *
 * i.MX8QM LVDS Display Bridge (LDB) / Pixel Mapper driver.  Builds on
 * the shared imx-ldb-helper code and adds the QM-specific bits: one
 * LVDS PHY per channel, pixel/bypass clocks, and sync-polarity control
 * via the SS_CTRL regmap register.
 */

#include <linux/clk.h>
#include <linux/media-bus-format.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>

#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>

#include "imx-ldb-helper.h"

/* Extra bits in the LDB control register beyond the common helper set. */
#define  LDB_CH0_10BIT_EN		BIT(22)
#define  LDB_CH1_10BIT_EN		BIT(23)
#define  LDB_CH0_DATA_WIDTH_24BIT	BIT(24)
#define  LDB_CH1_DATA_WIDTH_24BIT	BIT(26)
#define  LDB_CH0_DATA_WIDTH_30BIT	(2 << 24)
#define  LDB_CH1_DATA_WIDTH_30BIT	(2 << 26)

/* Subsystem control register: per-channel sync mask/polarity bits. */
#define SS_CTRL			0x20
#define  CH_HSYNC_M(id)		BIT(0 + ((id) * 2))
#define  CH_VSYNC_M(id)		BIT(1 + ((id) * 2))
#define  CH_PHSYNC(id)		BIT(0 + ((id) * 2))
#define  CH_PVSYNC(id)		BIT(1 + ((id) * 2))

#define DRIVER_NAME	"imx8qm-ldb"

/* QM channel = common LDB channel plus its dedicated LVDS PHY. */
struct imx8qm_ldb_channel {
	struct ldb_channel base;
	struct phy *phy;
};

struct imx8qm_ldb {
	struct ldb base;
	struct device *dev;
	struct imx8qm_ldb_channel channel[MAX_LDB_CHAN_NUM];
	struct clk *clk_pixel;
	struct clk *clk_bypass;
	int active_chno;	/* channel driving the link (master in split) */
};

static inline struct imx8qm_ldb_channel *
base_to_imx8qm_ldb_channel(struct ldb_channel *base)
{
	return container_of(base, struct imx8qm_ldb_channel, base);
}

static inline struct imx8qm_ldb *base_to_imx8qm_ldb(struct ldb *base)
{
	return container_of(base, struct imx8qm_ldb, base);
}

/*
 * Fill LVDS PHY configuration for the given DI pixel clock.  In split
 * (dual-link) mode each lane pair carries half the pixels, so the
 * differential clock runs at half the DI clock.
 */
static void imx8qm_ldb_set_phy_cfg(struct imx8qm_ldb *imx8qm_ldb,
				   unsigned long di_clk,
				   bool is_split, bool is_slave,
				   struct phy_configure_opts_lvds *phy_cfg)
{
	phy_cfg->bits_per_lane_and_dclk_cycle = 7;
	phy_cfg->lanes = 4;
	phy_cfg->differential_clk_rate = is_split ? di_clk / 2 : di_clk;
	phy_cfg->is_slave = is_slave;
}

/*
 * Atomic check: run the common LDB checks, then ask the PHY(s) to
 * validate the LVDS configuration for the adjusted mode (both master
 * and slave PHYs in split mode).
 */
static int imx8qm_ldb_bridge_atomic_check(struct drm_bridge *bridge,
					  struct drm_bridge_state *bridge_state,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	struct ldb_channel *ldb_ch = bridge->driver_private;
	struct ldb *ldb = ldb_ch->ldb;
	struct imx8qm_ldb_channel *imx8qm_ldb_ch =
					base_to_imx8qm_ldb_channel(ldb_ch);
	struct imx8qm_ldb *imx8qm_ldb = base_to_imx8qm_ldb(ldb);
	struct drm_display_mode *adj = &crtc_state->adjusted_mode;
	unsigned long di_clk = adj->clock * 1000;
	bool is_split = ldb_channel_is_split_link(ldb_ch);
	union phy_configure_opts opts = { };
	struct phy_configure_opts_lvds *phy_cfg = &opts.lvds;
	int ret;

	ret = ldb_bridge_atomic_check_helper(bridge, bridge_state,
					     crtc_state, conn_state);
	if (ret)
		return ret;

	imx8qm_ldb_set_phy_cfg(imx8qm_ldb, di_clk, is_split, false, phy_cfg);
	ret = phy_validate(imx8qm_ldb_ch->phy, PHY_MODE_LVDS, 0, &opts);
	if (ret < 0) {
		DRM_DEV_DEBUG_DRIVER(imx8qm_ldb->dev,
				     "failed to validate PHY: %d\n", ret);
		return ret;
	}

	if (is_split) {
		/* The other channel acts as the slave in split mode. */
		imx8qm_ldb_ch =
			&imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1];
		imx8qm_ldb_set_phy_cfg(imx8qm_ldb, di_clk, is_split, true,
				       phy_cfg);
		ret = phy_validate(imx8qm_ldb_ch->phy, PHY_MODE_LVDS, 0, &opts);
		if (ret < 0) {
			DRM_DEV_DEBUG_DRIVER(imx8qm_ldb->dev,
					     "failed to validate slave PHY: %d\n",
					     ret);
			return ret;
		}
	}

	return ret;
}

/*
 * Mode set: take a runtime-PM reference (dropped in atomic_disable),
 * init and configure the PHY(s), set clock rates, program data width
 * and sync polarities to match the adjusted mode.
 */
static void
imx8qm_ldb_bridge_mode_set(struct drm_bridge *bridge,
			   const struct drm_display_mode *mode,
			   const struct drm_display_mode *adjusted_mode)
{
	struct ldb_channel *ldb_ch = bridge->driver_private;
	struct ldb *ldb = ldb_ch->ldb;
	struct imx8qm_ldb_channel *imx8qm_ldb_ch =
					base_to_imx8qm_ldb_channel(ldb_ch);
	struct imx8qm_ldb *imx8qm_ldb = base_to_imx8qm_ldb(ldb);
	struct device *dev = imx8qm_ldb->dev;
	unsigned long di_clk = adjusted_mode->clock * 1000;
	bool is_split = ldb_channel_is_split_link(ldb_ch);
	union phy_configure_opts opts = { };
	struct phy_configure_opts_lvds *phy_cfg = &opts.lvds;
	u32 chno = ldb_ch->chno;
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		DRM_DEV_ERROR(dev, "failed to get runtime PM sync: %d\n", ret);

	ret = phy_init(imx8qm_ldb_ch->phy);
	if (ret < 0)
		DRM_DEV_ERROR(dev, "failed to initialize PHY: %d\n", ret);

	clk_set_rate(imx8qm_ldb->clk_bypass, di_clk);
	clk_set_rate(imx8qm_ldb->clk_pixel, di_clk);

	imx8qm_ldb_set_phy_cfg(imx8qm_ldb, di_clk, is_split, false, phy_cfg);
	ret = phy_configure(imx8qm_ldb_ch->phy, &opts);
	if (ret < 0)
		DRM_DEV_ERROR(dev, "failed to configure PHY: %d\n", ret);

	if (is_split) {
		imx8qm_ldb_ch =
			&imx8qm_ldb->channel[imx8qm_ldb->active_chno ^ 1];
		imx8qm_ldb_set_phy_cfg(imx8qm_ldb, di_clk, is_split, true,
				       phy_cfg);
		ret = phy_configure(imx8qm_ldb_ch->phy, &opts);
		if (ret < 0)
			DRM_DEV_ERROR(dev, "failed to configure slave PHY: %d\n",
				      ret);
	}

	/* input VSYNC signal from pixel link is active low */
	if (ldb_ch->chno == 0 || is_split)
		ldb->ldb_ctrl |= LDB_DI0_VS_POL_ACT_LOW;
	if (ldb_ch->chno == 1 || is_split)
		ldb->ldb_ctrl |= LDB_DI1_VS_POL_ACT_LOW;

	switch (ldb_ch->out_bus_format) {
	case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
		break;
	case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
	case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
		if (ldb_ch->chno == 0 || is_split)
			ldb->ldb_ctrl |= LDB_CH0_DATA_WIDTH_24BIT;
		if (ldb_ch->chno == 1 || is_split)
			ldb->ldb_ctrl |= LDB_CH1_DATA_WIDTH_24BIT;
		break;
	}

	ldb_bridge_mode_set_helper(bridge, mode, adjusted_mode);

	/* Mirror the mode's sync polarities into SS_CTRL. */
	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
		regmap_update_bits(ldb->regmap, SS_CTRL, CH_VSYNC_M(chno), 0);
	else if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
		regmap_update_bits(ldb->regmap, SS_CTRL,
				   CH_VSYNC_M(chno), CH_PVSYNC(chno));

	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
		regmap_update_bits(ldb->regmap, SS_CTRL, CH_HSYNC_M(chno), 0);
	else if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
		regmap_update_bits(ldb->regmap, SS_CTRL,
				   CH_HSYNC_M(chno), CH_PHSYNC(chno));
}

/* Enable: clocks on, route channel(s) to DI0, power on PHY(s), enable LDB. */
static void
imx8qm_ldb_bridge_atomic_enable(struct drm_bridge *bridge,
				struct drm_bridge_state *old_bridge_state)
{
	struct ldb_channel *ldb_ch = bridge->driver_private;
	struct ldb *ldb = ldb_ch->ldb;
	struct imx8qm_ldb_channel *imx8qm_ldb_ch =
					base_to_imx8qm_ldb_channel(ldb_ch);
	struct imx8qm_ldb *imx8qm_ldb = base_to_imx8qm_ldb(ldb);
	struct device *dev = imx8qm_ldb->dev;
	bool is_split = ldb_channel_is_split_link(ldb_ch);
	int ret;

	clk_prepare_enable(imx8qm_ldb->clk_pixel);
	clk_prepare_enable(imx8qm_ldb->clk_bypass);

	/* both DI0 and DI1 connect with pixel link, so ok to use DI0 only */
	if (ldb_ch->chno == 0 || is_split) {
		ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK;
		ldb->ldb_ctrl |= LDB_CH0_MODE_EN_TO_DI0;
	}
	if (ldb_ch->chno == 1 || is_split) {
		ldb->ldb_ctrl &= ~LDB_CH1_MODE_EN_MASK;
		ldb->ldb_ctrl |= LDB_CH1_MODE_EN_TO_DI0;
	}

	if (is_split) {
		ret = phy_power_on(imx8qm_ldb->channel[0].phy);
		if (ret)
			DRM_DEV_ERROR(dev,
				      "failed to power on channel0 PHY: %d\n",
				      ret);
		ret = phy_power_on(imx8qm_ldb->channel[1].phy);
		if (ret)
			DRM_DEV_ERROR(dev,
				      "failed to power on channel1 PHY: %d\n",
				      ret);
	} else {
		ret = phy_power_on(imx8qm_ldb_ch->phy);
		if (ret)
			DRM_DEV_ERROR(dev, "failed to power on PHY: %d\n", ret);
	}

	ldb_bridge_enable_helper(bridge);
}

/* Disable: reverse of atomic_enable, then drop the mode_set PM reference. */
static void
imx8qm_ldb_bridge_atomic_disable(struct drm_bridge *bridge,
				 struct drm_bridge_state *old_bridge_state)
{
	struct ldb_channel *ldb_ch = bridge->driver_private;
	struct ldb *ldb = ldb_ch->ldb;
	struct imx8qm_ldb_channel *imx8qm_ldb_ch =
					base_to_imx8qm_ldb_channel(ldb_ch);
	struct imx8qm_ldb *imx8qm_ldb = base_to_imx8qm_ldb(ldb);
	struct device *dev = imx8qm_ldb->dev;
	bool is_split = ldb_channel_is_split_link(ldb_ch);
	int ret;

	ldb_bridge_disable_helper(bridge);

	if (is_split) {
		ret = phy_power_off(imx8qm_ldb->channel[0].phy);
		if (ret)
			DRM_DEV_ERROR(dev,
				      "failed to power off channel0 PHY: %d\n",
				      ret);
		ret = phy_power_off(imx8qm_ldb->channel[1].phy);
		if (ret)
			DRM_DEV_ERROR(dev,
				      "failed to power off channel1 PHY: %d\n",
				      ret);
	} else {
		ret = phy_power_off(imx8qm_ldb_ch->phy);
		if (ret)
			DRM_DEV_ERROR(dev, "failed to power off PHY: %d\n", ret);
	}

	clk_disable_unprepare(imx8qm_ldb->clk_bypass);
	clk_disable_unprepare(imx8qm_ldb->clk_pixel);

	ret = pm_runtime_put(dev);
	if (ret < 0)
		DRM_DEV_ERROR(dev, "failed to put runtime PM: %d\n", ret);
}

/* LVDS output formats this bridge can produce. */
static const u32 imx8qm_ldb_bus_output_fmts[] = {
	MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
	MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
	MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
	MEDIA_BUS_FMT_FIXED,
};

/* Linear scan of the supported-output-format table. */
static bool imx8qm_ldb_bus_output_fmt_supported(u32 fmt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(imx8qm_ldb_bus_output_fmts); i++) {
		if (imx8qm_ldb_bus_output_fmts[i] == fmt)
			return true;
	}

	return false;
}

/*
 * Map each LVDS output format to the single 36-bit padded-low input
 * format.  For MEDIA_BUS_FMT_FIXED, infer 18 vs 24 bpp from the
 * connector's first reported bus format.  Caller frees the result.
 */
static u32 *
imx8qm_ldb_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
					    struct drm_bridge_state *bridge_state,
					    struct drm_crtc_state *crtc_state,
					    struct drm_connector_state *conn_state,
					    u32 output_fmt,
					    unsigned int *num_input_fmts)
{
	struct drm_display_info *di;
	const struct drm_format_info *finfo;
	u32 *input_fmts;

	if (!imx8qm_ldb_bus_output_fmt_supported(output_fmt))
		return NULL;

	*num_input_fmts = 1;

	input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
	if (!input_fmts)
		return NULL;

	switch (output_fmt) {
	case MEDIA_BUS_FMT_FIXED:
		di = &conn_state->connector->display_info;

		/*
		 * Look at the first bus format to determine input format.
		 * Default to MEDIA_BUS_FMT_RGB888_1X36_CPADLO, if no match.
		 */
		if (di->num_bus_formats) {
			finfo = drm_format_info(di->bus_formats[0]);

			input_fmts[0] = finfo->depth == 18 ?
					MEDIA_BUS_FMT_RGB666_1X36_CPADLO :
					MEDIA_BUS_FMT_RGB888_1X36_CPADLO;
		} else {
			input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X36_CPADLO;
		}
		break;
	case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
		input_fmts[0] = MEDIA_BUS_FMT_RGB666_1X36_CPADLO;
		break;
	case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
	case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
		input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X36_CPADLO;
		break;
	default:
		kfree(input_fmts);
		input_fmts = NULL;
		break;
	}

	return input_fmts;
}

/* Return a copy of the whole supported-output-format table. */
static u32 *
imx8qm_ldb_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
					     struct drm_bridge_state *bridge_state,
					     struct drm_crtc_state *crtc_state,
					     struct drm_connector_state *conn_state,
					     unsigned int *num_output_fmts)
{
	*num_output_fmts = ARRAY_SIZE(imx8qm_ldb_bus_output_fmts);
	return kmemdup(imx8qm_ldb_bus_output_fmts,
			sizeof(imx8qm_ldb_bus_output_fmts), GFP_KERNEL);
}

/* 300 MHz absolute limit; 150 MHz per channel in single-link mode. */
static enum drm_mode_status
imx8qm_ldb_bridge_mode_valid(struct drm_bridge *bridge,
			     const struct drm_display_info *info,
			     const struct drm_display_mode *mode)
{
	struct ldb_channel *ldb_ch = bridge->driver_private;
	bool is_single = ldb_channel_is_single_link(ldb_ch);

	if (mode->clock > 300000)
		return MODE_CLOCK_HIGH;

	if (mode->clock > 150000 && is_single)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

static const struct drm_bridge_funcs imx8qm_ldb_bridge_funcs = {
	.atomic_duplicate_state	= drm_atomic_helper_bridge_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_bridge_destroy_state,
	.atomic_reset		= drm_atomic_helper_bridge_reset,
	.mode_valid		= imx8qm_ldb_bridge_mode_valid,
	.attach			= ldb_bridge_attach_helper,
	.atomic_check		= imx8qm_ldb_bridge_atomic_check,
	.mode_set		= imx8qm_ldb_bridge_mode_set,
	.atomic_enable		= imx8qm_ldb_bridge_atomic_enable,
	.atomic_disable		= imx8qm_ldb_bridge_atomic_disable,
	.atomic_get_input_bus_fmts =
			imx8qm_ldb_bridge_atomic_get_input_bus_fmts,
	.atomic_get_output_bus_fmts =
			imx8qm_ldb_bridge_atomic_get_output_bus_fmts,
};

/* Acquire the "lvds_phy" PHY for every available channel. */
static int imx8qm_ldb_get_phy(struct imx8qm_ldb *imx8qm_ldb)
{
	struct imx8qm_ldb_channel *imx8qm_ldb_ch;
	struct ldb_channel *ldb_ch;
	struct device *dev = imx8qm_ldb->dev;
	int i, ret;

	for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
		imx8qm_ldb_ch = &imx8qm_ldb->channel[i];
		ldb_ch = &imx8qm_ldb_ch->base;

		if (!ldb_ch->is_available)
			continue;

		imx8qm_ldb_ch->phy = devm_of_phy_get(dev, ldb_ch->np,
						     "lvds_phy");
		if (IS_ERR(imx8qm_ldb_ch->phy)) {
			ret = PTR_ERR(imx8qm_ldb_ch->phy);
			if (ret != -EPROBE_DEFER)
				DRM_DEV_ERROR(dev,
					      "failed to get channel%d PHY: %d\n",
					      i, ret);
			return ret;
		}
	}

	return 0;
}

/*
 * Probe: acquire clocks, run the common LDB init, detect dual-link
 * (odd/even) panel wiring when both channels are present, get the
 * PHYs, resolve the downstream bridge(s), and register the bridge(s).
 */
static int imx8qm_ldb_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct imx8qm_ldb *imx8qm_ldb;
	struct imx8qm_ldb_channel *imx8qm_ldb_ch;
	struct ldb *ldb;
	struct ldb_channel *ldb_ch;
	struct device_node *port1, *port2;
	int pixel_order;
	int ret, i;

	imx8qm_ldb = devm_kzalloc(dev, sizeof(*imx8qm_ldb), GFP_KERNEL);
	if (!imx8qm_ldb)
		return -ENOMEM;

	imx8qm_ldb->clk_pixel = devm_clk_get(dev, "pixel");
	if (IS_ERR(imx8qm_ldb->clk_pixel)) {
		ret = PTR_ERR(imx8qm_ldb->clk_pixel);
		if (ret != -EPROBE_DEFER)
			DRM_DEV_ERROR(dev,
				      "failed to get pixel clock: %d\n", ret);
		return ret;
	}

	imx8qm_ldb->clk_bypass = devm_clk_get(dev, "bypass");
	if (IS_ERR(imx8qm_ldb->clk_bypass)) {
		ret = PTR_ERR(imx8qm_ldb->clk_bypass);
		if (ret != -EPROBE_DEFER)
			DRM_DEV_ERROR(dev,
				      "failed to get bypass clock: %d\n", ret);
		return ret;
	}

	imx8qm_ldb->dev = dev;

	ldb = &imx8qm_ldb->base;
	ldb->dev = dev;
	ldb->ctrl_reg = 0xe0;

	for (i = 0; i < MAX_LDB_CHAN_NUM; i++)
		ldb->channel[i] = &imx8qm_ldb->channel[i].base;

	ret = ldb_init_helper(ldb);
	if (ret)
		return ret;

	if (ldb->available_ch_cnt == 0) {
		DRM_DEV_DEBUG_DRIVER(dev, "no available channel\n");
		return 0;
	}

	if (ldb->available_ch_cnt == 2) {
		/* Both channels in use: require odd/even dual-link wiring. */
		port1 = of_graph_get_port_by_id(ldb->channel[0]->np, 1);
		port2 = of_graph_get_port_by_id(ldb->channel[1]->np, 1);
		pixel_order =
			drm_of_lvds_get_dual_link_pixel_order(port1, port2);
		of_node_put(port1);
		of_node_put(port2);

		if (pixel_order != DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS) {
			DRM_DEV_ERROR(dev, "invalid dual link pixel order: %d\n",
				      pixel_order);
			return -EINVAL;
		}

		imx8qm_ldb->active_chno = 0;
		imx8qm_ldb_ch = &imx8qm_ldb->channel[0];
		ldb_ch = &imx8qm_ldb_ch->base;
		ldb_ch->link_type = pixel_order;
	} else {
		/* Single-channel use: pick the first available channel. */
		for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
			imx8qm_ldb_ch = &imx8qm_ldb->channel[i];
			ldb_ch = &imx8qm_ldb_ch->base;

			if (ldb_ch->is_available) {
				imx8qm_ldb->active_chno = ldb_ch->chno;
				break;
			}
		}
	}

	ret = imx8qm_ldb_get_phy(imx8qm_ldb);
	if (ret)
		return ret;

	ret = ldb_find_next_bridge_helper(ldb);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, imx8qm_ldb);
	pm_runtime_enable(dev);

	ldb_add_bridge_helper(ldb, &imx8qm_ldb_bridge_funcs);

	return ret;
}

static void imx8qm_ldb_remove(struct platform_device *pdev)
{
	struct imx8qm_ldb *imx8qm_ldb = platform_get_drvdata(pdev);
	struct ldb *ldb = &imx8qm_ldb->base;

	ldb_remove_bridge_helper(ldb);

	pm_runtime_disable(&pdev->dev);
}

/* Nothing to save: the control register is re-initialized on resume. */
static int __maybe_unused imx8qm_ldb_runtime_suspend(struct device *dev)
{
	return 0;
}

static int __maybe_unused imx8qm_ldb_runtime_resume(struct device *dev)
{
	struct imx8qm_ldb *imx8qm_ldb = dev_get_drvdata(dev);
	struct ldb *ldb = &imx8qm_ldb->base;

	/* disable LDB by resetting the control register to POR default */
	regmap_write(ldb->regmap, ldb->ctrl_reg, 0);

	return 0;
}

static const struct dev_pm_ops imx8qm_ldb_pm_ops = {
	SET_RUNTIME_PM_OPS(imx8qm_ldb_runtime_suspend,
			   imx8qm_ldb_runtime_resume, NULL)
};

static const struct of_device_id imx8qm_ldb_dt_ids[] = {
	{ .compatible = "fsl,imx8qm-ldb" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx8qm_ldb_dt_ids);

static struct platform_driver imx8qm_ldb_driver = {
	.probe	= imx8qm_ldb_probe,
	.remove_new = imx8qm_ldb_remove,
	.driver	= {
		.pm = &imx8qm_ldb_pm_ops,
		.name = DRIVER_NAME,
		.of_match_table = imx8qm_ldb_dt_ids,
	},
};
module_platform_driver(imx8qm_ldb_driver);

MODULE_DESCRIPTION("i.MX8QM LVDS Display Bridge(LDB)/Pixel Mapper bridge driver");
MODULE_AUTHOR("Liu Ying <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
linux-master
drivers/gpu/drm/bridge/imx/imx8qm-ldb.c
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright 2020,2022 NXP */ #include <linux/firmware/imx/svc/misc.h> #include <linux/media-bus-format.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_graph.h> #include <linux/platform_device.h> #include <drm/drm_atomic_state_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_print.h> #include <dt-bindings/firmware/imx/rsrc.h> #define DRIVER_NAME "imx8qxp-display-pixel-link" #define PL_MAX_MST_ADDR 3 #define PL_MAX_NEXT_BRIDGES 2 struct imx8qxp_pixel_link { struct drm_bridge bridge; struct drm_bridge *next_bridge; struct device *dev; struct imx_sc_ipc *ipc_handle; u8 stream_id; u8 dc_id; u32 sink_rsc; u32 mst_addr; u8 mst_addr_ctrl; u8 mst_en_ctrl; u8 mst_vld_ctrl; u8 sync_ctrl; }; static void imx8qxp_pixel_link_enable_mst_en(struct imx8qxp_pixel_link *pl) { int ret; ret = imx_sc_misc_set_control(pl->ipc_handle, pl->sink_rsc, pl->mst_en_ctrl, true); if (ret) DRM_DEV_ERROR(pl->dev, "failed to enable DC%u stream%u pixel link mst_en: %d\n", pl->dc_id, pl->stream_id, ret); } static void imx8qxp_pixel_link_enable_mst_vld(struct imx8qxp_pixel_link *pl) { int ret; ret = imx_sc_misc_set_control(pl->ipc_handle, pl->sink_rsc, pl->mst_vld_ctrl, true); if (ret) DRM_DEV_ERROR(pl->dev, "failed to enable DC%u stream%u pixel link mst_vld: %d\n", pl->dc_id, pl->stream_id, ret); } static void imx8qxp_pixel_link_enable_sync(struct imx8qxp_pixel_link *pl) { int ret; ret = imx_sc_misc_set_control(pl->ipc_handle, pl->sink_rsc, pl->sync_ctrl, true); if (ret) DRM_DEV_ERROR(pl->dev, "failed to enable DC%u stream%u pixel link sync: %d\n", pl->dc_id, pl->stream_id, ret); } static int imx8qxp_pixel_link_disable_mst_en(struct imx8qxp_pixel_link *pl) { int ret; ret = imx_sc_misc_set_control(pl->ipc_handle, pl->sink_rsc, pl->mst_en_ctrl, false); if (ret) DRM_DEV_ERROR(pl->dev, "failed to disable DC%u stream%u pixel link mst_en: %d\n", pl->dc_id, pl->stream_id, ret); return ret; } static int 
imx8qxp_pixel_link_disable_mst_vld(struct imx8qxp_pixel_link *pl) { int ret; ret = imx_sc_misc_set_control(pl->ipc_handle, pl->sink_rsc, pl->mst_vld_ctrl, false); if (ret) DRM_DEV_ERROR(pl->dev, "failed to disable DC%u stream%u pixel link mst_vld: %d\n", pl->dc_id, pl->stream_id, ret); return ret; } static int imx8qxp_pixel_link_disable_sync(struct imx8qxp_pixel_link *pl) { int ret; ret = imx_sc_misc_set_control(pl->ipc_handle, pl->sink_rsc, pl->sync_ctrl, false); if (ret) DRM_DEV_ERROR(pl->dev, "failed to disable DC%u stream%u pixel link sync: %d\n", pl->dc_id, pl->stream_id, ret); return ret; } static void imx8qxp_pixel_link_set_mst_addr(struct imx8qxp_pixel_link *pl) { int ret; ret = imx_sc_misc_set_control(pl->ipc_handle, pl->sink_rsc, pl->mst_addr_ctrl, pl->mst_addr); if (ret) DRM_DEV_ERROR(pl->dev, "failed to set DC%u stream%u pixel link mst addr(%u): %d\n", pl->dc_id, pl->stream_id, pl->mst_addr, ret); } static int imx8qxp_pixel_link_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct imx8qxp_pixel_link *pl = bridge->driver_private; if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) { DRM_DEV_ERROR(pl->dev, "do not support creating a drm_connector\n"); return -EINVAL; } if (!bridge->encoder) { DRM_DEV_ERROR(pl->dev, "missing encoder\n"); return -ENODEV; } return drm_bridge_attach(bridge->encoder, pl->next_bridge, bridge, DRM_BRIDGE_ATTACH_NO_CONNECTOR); } static void imx8qxp_pixel_link_bridge_mode_set(struct drm_bridge *bridge, const struct drm_display_mode *mode, const struct drm_display_mode *adjusted_mode) { struct imx8qxp_pixel_link *pl = bridge->driver_private; imx8qxp_pixel_link_set_mst_addr(pl); } static void imx8qxp_pixel_link_bridge_atomic_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct imx8qxp_pixel_link *pl = bridge->driver_private; imx8qxp_pixel_link_enable_mst_en(pl); imx8qxp_pixel_link_enable_mst_vld(pl); imx8qxp_pixel_link_enable_sync(pl); } static void 
imx8qxp_pixel_link_bridge_atomic_disable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct imx8qxp_pixel_link *pl = bridge->driver_private; imx8qxp_pixel_link_disable_mst_en(pl); imx8qxp_pixel_link_disable_mst_vld(pl); imx8qxp_pixel_link_disable_sync(pl); } static const u32 imx8qxp_pixel_link_bus_output_fmts[] = { MEDIA_BUS_FMT_RGB888_1X36_CPADLO, MEDIA_BUS_FMT_RGB666_1X36_CPADLO, }; static bool imx8qxp_pixel_link_bus_output_fmt_supported(u32 fmt) { int i; for (i = 0; i < ARRAY_SIZE(imx8qxp_pixel_link_bus_output_fmts); i++) { if (imx8qxp_pixel_link_bus_output_fmts[i] == fmt) return true; } return false; } static u32 * imx8qxp_pixel_link_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state, u32 output_fmt, unsigned int *num_input_fmts) { u32 *input_fmts; if (!imx8qxp_pixel_link_bus_output_fmt_supported(output_fmt)) return NULL; *num_input_fmts = 1; input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL); if (!input_fmts) return NULL; input_fmts[0] = output_fmt; return input_fmts; } static u32 * imx8qxp_pixel_link_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state, unsigned int *num_output_fmts) { *num_output_fmts = ARRAY_SIZE(imx8qxp_pixel_link_bus_output_fmts); return kmemdup(imx8qxp_pixel_link_bus_output_fmts, sizeof(imx8qxp_pixel_link_bus_output_fmts), GFP_KERNEL); } static const struct drm_bridge_funcs imx8qxp_pixel_link_bridge_funcs = { .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_reset = drm_atomic_helper_bridge_reset, .attach = imx8qxp_pixel_link_bridge_attach, .mode_set = imx8qxp_pixel_link_bridge_mode_set, .atomic_enable = imx8qxp_pixel_link_bridge_atomic_enable, .atomic_disable = 
imx8qxp_pixel_link_bridge_atomic_disable, .atomic_get_input_bus_fmts = imx8qxp_pixel_link_bridge_atomic_get_input_bus_fmts, .atomic_get_output_bus_fmts = imx8qxp_pixel_link_bridge_atomic_get_output_bus_fmts, }; static int imx8qxp_pixel_link_disable_all_controls(struct imx8qxp_pixel_link *pl) { int ret; ret = imx8qxp_pixel_link_disable_mst_en(pl); if (ret) return ret; ret = imx8qxp_pixel_link_disable_mst_vld(pl); if (ret) return ret; return imx8qxp_pixel_link_disable_sync(pl); } static struct drm_bridge * imx8qxp_pixel_link_find_next_bridge(struct imx8qxp_pixel_link *pl) { struct device_node *np = pl->dev->of_node; struct device_node *port, *remote; struct drm_bridge *next_bridge[PL_MAX_NEXT_BRIDGES]; u32 port_id; bool found_port = false; int reg, ep_cnt = 0; /* select the first next bridge by default */ int bridge_sel = 0; for (port_id = 1; port_id <= PL_MAX_MST_ADDR + 1; port_id++) { port = of_graph_get_port_by_id(np, port_id); if (!port) continue; if (of_device_is_available(port)) { found_port = true; of_node_put(port); break; } of_node_put(port); } if (!found_port) { DRM_DEV_ERROR(pl->dev, "no available output port\n"); return ERR_PTR(-ENODEV); } for (reg = 0; reg < PL_MAX_NEXT_BRIDGES; reg++) { remote = of_graph_get_remote_node(np, port_id, reg); if (!remote) continue; if (!of_device_is_available(remote->parent)) { DRM_DEV_DEBUG(pl->dev, "port%u endpoint%u remote parent is not available\n", port_id, reg); of_node_put(remote); continue; } next_bridge[ep_cnt] = of_drm_find_bridge(remote); if (!next_bridge[ep_cnt]) { of_node_put(remote); return ERR_PTR(-EPROBE_DEFER); } /* specially select the next bridge with companion PXL2DPI */ if (of_property_present(remote, "fsl,companion-pxl2dpi")) bridge_sel = ep_cnt; ep_cnt++; of_node_put(remote); } pl->mst_addr = port_id - 1; return next_bridge[bridge_sel]; } static int imx8qxp_pixel_link_bridge_probe(struct platform_device *pdev) { struct imx8qxp_pixel_link *pl; struct device *dev = &pdev->dev; struct device_node *np = 
dev->of_node; int ret; pl = devm_kzalloc(dev, sizeof(*pl), GFP_KERNEL); if (!pl) return -ENOMEM; ret = imx_scu_get_handle(&pl->ipc_handle); if (ret) { if (ret != -EPROBE_DEFER) DRM_DEV_ERROR(dev, "failed to get SCU ipc handle: %d\n", ret); return ret; } ret = of_property_read_u8(np, "fsl,dc-id", &pl->dc_id); if (ret) { DRM_DEV_ERROR(dev, "failed to get DC index: %d\n", ret); return ret; } ret = of_property_read_u8(np, "fsl,dc-stream-id", &pl->stream_id); if (ret) { DRM_DEV_ERROR(dev, "failed to get DC stream index: %d\n", ret); return ret; } pl->dev = dev; pl->sink_rsc = pl->dc_id ? IMX_SC_R_DC_1 : IMX_SC_R_DC_0; if (pl->stream_id == 0) { pl->mst_addr_ctrl = IMX_SC_C_PXL_LINK_MST1_ADDR; pl->mst_en_ctrl = IMX_SC_C_PXL_LINK_MST1_ENB; pl->mst_vld_ctrl = IMX_SC_C_PXL_LINK_MST1_VLD; pl->sync_ctrl = IMX_SC_C_SYNC_CTRL0; } else { pl->mst_addr_ctrl = IMX_SC_C_PXL_LINK_MST2_ADDR; pl->mst_en_ctrl = IMX_SC_C_PXL_LINK_MST2_ENB; pl->mst_vld_ctrl = IMX_SC_C_PXL_LINK_MST2_VLD; pl->sync_ctrl = IMX_SC_C_SYNC_CTRL1; } /* disable all controls to POR default */ ret = imx8qxp_pixel_link_disable_all_controls(pl); if (ret) return ret; pl->next_bridge = imx8qxp_pixel_link_find_next_bridge(pl); if (IS_ERR(pl->next_bridge)) { ret = PTR_ERR(pl->next_bridge); if (ret != -EPROBE_DEFER) DRM_DEV_ERROR(dev, "failed to find next bridge: %d\n", ret); return ret; } platform_set_drvdata(pdev, pl); pl->bridge.driver_private = pl; pl->bridge.funcs = &imx8qxp_pixel_link_bridge_funcs; pl->bridge.of_node = np; drm_bridge_add(&pl->bridge); return ret; } static void imx8qxp_pixel_link_bridge_remove(struct platform_device *pdev) { struct imx8qxp_pixel_link *pl = platform_get_drvdata(pdev); drm_bridge_remove(&pl->bridge); } static const struct of_device_id imx8qxp_pixel_link_dt_ids[] = { { .compatible = "fsl,imx8qm-dc-pixel-link", }, { .compatible = "fsl,imx8qxp-dc-pixel-link", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, imx8qxp_pixel_link_dt_ids); static struct platform_driver 
imx8qxp_pixel_link_bridge_driver = { .probe = imx8qxp_pixel_link_bridge_probe, .remove_new = imx8qxp_pixel_link_bridge_remove, .driver = { .of_match_table = imx8qxp_pixel_link_dt_ids, .name = DRIVER_NAME, }, }; module_platform_driver(imx8qxp_pixel_link_bridge_driver); MODULE_DESCRIPTION("i.MX8QXP/QM display pixel link bridge driver"); MODULE_AUTHOR("Liu Ying <[email protected]>"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:" DRIVER_NAME);
linux-master
drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright 2020 NXP */ #include <linux/clk.h> #include <linux/media-bus-format.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_graph.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <drm/drm_atomic_state_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_connector.h> #include <drm/drm_fourcc.h> #include <drm/drm_of.h> #include <drm/drm_print.h> #include "imx-ldb-helper.h" #define LDB_CH_SEL BIT(28) #define SS_CTRL 0x20 #define CH_HSYNC_M(id) BIT(0 + ((id) * 2)) #define CH_VSYNC_M(id) BIT(1 + ((id) * 2)) #define CH_PHSYNC(id) BIT(0 + ((id) * 2)) #define CH_PVSYNC(id) BIT(1 + ((id) * 2)) #define DRIVER_NAME "imx8qxp-ldb" struct imx8qxp_ldb_channel { struct ldb_channel base; struct phy *phy; unsigned int di_id; }; struct imx8qxp_ldb { struct ldb base; struct device *dev; struct imx8qxp_ldb_channel channel[MAX_LDB_CHAN_NUM]; struct clk *clk_pixel; struct clk *clk_bypass; struct drm_bridge *companion; int active_chno; }; static inline struct imx8qxp_ldb_channel * base_to_imx8qxp_ldb_channel(struct ldb_channel *base) { return container_of(base, struct imx8qxp_ldb_channel, base); } static inline struct imx8qxp_ldb *base_to_imx8qxp_ldb(struct ldb *base) { return container_of(base, struct imx8qxp_ldb, base); } static void imx8qxp_ldb_set_phy_cfg(struct imx8qxp_ldb *imx8qxp_ldb, unsigned long di_clk, bool is_split, struct phy_configure_opts_lvds *phy_cfg) { phy_cfg->bits_per_lane_and_dclk_cycle = 7; phy_cfg->lanes = 4; if (is_split) { phy_cfg->differential_clk_rate = di_clk / 2; phy_cfg->is_slave = !imx8qxp_ldb->companion; } else { phy_cfg->differential_clk_rate = di_clk; phy_cfg->is_slave = false; } } static int imx8qxp_ldb_bridge_atomic_check(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state 
*conn_state) { struct ldb_channel *ldb_ch = bridge->driver_private; struct ldb *ldb = ldb_ch->ldb; struct imx8qxp_ldb_channel *imx8qxp_ldb_ch = base_to_imx8qxp_ldb_channel(ldb_ch); struct imx8qxp_ldb *imx8qxp_ldb = base_to_imx8qxp_ldb(ldb); struct drm_bridge *companion = imx8qxp_ldb->companion; struct drm_display_mode *adj = &crtc_state->adjusted_mode; unsigned long di_clk = adj->clock * 1000; bool is_split = ldb_channel_is_split_link(ldb_ch); union phy_configure_opts opts = { }; struct phy_configure_opts_lvds *phy_cfg = &opts.lvds; int ret; ret = ldb_bridge_atomic_check_helper(bridge, bridge_state, crtc_state, conn_state); if (ret) return ret; imx8qxp_ldb_set_phy_cfg(imx8qxp_ldb, di_clk, is_split, phy_cfg); ret = phy_validate(imx8qxp_ldb_ch->phy, PHY_MODE_LVDS, 0, &opts); if (ret < 0) { DRM_DEV_DEBUG_DRIVER(imx8qxp_ldb->dev, "failed to validate PHY: %d\n", ret); return ret; } if (is_split && companion) { ret = companion->funcs->atomic_check(companion, bridge_state, crtc_state, conn_state); if (ret) return ret; } return ret; } static void imx8qxp_ldb_bridge_mode_set(struct drm_bridge *bridge, const struct drm_display_mode *mode, const struct drm_display_mode *adjusted_mode) { struct ldb_channel *ldb_ch = bridge->driver_private; struct ldb_channel *companion_ldb_ch; struct ldb *ldb = ldb_ch->ldb; struct imx8qxp_ldb_channel *imx8qxp_ldb_ch = base_to_imx8qxp_ldb_channel(ldb_ch); struct imx8qxp_ldb *imx8qxp_ldb = base_to_imx8qxp_ldb(ldb); struct drm_bridge *companion = imx8qxp_ldb->companion; struct device *dev = imx8qxp_ldb->dev; unsigned long di_clk = adjusted_mode->clock * 1000; bool is_split = ldb_channel_is_split_link(ldb_ch); union phy_configure_opts opts = { }; struct phy_configure_opts_lvds *phy_cfg = &opts.lvds; u32 chno = ldb_ch->chno; int ret; ret = pm_runtime_get_sync(dev); if (ret < 0) DRM_DEV_ERROR(dev, "failed to get runtime PM sync: %d\n", ret); ret = phy_init(imx8qxp_ldb_ch->phy); if (ret < 0) DRM_DEV_ERROR(dev, "failed to initialize PHY: %d\n", ret); 
ret = phy_set_mode(imx8qxp_ldb_ch->phy, PHY_MODE_LVDS); if (ret < 0) DRM_DEV_ERROR(dev, "failed to set PHY mode: %d\n", ret); if (is_split && companion) { companion_ldb_ch = bridge_to_ldb_ch(companion); companion_ldb_ch->in_bus_format = ldb_ch->in_bus_format; companion_ldb_ch->out_bus_format = ldb_ch->out_bus_format; } clk_set_rate(imx8qxp_ldb->clk_bypass, di_clk); clk_set_rate(imx8qxp_ldb->clk_pixel, di_clk); imx8qxp_ldb_set_phy_cfg(imx8qxp_ldb, di_clk, is_split, phy_cfg); ret = phy_configure(imx8qxp_ldb_ch->phy, &opts); if (ret < 0) DRM_DEV_ERROR(dev, "failed to configure PHY: %d\n", ret); if (chno == 0) ldb->ldb_ctrl &= ~LDB_CH_SEL; else ldb->ldb_ctrl |= LDB_CH_SEL; /* input VSYNC signal from pixel link is active low */ if (imx8qxp_ldb_ch->di_id == 0) ldb->ldb_ctrl |= LDB_DI0_VS_POL_ACT_LOW; else ldb->ldb_ctrl |= LDB_DI1_VS_POL_ACT_LOW; /* * For split mode, settle input VSYNC signal polarity and * channel selection down early. */ if (is_split) regmap_write(ldb->regmap, ldb->ctrl_reg, ldb->ldb_ctrl); ldb_bridge_mode_set_helper(bridge, mode, adjusted_mode); if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) regmap_update_bits(ldb->regmap, SS_CTRL, CH_VSYNC_M(chno), 0); else if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) regmap_update_bits(ldb->regmap, SS_CTRL, CH_VSYNC_M(chno), CH_PVSYNC(chno)); if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) regmap_update_bits(ldb->regmap, SS_CTRL, CH_HSYNC_M(chno), 0); else if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) regmap_update_bits(ldb->regmap, SS_CTRL, CH_HSYNC_M(chno), CH_PHSYNC(chno)); if (is_split && companion) companion->funcs->mode_set(companion, mode, adjusted_mode); } static void imx8qxp_ldb_bridge_atomic_pre_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct ldb_channel *ldb_ch = bridge->driver_private; struct ldb *ldb = ldb_ch->ldb; struct imx8qxp_ldb *imx8qxp_ldb = base_to_imx8qxp_ldb(ldb); struct drm_bridge *companion = imx8qxp_ldb->companion; bool is_split = 
ldb_channel_is_split_link(ldb_ch); clk_prepare_enable(imx8qxp_ldb->clk_pixel); clk_prepare_enable(imx8qxp_ldb->clk_bypass); if (is_split && companion) companion->funcs->atomic_pre_enable(companion, old_bridge_state); } static void imx8qxp_ldb_bridge_atomic_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct ldb_channel *ldb_ch = bridge->driver_private; struct ldb *ldb = ldb_ch->ldb; struct imx8qxp_ldb_channel *imx8qxp_ldb_ch = base_to_imx8qxp_ldb_channel(ldb_ch); struct imx8qxp_ldb *imx8qxp_ldb = base_to_imx8qxp_ldb(ldb); struct drm_bridge *companion = imx8qxp_ldb->companion; struct device *dev = imx8qxp_ldb->dev; bool is_split = ldb_channel_is_split_link(ldb_ch); int ret; if (ldb_ch->chno == 0 || is_split) { ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK; ldb->ldb_ctrl |= imx8qxp_ldb_ch->di_id == 0 ? LDB_CH0_MODE_EN_TO_DI0 : LDB_CH0_MODE_EN_TO_DI1; } if (ldb_ch->chno == 1 || is_split) { ldb->ldb_ctrl &= ~LDB_CH1_MODE_EN_MASK; ldb->ldb_ctrl |= imx8qxp_ldb_ch->di_id == 0 ? 
LDB_CH1_MODE_EN_TO_DI0 : LDB_CH1_MODE_EN_TO_DI1; } ldb_bridge_enable_helper(bridge); ret = phy_power_on(imx8qxp_ldb_ch->phy); if (ret) DRM_DEV_ERROR(dev, "failed to power on PHY: %d\n", ret); if (is_split && companion) companion->funcs->atomic_enable(companion, old_bridge_state); } static void imx8qxp_ldb_bridge_atomic_disable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct ldb_channel *ldb_ch = bridge->driver_private; struct ldb *ldb = ldb_ch->ldb; struct imx8qxp_ldb_channel *imx8qxp_ldb_ch = base_to_imx8qxp_ldb_channel(ldb_ch); struct imx8qxp_ldb *imx8qxp_ldb = base_to_imx8qxp_ldb(ldb); struct drm_bridge *companion = imx8qxp_ldb->companion; struct device *dev = imx8qxp_ldb->dev; bool is_split = ldb_channel_is_split_link(ldb_ch); int ret; ret = phy_power_off(imx8qxp_ldb_ch->phy); if (ret) DRM_DEV_ERROR(dev, "failed to power off PHY: %d\n", ret); ret = phy_exit(imx8qxp_ldb_ch->phy); if (ret < 0) DRM_DEV_ERROR(dev, "failed to teardown PHY: %d\n", ret); ldb_bridge_disable_helper(bridge); clk_disable_unprepare(imx8qxp_ldb->clk_bypass); clk_disable_unprepare(imx8qxp_ldb->clk_pixel); if (is_split && companion) companion->funcs->atomic_disable(companion, old_bridge_state); ret = pm_runtime_put(dev); if (ret < 0) DRM_DEV_ERROR(dev, "failed to put runtime PM: %d\n", ret); } static const u32 imx8qxp_ldb_bus_output_fmts[] = { MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA, MEDIA_BUS_FMT_FIXED, }; static bool imx8qxp_ldb_bus_output_fmt_supported(u32 fmt) { int i; for (i = 0; i < ARRAY_SIZE(imx8qxp_ldb_bus_output_fmts); i++) { if (imx8qxp_ldb_bus_output_fmts[i] == fmt) return true; } return false; } static u32 * imx8qxp_ldb_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state, u32 output_fmt, unsigned int *num_input_fmts) { struct drm_display_info *di; const struct 
drm_format_info *finfo; u32 *input_fmts; if (!imx8qxp_ldb_bus_output_fmt_supported(output_fmt)) return NULL; *num_input_fmts = 1; input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL); if (!input_fmts) return NULL; switch (output_fmt) { case MEDIA_BUS_FMT_FIXED: di = &conn_state->connector->display_info; /* * Look at the first bus format to determine input format. * Default to MEDIA_BUS_FMT_RGB888_1X24, if no match. */ if (di->num_bus_formats) { finfo = drm_format_info(di->bus_formats[0]); input_fmts[0] = finfo->depth == 18 ? MEDIA_BUS_FMT_RGB666_1X24_CPADHI : MEDIA_BUS_FMT_RGB888_1X24; } else { input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24; } break; case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG: input_fmts[0] = MEDIA_BUS_FMT_RGB666_1X24_CPADHI; break; case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG: case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA: input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24; break; default: kfree(input_fmts); input_fmts = NULL; break; } return input_fmts; } static u32 * imx8qxp_ldb_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state, unsigned int *num_output_fmts) { *num_output_fmts = ARRAY_SIZE(imx8qxp_ldb_bus_output_fmts); return kmemdup(imx8qxp_ldb_bus_output_fmts, sizeof(imx8qxp_ldb_bus_output_fmts), GFP_KERNEL); } static enum drm_mode_status imx8qxp_ldb_bridge_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode) { struct ldb_channel *ldb_ch = bridge->driver_private; bool is_single = ldb_channel_is_single_link(ldb_ch); if (mode->clock > 170000) return MODE_CLOCK_HIGH; if (mode->clock > 150000 && is_single) return MODE_CLOCK_HIGH; return MODE_OK; } static const struct drm_bridge_funcs imx8qxp_ldb_bridge_funcs = { .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_reset = drm_atomic_helper_bridge_reset, .mode_valid 
= imx8qxp_ldb_bridge_mode_valid, .attach = ldb_bridge_attach_helper, .atomic_check = imx8qxp_ldb_bridge_atomic_check, .mode_set = imx8qxp_ldb_bridge_mode_set, .atomic_pre_enable = imx8qxp_ldb_bridge_atomic_pre_enable, .atomic_enable = imx8qxp_ldb_bridge_atomic_enable, .atomic_disable = imx8qxp_ldb_bridge_atomic_disable, .atomic_get_input_bus_fmts = imx8qxp_ldb_bridge_atomic_get_input_bus_fmts, .atomic_get_output_bus_fmts = imx8qxp_ldb_bridge_atomic_get_output_bus_fmts, }; static int imx8qxp_ldb_set_di_id(struct imx8qxp_ldb *imx8qxp_ldb) { struct imx8qxp_ldb_channel *imx8qxp_ldb_ch = &imx8qxp_ldb->channel[imx8qxp_ldb->active_chno]; struct ldb_channel *ldb_ch = &imx8qxp_ldb_ch->base; struct device_node *ep, *remote; struct device *dev = imx8qxp_ldb->dev; struct of_endpoint endpoint; int ret; ep = of_graph_get_endpoint_by_regs(ldb_ch->np, 0, -1); if (!ep) { DRM_DEV_ERROR(dev, "failed to get port0 endpoint\n"); return -EINVAL; } remote = of_graph_get_remote_endpoint(ep); of_node_put(ep); if (!remote) { DRM_DEV_ERROR(dev, "failed to get port0 remote endpoint\n"); return -EINVAL; } ret = of_graph_parse_endpoint(remote, &endpoint); of_node_put(remote); if (ret) { DRM_DEV_ERROR(dev, "failed to parse port0 remote endpoint: %d\n", ret); return ret; } imx8qxp_ldb_ch->di_id = endpoint.id; return 0; } static int imx8qxp_ldb_check_chno_and_dual_link(struct ldb_channel *ldb_ch, int link) { if ((link == DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS && ldb_ch->chno != 0) || (link == DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS && ldb_ch->chno != 1)) return -EINVAL; return 0; } static int imx8qxp_ldb_parse_dt_companion(struct imx8qxp_ldb *imx8qxp_ldb) { struct imx8qxp_ldb_channel *imx8qxp_ldb_ch = &imx8qxp_ldb->channel[imx8qxp_ldb->active_chno]; struct ldb_channel *ldb_ch = &imx8qxp_ldb_ch->base; struct ldb_channel *companion_ldb_ch; struct device_node *companion; struct device_node *child; struct device_node *companion_port = NULL; struct device_node *port1, *port2; struct device *dev = 
imx8qxp_ldb->dev; const struct of_device_id *match; u32 i; int dual_link; int ret; /* Locate the companion LDB for dual-link operation, if any. */ companion = of_parse_phandle(dev->of_node, "fsl,companion-ldb", 0); if (!companion) return 0; if (!of_device_is_available(companion)) { DRM_DEV_ERROR(dev, "companion LDB is not available\n"); ret = -ENODEV; goto out; } /* * Sanity check: the companion bridge must have the same compatible * string. */ match = of_match_device(dev->driver->of_match_table, dev); if (!of_device_is_compatible(companion, match->compatible)) { DRM_DEV_ERROR(dev, "companion LDB is incompatible\n"); ret = -ENXIO; goto out; } for_each_available_child_of_node(companion, child) { ret = of_property_read_u32(child, "reg", &i); if (ret || i > MAX_LDB_CHAN_NUM - 1) { DRM_DEV_ERROR(dev, "invalid channel node address: %u\n", i); ret = -EINVAL; of_node_put(child); goto out; } /* * Channel numbers have to be different, because channel0 * transmits odd pixels and channel1 transmits even pixels. */ if (i == (ldb_ch->chno ^ 0x1)) { companion_port = child; break; } } if (!companion_port) { DRM_DEV_ERROR(dev, "failed to find companion LDB channel port\n"); ret = -EINVAL; goto out; } /* * We need to work out if the sink is expecting us to function in * dual-link mode. We do this by looking at the DT port nodes we are * connected to. If they are marked as expecting odd pixels and * even pixels than we need to enable LDB split mode. 
*/ port1 = of_graph_get_port_by_id(ldb_ch->np, 1); port2 = of_graph_get_port_by_id(companion_port, 1); dual_link = drm_of_lvds_get_dual_link_pixel_order(port1, port2); of_node_put(port1); of_node_put(port2); switch (dual_link) { case DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS: ldb_ch->link_type = LDB_CH_DUAL_LINK_ODD_EVEN_PIXELS; break; case DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS: ldb_ch->link_type = LDB_CH_DUAL_LINK_EVEN_ODD_PIXELS; break; default: ret = dual_link; DRM_DEV_ERROR(dev, "failed to get dual link pixel order: %d\n", ret); goto out; } ret = imx8qxp_ldb_check_chno_and_dual_link(ldb_ch, dual_link); if (ret < 0) { DRM_DEV_ERROR(dev, "unmatched channel number(%u) vs dual link(%d)\n", ldb_ch->chno, dual_link); goto out; } imx8qxp_ldb->companion = of_drm_find_bridge(companion_port); if (!imx8qxp_ldb->companion) { ret = -EPROBE_DEFER; DRM_DEV_DEBUG_DRIVER(dev, "failed to find bridge for companion bridge: %d\n", ret); goto out; } DRM_DEV_DEBUG_DRIVER(dev, "dual-link configuration detected (companion bridge %pOF)\n", companion); companion_ldb_ch = bridge_to_ldb_ch(imx8qxp_ldb->companion); companion_ldb_ch->link_type = ldb_ch->link_type; out: of_node_put(companion_port); of_node_put(companion); return ret; } static int imx8qxp_ldb_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct imx8qxp_ldb *imx8qxp_ldb; struct imx8qxp_ldb_channel *imx8qxp_ldb_ch; struct ldb *ldb; struct ldb_channel *ldb_ch; int ret, i; imx8qxp_ldb = devm_kzalloc(dev, sizeof(*imx8qxp_ldb), GFP_KERNEL); if (!imx8qxp_ldb) return -ENOMEM; imx8qxp_ldb->clk_pixel = devm_clk_get(dev, "pixel"); if (IS_ERR(imx8qxp_ldb->clk_pixel)) { ret = PTR_ERR(imx8qxp_ldb->clk_pixel); if (ret != -EPROBE_DEFER) DRM_DEV_ERROR(dev, "failed to get pixel clock: %d\n", ret); return ret; } imx8qxp_ldb->clk_bypass = devm_clk_get(dev, "bypass"); if (IS_ERR(imx8qxp_ldb->clk_bypass)) { ret = PTR_ERR(imx8qxp_ldb->clk_bypass); if (ret != -EPROBE_DEFER) DRM_DEV_ERROR(dev, "failed to get bypass clock: %d\n", ret); 
return ret; } imx8qxp_ldb->dev = dev; ldb = &imx8qxp_ldb->base; ldb->dev = dev; ldb->ctrl_reg = 0xe0; for (i = 0; i < MAX_LDB_CHAN_NUM; i++) ldb->channel[i] = &imx8qxp_ldb->channel[i].base; ret = ldb_init_helper(ldb); if (ret) return ret; if (ldb->available_ch_cnt == 0) { DRM_DEV_DEBUG_DRIVER(dev, "no available channel\n"); return 0; } else if (ldb->available_ch_cnt > 1) { DRM_DEV_ERROR(dev, "invalid available channel number(%u)\n", ldb->available_ch_cnt); return -EINVAL; } for (i = 0; i < MAX_LDB_CHAN_NUM; i++) { imx8qxp_ldb_ch = &imx8qxp_ldb->channel[i]; ldb_ch = &imx8qxp_ldb_ch->base; if (ldb_ch->is_available) { imx8qxp_ldb->active_chno = ldb_ch->chno; break; } } imx8qxp_ldb_ch->phy = devm_of_phy_get(dev, ldb_ch->np, "lvds_phy"); if (IS_ERR(imx8qxp_ldb_ch->phy)) { ret = PTR_ERR(imx8qxp_ldb_ch->phy); if (ret != -EPROBE_DEFER) DRM_DEV_ERROR(dev, "failed to get channel%d PHY: %d\n", imx8qxp_ldb->active_chno, ret); return ret; } ret = ldb_find_next_bridge_helper(ldb); if (ret) return ret; ret = imx8qxp_ldb_set_di_id(imx8qxp_ldb); if (ret) return ret; ret = imx8qxp_ldb_parse_dt_companion(imx8qxp_ldb); if (ret) return ret; platform_set_drvdata(pdev, imx8qxp_ldb); pm_runtime_enable(dev); ldb_add_bridge_helper(ldb, &imx8qxp_ldb_bridge_funcs); return ret; } static void imx8qxp_ldb_remove(struct platform_device *pdev) { struct imx8qxp_ldb *imx8qxp_ldb = platform_get_drvdata(pdev); struct ldb *ldb = &imx8qxp_ldb->base; ldb_remove_bridge_helper(ldb); pm_runtime_disable(&pdev->dev); } static int __maybe_unused imx8qxp_ldb_runtime_suspend(struct device *dev) { return 0; } static int __maybe_unused imx8qxp_ldb_runtime_resume(struct device *dev) { struct imx8qxp_ldb *imx8qxp_ldb = dev_get_drvdata(dev); struct ldb *ldb = &imx8qxp_ldb->base; /* disable LDB by resetting the control register to POR default */ regmap_write(ldb->regmap, ldb->ctrl_reg, 0); return 0; } static const struct dev_pm_ops imx8qxp_ldb_pm_ops = { SET_RUNTIME_PM_OPS(imx8qxp_ldb_runtime_suspend, 
imx8qxp_ldb_runtime_resume, NULL) }; static const struct of_device_id imx8qxp_ldb_dt_ids[] = { { .compatible = "fsl,imx8qxp-ldb" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, imx8qxp_ldb_dt_ids); static struct platform_driver imx8qxp_ldb_driver = { .probe = imx8qxp_ldb_probe, .remove_new = imx8qxp_ldb_remove, .driver = { .pm = &imx8qxp_ldb_pm_ops, .name = DRIVER_NAME, .of_match_table = imx8qxp_ldb_dt_ids, }, }; module_platform_driver(imx8qxp_ldb_driver); MODULE_DESCRIPTION("i.MX8QXP LVDS Display Bridge(LDB)/Pixel Mapper bridge driver"); MODULE_AUTHOR("Liu Ying <[email protected]>"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:" DRIVER_NAME);
linux-master
drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
// SPDX-License-Identifier: GPL-2.0+

/*
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright 2019,2020,2022 NXP
 */

#include <linux/export.h>
#include <linux/media-bus-format.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regmap.h>

#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>

#include "imx-ldb-helper.h"

/* True if the channel drives a single LVDS link. */
bool ldb_channel_is_single_link(struct ldb_channel *ldb_ch)
{
	return ldb_ch->link_type == LDB_CH_SINGLE_LINK;
}
EXPORT_SYMBOL_GPL(ldb_channel_is_single_link);

/*
 * True if the channel is used in a dual-link (pixel-split) configuration,
 * i.e. either even/odd or odd/even pixel distribution across both links.
 */
bool ldb_channel_is_split_link(struct ldb_channel *ldb_ch)
{
	return ldb_ch->link_type == LDB_CH_DUAL_LINK_EVEN_ODD_PIXELS ||
	       ldb_ch->link_type == LDB_CH_DUAL_LINK_ODD_EVEN_PIXELS;
}
EXPORT_SYMBOL_GPL(ldb_channel_is_split_link);

/*
 * Latch the bus formats negotiated by the atomic bridge state so that
 * ldb_bridge_mode_set_helper() can program the LDB control bits from them.
 * Always succeeds.
 */
int ldb_bridge_atomic_check_helper(struct drm_bridge *bridge,
				   struct drm_bridge_state *bridge_state,
				   struct drm_crtc_state *crtc_state,
				   struct drm_connector_state *conn_state)
{
	struct ldb_channel *ldb_ch = bridge->driver_private;

	ldb_ch->in_bus_format = bridge_state->input_bus_cfg.format;
	ldb_ch->out_bus_format = bridge_state->output_bus_cfg.format;

	return 0;
}
EXPORT_SYMBOL_GPL(ldb_bridge_atomic_check_helper);

/*
 * Accumulate split-mode, data-width and bit-mapping bits into ldb->ldb_ctrl
 * according to the output bus format latched at atomic_check time.  The
 * register itself is only written later by ldb_bridge_enable_helper().
 * NOTE(review): bits are only ever OR-ed in here; clearing is presumably the
 * platform driver's / disable path's job — confirm against callers.
 */
void ldb_bridge_mode_set_helper(struct drm_bridge *bridge,
				const struct drm_display_mode *mode,
				const struct drm_display_mode *adjusted_mode)
{
	struct ldb_channel *ldb_ch = bridge->driver_private;
	struct ldb *ldb = ldb_ch->ldb;
	bool is_split = ldb_channel_is_split_link(ldb_ch);

	if (is_split)
		ldb->ldb_ctrl |= LDB_SPLIT_MODE_EN;

	switch (ldb_ch->out_bus_format) {
	case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
		/* 18-bit SPWG needs no extra width/mapping bits */
		break;
	case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
		if (ldb_ch->chno == 0 || is_split)
			ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24;
		if (ldb_ch->chno == 1 || is_split)
			ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24;
		break;
	case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
		if (ldb_ch->chno == 0 || is_split)
			ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24 |
					 LDB_BIT_MAP_CH0_JEIDA;
		if (ldb_ch->chno == 1 || is_split)
			ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24 |
					 LDB_BIT_MAP_CH1_JEIDA;
		break;
	}
}
EXPORT_SYMBOL_GPL(ldb_bridge_mode_set_helper);

void ldb_bridge_enable_helper(struct drm_bridge *bridge)
{
	struct ldb_channel *ldb_ch = bridge->driver_private;
	struct ldb *ldb = ldb_ch->ldb;

	/*
	 * Platform specific bridge drivers should set ldb_ctrl properly
	 * for the enablement, so just write the ctrl_reg here.
	 */
	regmap_write(ldb->regmap, ldb->ctrl_reg, ldb->ldb_ctrl);
}
EXPORT_SYMBOL_GPL(ldb_bridge_enable_helper);

/*
 * Clear the channel-enable mode bits for this channel (both channels when
 * split-link) and flush the result to the control register.
 */
void ldb_bridge_disable_helper(struct drm_bridge *bridge)
{
	struct ldb_channel *ldb_ch = bridge->driver_private;
	struct ldb *ldb = ldb_ch->ldb;
	bool is_split = ldb_channel_is_split_link(ldb_ch);

	if (ldb_ch->chno == 0 || is_split)
		ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK;
	if (ldb_ch->chno == 1 || is_split)
		ldb->ldb_ctrl &= ~LDB_CH1_MODE_EN_MASK;

	regmap_write(ldb->regmap, ldb->ctrl_reg, ldb->ldb_ctrl);
}
EXPORT_SYMBOL_GPL(ldb_bridge_disable_helper);

/*
 * Attach the channel's downstream bridge.  This helper refuses to create a
 * drm_connector itself (DRM_BRIDGE_ATTACH_NO_CONNECTOR is mandatory) and
 * requires the encoder to be set up already.
 *
 * Returns 0 on success, -EINVAL / -ENODEV on the precondition failures
 * above, or the error from drm_bridge_attach().
 */
int ldb_bridge_attach_helper(struct drm_bridge *bridge,
			     enum drm_bridge_attach_flags flags)
{
	struct ldb_channel *ldb_ch = bridge->driver_private;
	struct ldb *ldb = ldb_ch->ldb;

	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
		DRM_DEV_ERROR(ldb->dev,
			      "do not support creating a drm_connector\n");
		return -EINVAL;
	}

	if (!bridge->encoder) {
		DRM_DEV_ERROR(ldb->dev, "missing encoder\n");
		return -ENODEV;
	}

	return drm_bridge_attach(bridge->encoder,
				 ldb_ch->next_bridge, bridge,
				 DRM_BRIDGE_ATTACH_NO_CONNECTOR);
}
EXPORT_SYMBOL_GPL(ldb_bridge_attach_helper);

/*
 * Common LDB init: look up the parent syscon regmap and walk the available
 * child nodes, registering each one as an LDB channel keyed by its "reg"
 * property.  Increments ldb->available_ch_cnt per valid channel.
 *
 * Returns 0 on success or a negative errno (regmap lookup failure, or
 * -EINVAL for an out-of-range/unreadable channel address).
 */
int ldb_init_helper(struct ldb *ldb)
{
	struct device *dev = ldb->dev;
	struct device_node *np = dev->of_node;
	struct device_node *child;
	int ret;
	u32 i;

	ldb->regmap = syscon_node_to_regmap(np->parent);
	if (IS_ERR(ldb->regmap)) {
		ret = PTR_ERR(ldb->regmap);
		if (ret != -EPROBE_DEFER)
			DRM_DEV_ERROR(dev, "failed to get regmap: %d\n", ret);
		return ret;
	}

	for_each_available_child_of_node(np, child) {
		struct ldb_channel *ldb_ch;

		ret = of_property_read_u32(child, "reg", &i);
		if (ret || i > MAX_LDB_CHAN_NUM - 1) {
			ret = -EINVAL;
			DRM_DEV_ERROR(dev,
				      "invalid channel node address: %u\n", i);
			/* drop the reference taken by the iterator */
			of_node_put(child);
			return ret;
		}

		ldb_ch = ldb->channel[i];
		ldb_ch->ldb = ldb;
		ldb_ch->chno = i;
		ldb_ch->is_available = true;
		ldb_ch->np = child;

		ldb->available_ch_cnt++;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ldb_init_helper);

/*
 * Resolve the next bridge (port 1, endpoint 0 in DT) for every available
 * channel.  Returns 0 on success or the devm_drm_of_get_bridge() error.
 */
int ldb_find_next_bridge_helper(struct ldb *ldb)
{
	struct device *dev = ldb->dev;
	struct ldb_channel *ldb_ch;
	int ret, i;

	for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
		ldb_ch = ldb->channel[i];

		if (!ldb_ch->is_available)
			continue;

		ldb_ch->next_bridge = devm_drm_of_get_bridge(dev, ldb_ch->np,
							     1, 0);
		if (IS_ERR(ldb_ch->next_bridge)) {
			ret = PTR_ERR(ldb_ch->next_bridge);
			if (ret != -EPROBE_DEFER)
				DRM_DEV_ERROR(dev,
					      "failed to get next bridge: %d\n",
					      ret);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ldb_find_next_bridge_helper);

/*
 * Register a drm_bridge for every available channel, using the platform
 * driver's bridge_funcs.  Counterpart of ldb_remove_bridge_helper().
 */
void ldb_add_bridge_helper(struct ldb *ldb,
			   const struct drm_bridge_funcs *bridge_funcs)
{
	struct ldb_channel *ldb_ch;
	int i;

	for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
		ldb_ch = ldb->channel[i];

		if (!ldb_ch->is_available)
			continue;

		ldb_ch->bridge.driver_private = ldb_ch;
		ldb_ch->bridge.funcs = bridge_funcs;
		ldb_ch->bridge.of_node = ldb_ch->np;

		drm_bridge_add(&ldb_ch->bridge);
	}
}
EXPORT_SYMBOL_GPL(ldb_add_bridge_helper);

/* Unregister the drm_bridge of every available channel. */
void ldb_remove_bridge_helper(struct ldb *ldb)
{
	struct ldb_channel *ldb_ch;
	int i;

	for (i = 0; i < MAX_LDB_CHAN_NUM; i++) {
		ldb_ch = ldb->channel[i];

		if (!ldb_ch->is_available)
			continue;

		drm_bridge_remove(&ldb_ch->bridge);
	}
}
EXPORT_SYMBOL_GPL(ldb_remove_bridge_helper);

MODULE_DESCRIPTION("i.MX8 LVDS Display Bridge(LDB)/Pixel Mapper bridge helper");
MODULE_AUTHOR("Liu Ying <[email protected]>");
MODULE_LICENSE("GPL");
linux-master
drivers/gpu/drm/bridge/imx/imx-ldb-helper.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * DesignWare HDMI audio driver
 *
 * Written and tested against the Designware HDMI Tx found in iMX6.
 */
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_edid.h>

#include <sound/asoundef.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/pcm.h>
#include <sound/pcm_drm_eld.h>
#include <sound/pcm_iec958.h>

#include "dw-hdmi-audio.h"

#define DRIVER_NAME "dw-hdmi-ahb-audio"

/* Provide some bits rather than bit offsets */
enum {
	HDMI_AHB_DMA_CONF0_SW_FIFO_RST = BIT(7),
	HDMI_AHB_DMA_CONF0_EN_HLOCK = BIT(3),
	HDMI_AHB_DMA_START_START = BIT(0),
	HDMI_AHB_DMA_STOP_STOP = BIT(0),
	HDMI_IH_MUTE_AHBDMAAUD_STAT0_ERROR = BIT(5),
	HDMI_IH_MUTE_AHBDMAAUD_STAT0_LOST = BIT(4),
	HDMI_IH_MUTE_AHBDMAAUD_STAT0_RETRY = BIT(3),
	HDMI_IH_MUTE_AHBDMAAUD_STAT0_DONE = BIT(2),
	HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFFULL = BIT(1),
	HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFEMPTY = BIT(0),
	HDMI_IH_MUTE_AHBDMAAUD_STAT0_ALL =
		HDMI_IH_MUTE_AHBDMAAUD_STAT0_ERROR |
		HDMI_IH_MUTE_AHBDMAAUD_STAT0_LOST |
		HDMI_IH_MUTE_AHBDMAAUD_STAT0_RETRY |
		HDMI_IH_MUTE_AHBDMAAUD_STAT0_DONE |
		HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFFULL |
		HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFEMPTY,
	HDMI_IH_AHBDMAAUD_STAT0_ERROR = BIT(5),
	HDMI_IH_AHBDMAAUD_STAT0_LOST = BIT(4),
	HDMI_IH_AHBDMAAUD_STAT0_RETRY = BIT(3),
	HDMI_IH_AHBDMAAUD_STAT0_DONE = BIT(2),
	HDMI_IH_AHBDMAAUD_STAT0_BUFFFULL = BIT(1),
	HDMI_IH_AHBDMAAUD_STAT0_BUFFEMPTY = BIT(0),
	HDMI_IH_AHBDMAAUD_STAT0_ALL =
		HDMI_IH_AHBDMAAUD_STAT0_ERROR |
		HDMI_IH_AHBDMAAUD_STAT0_LOST |
		HDMI_IH_AHBDMAAUD_STAT0_RETRY |
		HDMI_IH_AHBDMAAUD_STAT0_DONE |
		HDMI_IH_AHBDMAAUD_STAT0_BUFFFULL |
		HDMI_IH_AHBDMAAUD_STAT0_BUFFEMPTY,
	HDMI_AHB_DMA_CONF0_INCR16 = 2 << 1,
	HDMI_AHB_DMA_CONF0_INCR8 = 1 << 1,
	HDMI_AHB_DMA_CONF0_INCR4 = 0,
	HDMI_AHB_DMA_CONF0_BURST_MODE = BIT(0),
	HDMI_AHB_DMA_MASK_DONE = BIT(7),

	/* Register offsets (byte-wide register map) */
	HDMI_REVISION_ID = 0x0001,
	HDMI_IH_AHBDMAAUD_STAT0 = 0x0109,
	HDMI_IH_MUTE_AHBDMAAUD_STAT0 = 0x0189,
	HDMI_AHB_DMA_CONF0 = 0x3600,
	HDMI_AHB_DMA_START = 0x3601,
	HDMI_AHB_DMA_STOP = 0x3602,
	HDMI_AHB_DMA_THRSLD = 0x3603,
	HDMI_AHB_DMA_STRADDR0 = 0x3604,
	HDMI_AHB_DMA_STPADDR0 = 0x3608,
	HDMI_AHB_DMA_MASK = 0x3614,
	HDMI_AHB_DMA_POL = 0x3615,
	HDMI_AHB_DMA_CONF1 = 0x3616,
	HDMI_AHB_DMA_BUFFPOL = 0x361a,
};

/* Per-channel-count DMA_CONF1 value and CEA channel allocation code. */
struct dw_hdmi_channel_conf {
	u8 conf1;
	u8 ca;
};

/*
 * The default mapping of ALSA channels to HDMI channels and speaker
 * allocation bits.  Note that we can't do channel remapping here -
 * channels must be in the same order.
 *
 * Mappings for alsa-lib pcm/surround*.conf files:
 *
 *		Front	Sur4.0	Sur4.1	Sur5.0	Sur5.1	Sur7.1
 * Channels	2	4	6	6	6	8
 *
 * Our mapping from ALSA channel to CEA686D speaker name and HDMI channel:
 *
 *				Number of ALSA channels
 * ALSA Channel	2	3	4	5	6	7	8
 * 0		FL:0	=	=	=	=	=	=
 * 1		FR:1	=	=	=	=	=	=
 * 2			FC:3	RL:4	LFE:2	=	=	=
 * 3				RR:5	RL:4	FC:3	=	=
 * 4					RR:5	RL:4	=	=
 * 5						RR:5	=	=
 * 6							RC:6	=
 * 7							RLC/FRC	RLC/FRC
 */
static struct dw_hdmi_channel_conf default_hdmi_channel_config[7] = {
	{ 0x03, 0x00 },	/* FL,FR */
	{ 0x0b, 0x02 },	/* FL,FR,FC */
	{ 0x33, 0x08 },	/* FL,FR,RL,RR */
	{ 0x37, 0x09 },	/* FL,FR,LFE,RL,RR */
	{ 0x3f, 0x0b },	/* FL,FR,LFE,FC,RL,RR */
	{ 0x7f, 0x0f },	/* FL,FR,LFE,FC,RL,RR,RC */
	{ 0xff, 0x13 },	/* FL,FR,LFE,FC,RL,RR,[FR]RC,[FR]LC */
};

/* Driver state; lives in the snd_card's private_data. */
struct snd_dw_hdmi {
	struct snd_card *card;
	struct snd_pcm *pcm;
	spinlock_t lock;		/* guards substream/DMA start-stop */
	struct dw_hdmi_audio_data data;
	struct snd_pcm_substream *substream;
	void (*reformat)(struct snd_dw_hdmi *, size_t, size_t);
	void *buf_src;			/* userspace-visible vmalloc buffer */
	void *buf_dst;			/* DMA-able buffer fed to hardware */
	dma_addr_t buf_addr;
	unsigned buf_offset;
	unsigned buf_period;
	unsigned buf_size;
	unsigned channels;
	u8 revision;
	u8 iec_offset;			/* position within the 192-frame IEC block */
	u8 cs[192][8];			/* precomputed channel-status bits */
};

/* Write a 32-bit value as four byte writes (registers are 8 bits wide). */
static void dw_hdmi_writel(u32 val, void __iomem *ptr)
{
	writeb_relaxed(val, ptr);
	writeb_relaxed(val >> 8, ptr + 1);
	writeb_relaxed(val >> 16, ptr + 2);
	writeb_relaxed(val >> 24, ptr + 3);
}

/*
 * Convert to hardware format: The userspace buffer contains IEC958 samples,
 * with the PCUV bits in bits 31..28 and audio samples in bits 27..4.  We
 * need these to be in bits 27..24, with the IEC B bit in bit 28, and audio
 * samples in 23..0.
 *
 * Default preamble in bits 3..0: 8 = block start, 4 = even 2 = odd
 *
 * Ideally, we could do with having the data properly formatted in userspace.
 */
static void dw_hdmi_reformat_iec958(struct snd_dw_hdmi *dw,
	size_t offset, size_t bytes)
{
	u32 *src = dw->buf_src + offset;
	u32 *dst = dw->buf_dst + offset;
	u32 *end = dw->buf_src + offset + bytes;

	do {
		u32 b, sample = *src++;

		b = (sample & 8) << (28 - 3);

		sample >>= 4;

		*dst++ = sample | b;
	} while (src < end);
}

/* Even parity over the sample, placed in the P bit (bit 27). */
static u32 parity(u32 sample)
{
	sample ^= sample >> 16;
	sample ^= sample >> 8;
	sample ^= sample >> 4;
	sample ^= sample >> 2;
	sample ^= sample >> 1;
	return (sample & 1) << 27;
}

/*
 * Convert S24 samples to the hardware IEC958-like format, inserting the
 * precomputed channel-status bits (dw->cs) and computed parity per sample.
 */
static void dw_hdmi_reformat_s24(struct snd_dw_hdmi *dw,
	size_t offset, size_t bytes)
{
	u32 *src = dw->buf_src + offset;
	u32 *dst = dw->buf_dst + offset;
	u32 *end = dw->buf_src + offset + bytes;

	do {
		unsigned i;
		u8 *cs;

		cs = dw->cs[dw->iec_offset++];
		if (dw->iec_offset >= 192)
			dw->iec_offset = 0;

		i = dw->channels;
		do {
			u32 sample = *src++;

			sample &= ~0xff000000;
			sample |= *cs++ << 24;
			sample |= parity(sample & ~0xf8000000);

			*dst++ = sample;
		} while (--i);
	} while (src < end);
}

/*
 * Precompute the per-frame, per-channel IEC958 channel-status bit table
 * used by dw_hdmi_reformat_s24().  The B (block start) bit is set on
 * frame 0, channel 0.
 */
static void dw_hdmi_create_cs(struct snd_dw_hdmi *dw,
	struct snd_pcm_runtime *runtime)
{
	u8 cs[4];
	unsigned ch, i, j;

	snd_pcm_create_iec958_consumer(runtime, cs, sizeof(cs));

	memset(dw->cs, 0, sizeof(dw->cs));

	for (ch = 0; ch < 8; ch++) {
		cs[2] &= ~IEC958_AES2_CON_CHANNEL;
		cs[2] |= (ch + 1) << 4;

		for (i = 0; i < ARRAY_SIZE(cs); i++) {
			unsigned c = cs[i];

			for (j = 0; j < 8; j++, c >>= 1)
				dw->cs[i * 8 + j][ch] = (c & 1) << 2;
		}
	}
	dw->cs[0][0] |= BIT(4);	/* IEC B bit: start of 192-frame block */
}

/*
 * Reformat one period into the DMA buffer, program the DMA window and kick
 * the transfer.  Also advances buf_offset to the next period (wrapping).
 * Called with dw->lock held or from the IRQ handler.
 */
static void dw_hdmi_start_dma(struct snd_dw_hdmi *dw)
{
	void __iomem *base = dw->data.base;
	unsigned offset = dw->buf_offset;
	unsigned period = dw->buf_period;
	u32 start, stop;

	dw->reformat(dw, offset, period);

	/* Clear all irqs before enabling irqs and starting DMA */
	writeb_relaxed(HDMI_IH_AHBDMAAUD_STAT0_ALL,
		       base + HDMI_IH_AHBDMAAUD_STAT0);

	start = dw->buf_addr + offset;
	stop = start + period - 1;

	/* Setup the hardware start/stop addresses */
	dw_hdmi_writel(start, base + HDMI_AHB_DMA_STRADDR0);
	dw_hdmi_writel(stop, base + HDMI_AHB_DMA_STPADDR0);

	writeb_relaxed((u8)~HDMI_AHB_DMA_MASK_DONE, base + HDMI_AHB_DMA_MASK);
	writeb(HDMI_AHB_DMA_START_START, base + HDMI_AHB_DMA_START);

	offset += period;
	if (offset >= dw->buf_size)
		offset = 0;
	dw->buf_offset = offset;
}

static void dw_hdmi_stop_dma(struct snd_dw_hdmi *dw)
{
	/* Disable interrupts before disabling DMA */
	writeb_relaxed(~0, dw->data.base + HDMI_AHB_DMA_MASK);
	writeb_relaxed(HDMI_AHB_DMA_STOP_STOP, dw->data.base + HDMI_AHB_DMA_STOP);
}

/*
 * Hard IRQ handler (shared line): acknowledge the status bits and, on a
 * DONE interrupt with an active substream, report the elapsed period and
 * queue the next DMA period.
 */
static irqreturn_t snd_dw_hdmi_irq(int irq, void *data)
{
	struct snd_dw_hdmi *dw = data;
	struct snd_pcm_substream *substream;
	unsigned stat;

	stat = readb_relaxed(dw->data.base + HDMI_IH_AHBDMAAUD_STAT0);
	if (!stat)
		return IRQ_NONE;

	writeb_relaxed(stat, dw->data.base + HDMI_IH_AHBDMAAUD_STAT0);

	substream = dw->substream;
	if (stat & HDMI_IH_AHBDMAAUD_STAT0_DONE && substream) {
		snd_pcm_period_elapsed(substream);

		spin_lock(&dw->lock);
		/* re-check under the lock: trigger(STOP) may have raced us */
		if (dw->substream)
			dw_hdmi_start_dma(dw);
		spin_unlock(&dw->lock);
	}

	return IRQ_HANDLED;
}

static const struct snd_pcm_hardware dw_hdmi_hw = {
	.info = SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_BLOCK_TRANSFER |
		SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_MMAP_VALID,
	.formats = SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE |
		   SNDRV_PCM_FMTBIT_S24_LE,
	.rates = SNDRV_PCM_RATE_32000 |
		 SNDRV_PCM_RATE_44100 |
		 SNDRV_PCM_RATE_48000 |
		 SNDRV_PCM_RATE_88200 |
		 SNDRV_PCM_RATE_96000 |
		 SNDRV_PCM_RATE_176400 |
		 SNDRV_PCM_RATE_192000,
	.channels_min = 2,
	.channels_max = 8,
	.buffer_bytes_max = 1024 * 1024,
	.period_bytes_min = 256,
	.period_bytes_max = 8192,	/* ERR004323: must limit to 8k */
	.periods_min = 2,
	.periods_max = 16,
	.fifo_size = 0,
};

/*
 * PCM open: apply hardware constraints (ELD, rates, integer periods,
 * buffer bound), reset the audio FIFO and interrupt state, then request
 * the shared IRQ and unmute only the DONE interrupt.
 */
static int dw_hdmi_open(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_dw_hdmi *dw = substream->private_data;
	void __iomem *base = dw->data.base;
	u8 *eld;
	int ret;

	runtime->hw = dw_hdmi_hw;

	eld = dw->data.get_eld(dw->data.hdmi);
	if (eld) {
		ret = snd_pcm_hw_constraint_eld(runtime, eld);
		if (ret < 0)
			return ret;
	}

	ret = snd_pcm_limit_hw_rates(runtime);
	if (ret < 0)
		return ret;

	ret = snd_pcm_hw_constraint_integer(runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		return ret;

	/* Limit the buffer size to the size of the preallocated buffer */
	ret = snd_pcm_hw_constraint_minmax(runtime,
					   SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
					   0, substream->dma_buffer.bytes);
	if (ret < 0)
		return ret;

	/* Clear FIFO */
	writeb_relaxed(HDMI_AHB_DMA_CONF0_SW_FIFO_RST,
		       base + HDMI_AHB_DMA_CONF0);

	/* Configure interrupt polarities */
	writeb_relaxed(~0, base + HDMI_AHB_DMA_POL);
	writeb_relaxed(~0, base + HDMI_AHB_DMA_BUFFPOL);

	/* Keep interrupts masked, and clear any pending */
	writeb_relaxed(~0, base + HDMI_AHB_DMA_MASK);
	writeb_relaxed(~0, base + HDMI_IH_AHBDMAAUD_STAT0);

	ret = request_irq(dw->data.irq, snd_dw_hdmi_irq, IRQF_SHARED,
			  "dw-hdmi-audio", dw);
	if (ret)
		return ret;

	/* Un-mute done interrupt */
	writeb_relaxed(HDMI_IH_MUTE_AHBDMAAUD_STAT0_ALL &
		       ~HDMI_IH_MUTE_AHBDMAAUD_STAT0_DONE,
		       base + HDMI_IH_MUTE_AHBDMAAUD_STAT0);

	return 0;
}

/* PCM close: mute all interrupts and release the IRQ. */
static int dw_hdmi_close(struct snd_pcm_substream *substream)
{
	struct snd_dw_hdmi *dw = substream->private_data;

	/* Mute all interrupts */
	writeb_relaxed(HDMI_IH_MUTE_AHBDMAAUD_STAT0_ALL,
		       dw->data.base + HDMI_IH_MUTE_AHBDMAAUD_STAT0);

	free_irq(dw->data.irq, dw);

	return 0;
}

static int dw_hdmi_hw_free(struct snd_pcm_substream *substream)
{
	return snd_pcm_lib_free_vmalloc_buffer(substream);
}

static int dw_hdmi_hw_params(struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params)
{
	/* Allocate the PCM runtime buffer, which is exposed to userspace. */
	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
						params_buffer_bytes(params));
}

/*
 * PCM prepare: program the DMA/FIFO configuration per the controller
 * revision (values taken from the FSL 3.0.5/4.1.0 BSP), set sample rate,
 * channel count/allocation, pick the reformat routine for the stream
 * format, and cache the buffer geometry for the DMA engine.
 */
static int dw_hdmi_prepare(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_dw_hdmi *dw = substream->private_data;
	u8 threshold, conf0, conf1, ca;

	/* Setup as per 3.0.5 FSL 4.1.0 BSP */
	switch (dw->revision) {
	case 0x0a:
		conf0 = HDMI_AHB_DMA_CONF0_BURST_MODE |
			HDMI_AHB_DMA_CONF0_INCR4;
		if (runtime->channels == 2)
			threshold = 126;
		else
			threshold = 124;
		break;
	case 0x1a:
		conf0 = HDMI_AHB_DMA_CONF0_BURST_MODE |
			HDMI_AHB_DMA_CONF0_INCR8;
		threshold = 128;
		break;
	default:
		/* NOTREACHED */
		return -EINVAL;
	}

	dw_hdmi_set_sample_rate(dw->data.hdmi, runtime->rate);

	/* Minimum number of bytes in the fifo. */
	runtime->hw.fifo_size = threshold * 32;

	conf0 |= HDMI_AHB_DMA_CONF0_EN_HLOCK;
	conf1 = default_hdmi_channel_config[runtime->channels - 2].conf1;
	ca = default_hdmi_channel_config[runtime->channels - 2].ca;

	writeb_relaxed(threshold, dw->data.base + HDMI_AHB_DMA_THRSLD);
	writeb_relaxed(conf0, dw->data.base + HDMI_AHB_DMA_CONF0);
	writeb_relaxed(conf1, dw->data.base + HDMI_AHB_DMA_CONF1);

	dw_hdmi_set_channel_count(dw->data.hdmi, runtime->channels);
	dw_hdmi_set_channel_allocation(dw->data.hdmi, ca);

	switch (runtime->format) {
	case SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE:
		dw->reformat = dw_hdmi_reformat_iec958;
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		dw_hdmi_create_cs(dw, runtime);
		dw->reformat = dw_hdmi_reformat_s24;
		break;
	}
	dw->iec_offset = 0;
	dw->channels = runtime->channels;
	dw->buf_src = runtime->dma_area;
	dw->buf_dst = substream->dma_buffer.area;
	dw->buf_addr = substream->dma_buffer.addr;
	dw->buf_period = snd_pcm_lib_period_bytes(substream);
	dw->buf_size = snd_pcm_lib_buffer_bytes(substream);

	return 0;
}

/*
 * PCM trigger: START/STOP the DMA engine and HDMI audio under dw->lock so
 * the IRQ handler sees a consistent substream pointer.
 */
static int dw_hdmi_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_dw_hdmi *dw = substream->private_data;
	unsigned long flags;
	int ret = 0;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		spin_lock_irqsave(&dw->lock, flags);
		dw->buf_offset = 0;
		dw->substream = substream;
		dw_hdmi_start_dma(dw);
		dw_hdmi_audio_enable(dw->data.hdmi);
		spin_unlock_irqrestore(&dw->lock, flags);
		substream->runtime->delay = substream->runtime->period_size;
		break;

	case SNDRV_PCM_TRIGGER_STOP:
		spin_lock_irqsave(&dw->lock, flags);
		dw->substream = NULL;
		dw_hdmi_stop_dma(dw);
		dw_hdmi_audio_disable(dw->data.hdmi);
		spin_unlock_irqrestore(&dw->lock, flags);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static snd_pcm_uframes_t dw_hdmi_pointer(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_dw_hdmi *dw = substream->private_data;

	/*
	 * We are unable to report the exact hardware position as
	 * reading the 32-bit DMA position using 8-bit reads is racy.
	 */
	return bytes_to_frames(runtime, dw->buf_offset);
}

static const struct snd_pcm_ops snd_dw_hdmi_ops = {
	.open = dw_hdmi_open,
	.close = dw_hdmi_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = dw_hdmi_hw_params,
	.hw_free = dw_hdmi_hw_free,
	.prepare = dw_hdmi_prepare,
	.trigger = dw_hdmi_trigger,
	.pointer = dw_hdmi_pointer,
	.page = snd_pcm_lib_get_vmalloc_page,
};

/*
 * Probe: verify the controller revision (only 0x0a/0x1a supported),
 * create the ALSA card/PCM, preallocate the DMA buffer and register the
 * card.  On any failure after snd_card_new(), the card is freed.
 */
static int snd_dw_hdmi_probe(struct platform_device *pdev)
{
	const struct dw_hdmi_audio_data *data = pdev->dev.platform_data;
	struct device *dev = pdev->dev.parent;
	struct snd_dw_hdmi *dw;
	struct snd_card *card;
	struct snd_pcm *pcm;
	unsigned revision;
	int ret;

	writeb_relaxed(HDMI_IH_MUTE_AHBDMAAUD_STAT0_ALL,
		       data->base + HDMI_IH_MUTE_AHBDMAAUD_STAT0);
	revision = readb_relaxed(data->base + HDMI_REVISION_ID);
	if (revision != 0x0a && revision != 0x1a) {
		dev_err(dev, "dw-hdmi-audio: unknown revision 0x%02x\n",
			revision);
		return -ENXIO;
	}

	ret = snd_card_new(dev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
			   THIS_MODULE, sizeof(struct snd_dw_hdmi), &card);
	if (ret < 0)
		return ret;

	strscpy(card->driver, DRIVER_NAME, sizeof(card->driver));
	strscpy(card->shortname, "DW-HDMI", sizeof(card->shortname));
	snprintf(card->longname, sizeof(card->longname),
		 "%s rev 0x%02x, irq %d", card->shortname, revision,
		 data->irq);

	dw = card->private_data;
	dw->card = card;
	dw->data = *data;
	dw->revision = revision;

	spin_lock_init(&dw->lock);

	ret = snd_pcm_new(card, "DW HDMI", 0, 1, 0, &pcm);
	if (ret < 0)
		goto err;

	dw->pcm = pcm;
	pcm->private_data = dw;
	strscpy(pcm->name, DRIVER_NAME, sizeof(pcm->name));
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_dw_hdmi_ops);

	/*
	 * To support 8-channel 96kHz audio reliably, we need 512k
	 * to satisfy alsa with our restricted period (ERR004323).
	 */
	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
			dev, 128 * 1024, 1024 * 1024);

	ret = snd_card_register(card);
	if (ret < 0)
		goto err;

	platform_set_drvdata(pdev, dw);

	return 0;

err:
	snd_card_free(card);
	return ret;
}

static void snd_dw_hdmi_remove(struct platform_device *pdev)
{
	struct snd_dw_hdmi *dw = platform_get_drvdata(pdev);

	snd_card_free(dw->card);
}

#if defined(CONFIG_PM_SLEEP) && defined(IS_NOT_BROKEN)
/*
 * This code is fine, but requires implementation in the dw_hdmi_trigger()
 * method which is currently missing as I have no way to test this.
 */
static int snd_dw_hdmi_suspend(struct device *dev)
{
	struct snd_dw_hdmi *dw = dev_get_drvdata(dev);

	snd_power_change_state(dw->card, SNDRV_CTL_POWER_D3cold);

	return 0;
}

static int snd_dw_hdmi_resume(struct device *dev)
{
	struct snd_dw_hdmi *dw = dev_get_drvdata(dev);

	snd_power_change_state(dw->card, SNDRV_CTL_POWER_D0);

	return 0;
}

static SIMPLE_DEV_PM_OPS(snd_dw_hdmi_pm, snd_dw_hdmi_suspend,
			 snd_dw_hdmi_resume);
#define PM_OPS &snd_dw_hdmi_pm
#else
#define PM_OPS NULL
#endif

static struct platform_driver snd_dw_hdmi_driver = {
	.probe	= snd_dw_hdmi_probe,
	.remove_new = snd_dw_hdmi_remove,
	.driver	= {
		.name = DRIVER_NAME,
		.pm = PM_OPS,
	},
};

module_platform_driver(snd_dw_hdmi_driver);

MODULE_AUTHOR("Russell King <[email protected]>");
MODULE_DESCRIPTION("Synopsis Designware HDMI AHB ALSA interface");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
linux-master
drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Designware HDMI CEC driver
 *
 * Copyright (C) 2015-2017 Russell King.
 */
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <drm/drm_edid.h>

#include <media/cec.h>
#include <media/cec-notifier.h>

#include "dw-hdmi-cec.h"

/* CEC register offsets and bit definitions within the dw-hdmi register map */
enum {
	HDMI_IH_CEC_STAT0 = 0x0106,
	HDMI_IH_MUTE_CEC_STAT0 = 0x0186,

	HDMI_CEC_CTRL = 0x7d00,
	CEC_CTRL_START = BIT(0),
	CEC_CTRL_FRAME_TYP = 3 << 1,
	CEC_CTRL_RETRY = 0 << 1,
	CEC_CTRL_NORMAL = 1 << 1,
	CEC_CTRL_IMMED = 2 << 1,

	HDMI_CEC_STAT = 0x7d01,
	CEC_STAT_DONE = BIT(0),
	CEC_STAT_EOM = BIT(1),
	CEC_STAT_NACK = BIT(2),
	CEC_STAT_ARBLOST = BIT(3),
	CEC_STAT_ERROR_INIT = BIT(4),
	CEC_STAT_ERROR_FOLL = BIT(5),
	CEC_STAT_WAKEUP = BIT(6),

	HDMI_CEC_MASK = 0x7d02,
	HDMI_CEC_POLARITY = 0x7d03,
	HDMI_CEC_INT = 0x7d04,
	HDMI_CEC_ADDR_L = 0x7d05,
	HDMI_CEC_ADDR_H = 0x7d06,
	HDMI_CEC_TX_CNT = 0x7d07,
	HDMI_CEC_RX_CNT = 0x7d08,
	HDMI_CEC_TX_DATA0 = 0x7d10,
	HDMI_CEC_RX_DATA0 = 0x7d20,
	HDMI_CEC_LOCK = 0x7d30,
	HDMI_CEC_WKUPCTRL = 0x7d31,
};

/* Per-adapter driver state. */
struct dw_hdmi_cec {
	struct dw_hdmi *hdmi;
	const struct dw_hdmi_cec_ops *ops;	/* register access ops */
	u32 addresses;				/* logical address bitmask */
	struct cec_adapter *adap;
	struct cec_msg rx_msg;
	unsigned int tx_status;
	bool tx_done;				/* set in hardirq, consumed in thread */
	bool rx_done;				/* ditto; paired smp_wmb/smp_rmb */
	struct cec_notifier *notify;
	int irq;

	/* saved register state for system suspend/resume */
	u8 regs_polarity;
	u8 regs_mask;
	u8 regs_mute_stat0;
};

static void dw_hdmi_write(struct dw_hdmi_cec *cec, u8 val, int offset)
{
	cec->ops->write(cec->hdmi, val, offset);
}

static u8 dw_hdmi_read(struct dw_hdmi_cec *cec, int offset)
{
	return cec->ops->read(cec->hdmi, offset);
}

/*
 * Program the hardware logical-address filter.  CEC_LOG_ADDR_INVALID
 * clears all addresses; otherwise the address bit plus bit 15 (broadcast)
 * is accumulated into cec->addresses.
 */
static int dw_hdmi_cec_log_addr(struct cec_adapter *adap, u8 logical_addr)
{
	struct dw_hdmi_cec *cec = cec_get_drvdata(adap);

	if (logical_addr == CEC_LOG_ADDR_INVALID)
		cec->addresses = 0;
	else
		cec->addresses |= BIT(logical_addr) | BIT(15);

	dw_hdmi_write(cec, cec->addresses & 255, HDMI_CEC_ADDR_L);
	dw_hdmi_write(cec, cec->addresses >> 8, HDMI_CEC_ADDR_H);

	return 0;
}

/*
 * Queue a CEC message for transmission, mapping the framework's
 * signal-free-time hint onto the controller's RETRY/NORMAL/IMMED frame
 * types.  Completion is reported via the DONE/NACK/ERROR_INIT interrupts.
 */
static int dw_hdmi_cec_transmit(struct cec_adapter *adap, u8 attempts,
				u32 signal_free_time, struct cec_msg *msg)
{
	struct dw_hdmi_cec *cec = cec_get_drvdata(adap);
	unsigned int i, ctrl;

	switch (signal_free_time) {
	case CEC_SIGNAL_FREE_TIME_RETRY:
		ctrl = CEC_CTRL_RETRY;
		break;
	case CEC_SIGNAL_FREE_TIME_NEW_INITIATOR:
	default:
		ctrl = CEC_CTRL_NORMAL;
		break;
	case CEC_SIGNAL_FREE_TIME_NEXT_XFER:
		ctrl = CEC_CTRL_IMMED;
		break;
	}

	for (i = 0; i < msg->len; i++)
		dw_hdmi_write(cec, msg->msg[i], HDMI_CEC_TX_DATA0 + i);

	dw_hdmi_write(cec, msg->len, HDMI_CEC_TX_CNT);
	dw_hdmi_write(cec, ctrl | CEC_CTRL_START, HDMI_CEC_CTRL);

	return 0;
}

/*
 * Hard IRQ: acknowledge status, record TX completion state and/or copy a
 * received message out of the RX FIFO, then wake the threaded handler to
 * report into the CEC framework.  rx_done publication is ordered after the
 * message copy by smp_wmb() (paired with smp_rmb() in the thread).
 */
static irqreturn_t dw_hdmi_cec_hardirq(int irq, void *data)
{
	struct cec_adapter *adap = data;
	struct dw_hdmi_cec *cec = cec_get_drvdata(adap);
	unsigned int stat = dw_hdmi_read(cec, HDMI_IH_CEC_STAT0);
	irqreturn_t ret = IRQ_HANDLED;

	if (stat == 0)
		return IRQ_NONE;

	dw_hdmi_write(cec, stat, HDMI_IH_CEC_STAT0);

	if (stat & CEC_STAT_ERROR_INIT) {
		cec->tx_status = CEC_TX_STATUS_ERROR;
		cec->tx_done = true;
		ret = IRQ_WAKE_THREAD;
	} else if (stat & CEC_STAT_DONE) {
		cec->tx_status = CEC_TX_STATUS_OK;
		cec->tx_done = true;
		ret = IRQ_WAKE_THREAD;
	} else if (stat & CEC_STAT_NACK) {
		cec->tx_status = CEC_TX_STATUS_NACK;
		cec->tx_done = true;
		ret = IRQ_WAKE_THREAD;
	}

	if (stat & CEC_STAT_EOM) {
		unsigned int len, i;

		len = dw_hdmi_read(cec, HDMI_CEC_RX_CNT);
		if (len > sizeof(cec->rx_msg.msg))
			len = sizeof(cec->rx_msg.msg);

		for (i = 0; i < len; i++)
			cec->rx_msg.msg[i] =
				dw_hdmi_read(cec, HDMI_CEC_RX_DATA0 + i);

		/* release the RX buffer back to the hardware */
		dw_hdmi_write(cec, 0, HDMI_CEC_LOCK);

		cec->rx_msg.len = len;
		smp_wmb();
		cec->rx_done = true;

		ret = IRQ_WAKE_THREAD;
	}

	return ret;
}

/* Threaded IRQ: report TX completion and/or received message upstream. */
static irqreturn_t dw_hdmi_cec_thread(int irq, void *data)
{
	struct cec_adapter *adap = data;
	struct dw_hdmi_cec *cec = cec_get_drvdata(adap);

	if (cec->tx_done) {
		cec->tx_done = false;
		cec_transmit_attempt_done(adap, cec->tx_status);
	}
	if (cec->rx_done) {
		cec->rx_done = false;
		smp_rmb();
		cec_received_msg(adap, &cec->rx_msg);
	}
	return IRQ_HANDLED;
}

/*
 * Enable/disable the CEC engine.  On enable: reset control state, clear
 * pending status and the RX lock, invalidate logical addresses, then
 * unmask DONE/EOM/NACK/ERROR_INIT interrupts.  On disable: mask/mute
 * everything and power the engine down via the platform ops.
 */
static int dw_hdmi_cec_enable(struct cec_adapter *adap, bool enable)
{
	struct dw_hdmi_cec *cec = cec_get_drvdata(adap);

	if (!enable) {
		dw_hdmi_write(cec, ~0, HDMI_CEC_MASK);
		dw_hdmi_write(cec, ~0, HDMI_IH_MUTE_CEC_STAT0);
		dw_hdmi_write(cec, 0, HDMI_CEC_POLARITY);

		cec->ops->disable(cec->hdmi);
	} else {
		unsigned int irqs;

		dw_hdmi_write(cec, 0, HDMI_CEC_CTRL);
		dw_hdmi_write(cec, ~0, HDMI_IH_CEC_STAT0);
		dw_hdmi_write(cec, 0, HDMI_CEC_LOCK);

		dw_hdmi_cec_log_addr(cec->adap, CEC_LOG_ADDR_INVALID);

		cec->ops->enable(cec->hdmi);

		irqs = CEC_STAT_ERROR_INIT | CEC_STAT_NACK | CEC_STAT_EOM |
		       CEC_STAT_DONE;
		dw_hdmi_write(cec, irqs, HDMI_CEC_POLARITY);
		dw_hdmi_write(cec, ~irqs, HDMI_CEC_MASK);
		dw_hdmi_write(cec, ~irqs, HDMI_IH_MUTE_CEC_STAT0);
	}
	return 0;
}

static const struct cec_adap_ops dw_hdmi_cec_ops = {
	.adap_enable = dw_hdmi_cec_enable,
	.adap_log_addr = dw_hdmi_cec_log_addr,
	.adap_transmit = dw_hdmi_cec_transmit,
};

/* devm cleanup action for the not-yet-registered adapter (see probe). */
static void dw_hdmi_cec_del(void *data)
{
	struct dw_hdmi_cec *cec = data;

	cec_delete_adapter(cec->adap);
}

/*
 * Probe: quiesce the CEC block, allocate and register the CEC adapter and
 * its notifier, and wire up the threaded IRQ.  Uses a devm action to free
 * the adapter on early failure, removed again once registration succeeds
 * (cec_unregister_adapter() then owns deletion).
 */
static int dw_hdmi_cec_probe(struct platform_device *pdev)
{
	struct dw_hdmi_cec_data *data = dev_get_platdata(&pdev->dev);
	struct dw_hdmi_cec *cec;
	int ret;

	if (!data)
		return -ENXIO;

	/*
	 * Our device is just a convenience - we want to link to the real
	 * hardware device here, so that userspace can see the association
	 * between the HDMI hardware and its associated CEC chardev.
	 */
	cec = devm_kzalloc(&pdev->dev, sizeof(*cec), GFP_KERNEL);
	if (!cec)
		return -ENOMEM;

	cec->irq = data->irq;
	cec->ops = data->ops;
	cec->hdmi = data->hdmi;

	platform_set_drvdata(pdev, cec);

	dw_hdmi_write(cec, 0, HDMI_CEC_TX_CNT);
	dw_hdmi_write(cec, ~0, HDMI_CEC_MASK);
	dw_hdmi_write(cec, ~0, HDMI_IH_MUTE_CEC_STAT0);
	dw_hdmi_write(cec, 0, HDMI_CEC_POLARITY);

	cec->adap = cec_allocate_adapter(&dw_hdmi_cec_ops, cec, "dw_hdmi",
					 CEC_CAP_DEFAULTS |
					 CEC_CAP_CONNECTOR_INFO,
					 CEC_MAX_LOG_ADDRS);
	if (IS_ERR(cec->adap))
		return PTR_ERR(cec->adap);

	/* override the module pointer */
	cec->adap->owner = THIS_MODULE;

	ret = devm_add_action_or_reset(&pdev->dev, dw_hdmi_cec_del, cec);
	if (ret)
		return ret;

	ret = devm_request_threaded_irq(&pdev->dev, cec->irq,
					dw_hdmi_cec_hardirq,
					dw_hdmi_cec_thread, IRQF_SHARED,
					"dw-hdmi-cec", cec->adap);
	if (ret < 0)
		return ret;

	cec->notify = cec_notifier_cec_adap_register(pdev->dev.parent,
						     NULL, cec->adap);
	if (!cec->notify)
		return -ENOMEM;

	ret = cec_register_adapter(cec->adap, pdev->dev.parent);
	if (ret < 0) {
		cec_notifier_cec_adap_unregister(cec->notify, cec->adap);
		return ret;
	}

	/*
	 * CEC documentation says we must not call cec_delete_adapter
	 * after a successful call to cec_register_adapter().
	 */
	devm_remove_action(&pdev->dev, dw_hdmi_cec_del, cec);

	return 0;
}

static void dw_hdmi_cec_remove(struct platform_device *pdev)
{
	struct dw_hdmi_cec *cec = platform_get_drvdata(pdev);

	cec_notifier_cec_adap_unregister(cec->notify, cec->adap);
	cec_unregister_adapter(cec->adap);
}

static int __maybe_unused dw_hdmi_cec_resume(struct device *dev)
{
	struct dw_hdmi_cec *cec = dev_get_drvdata(dev);

	/* Restore logical address */
	dw_hdmi_write(cec, cec->addresses & 255, HDMI_CEC_ADDR_L);
	dw_hdmi_write(cec, cec->addresses >> 8, HDMI_CEC_ADDR_H);

	/* Restore interrupt status/mask registers */
	dw_hdmi_write(cec, cec->regs_polarity, HDMI_CEC_POLARITY);
	dw_hdmi_write(cec, cec->regs_mask, HDMI_CEC_MASK);
	dw_hdmi_write(cec, cec->regs_mute_stat0, HDMI_IH_MUTE_CEC_STAT0);

	return 0;
}

static int __maybe_unused dw_hdmi_cec_suspend(struct device *dev)
{
	struct dw_hdmi_cec *cec = dev_get_drvdata(dev);

	/* store interrupt status/mask registers */
	cec->regs_polarity = dw_hdmi_read(cec, HDMI_CEC_POLARITY);
	cec->regs_mask = dw_hdmi_read(cec, HDMI_CEC_MASK);
	cec->regs_mute_stat0 = dw_hdmi_read(cec, HDMI_IH_MUTE_CEC_STAT0);

	return 0;
}

static const struct dev_pm_ops dw_hdmi_cec_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(dw_hdmi_cec_suspend, dw_hdmi_cec_resume)
};

static struct platform_driver dw_hdmi_cec_driver = {
	.probe	= dw_hdmi_cec_probe,
	.remove_new = dw_hdmi_cec_remove,
	.driver = {
		.name = "dw-hdmi-cec",
		.pm = &dw_hdmi_cec_pm,
	},
};
module_platform_driver(dw_hdmi_cec_driver);

MODULE_AUTHOR("Russell King <[email protected]>");
MODULE_DESCRIPTION("Synopsys Designware HDMI CEC driver for i.MX");
MODULE_LICENSE("GPL");
MODULE_ALIAS(PLATFORM_MODULE_PREFIX "dw-hdmi-cec");
linux-master
drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd * Copyright (C) STMicroelectronics SA 2017 * * Modified by Philippe Cornu <[email protected]> * This generic Synopsys DesignWare MIPI DSI host driver is based on the * Rockchip version from rockchip/dw-mipi-dsi.c with phy & bridge APIs. */ #include <linux/clk.h> #include <linux/component.h> #include <linux/debugfs.h> #include <linux/iopoll.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/reset.h> #include <video/mipi_display.h> #include <drm/bridge/dw_mipi_dsi.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_crtc.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_modes.h> #include <drm/drm_of.h> #include <drm/drm_print.h> #define HWVER_131 0x31333100 /* IP version 1.31 */ #define DSI_VERSION 0x00 #define VERSION GENMASK(31, 8) #define DSI_PWR_UP 0x04 #define RESET 0 #define POWERUP BIT(0) #define DSI_CLKMGR_CFG 0x08 #define TO_CLK_DIVISION(div) (((div) & 0xff) << 8) #define TX_ESC_CLK_DIVISION(div) ((div) & 0xff) #define DSI_DPI_VCID 0x0c #define DPI_VCID(vcid) ((vcid) & 0x3) #define DSI_DPI_COLOR_CODING 0x10 #define LOOSELY18_EN BIT(8) #define DPI_COLOR_CODING_16BIT_1 0x0 #define DPI_COLOR_CODING_16BIT_2 0x1 #define DPI_COLOR_CODING_16BIT_3 0x2 #define DPI_COLOR_CODING_18BIT_1 0x3 #define DPI_COLOR_CODING_18BIT_2 0x4 #define DPI_COLOR_CODING_24BIT 0x5 #define DSI_DPI_CFG_POL 0x14 #define COLORM_ACTIVE_LOW BIT(4) #define SHUTD_ACTIVE_LOW BIT(3) #define HSYNC_ACTIVE_LOW BIT(2) #define VSYNC_ACTIVE_LOW BIT(1) #define DATAEN_ACTIVE_LOW BIT(0) #define DSI_DPI_LP_CMD_TIM 0x18 #define OUTVACT_LPCMD_TIME(p) (((p) & 0xff) << 16) #define INVACT_LPCMD_TIME(p) ((p) & 0xff) #define DSI_DBI_VCID 0x1c #define DSI_DBI_CFG 0x20 #define DSI_DBI_PARTITIONING_EN 0x24 #define DSI_DBI_CMDSIZE 0x28 #define DSI_PCKHDL_CFG 0x2c #define CRC_RX_EN BIT(4) #define ECC_RX_EN BIT(3) #define BTA_EN 
BIT(2) #define EOTP_RX_EN BIT(1) #define EOTP_TX_EN BIT(0) #define DSI_GEN_VCID 0x30 #define DSI_MODE_CFG 0x34 #define ENABLE_VIDEO_MODE 0 #define ENABLE_CMD_MODE BIT(0) #define DSI_VID_MODE_CFG 0x38 #define ENABLE_LOW_POWER (0x3f << 8) #define ENABLE_LOW_POWER_MASK (0x3f << 8) #define VID_MODE_TYPE_NON_BURST_SYNC_PULSES 0x0 #define VID_MODE_TYPE_NON_BURST_SYNC_EVENTS 0x1 #define VID_MODE_TYPE_BURST 0x2 #define VID_MODE_TYPE_MASK 0x3 #define ENABLE_LOW_POWER_CMD BIT(15) #define VID_MODE_VPG_ENABLE BIT(16) #define VID_MODE_VPG_MODE BIT(20) #define VID_MODE_VPG_HORIZONTAL BIT(24) #define DSI_VID_PKT_SIZE 0x3c #define VID_PKT_SIZE(p) ((p) & 0x3fff) #define DSI_VID_NUM_CHUNKS 0x40 #define VID_NUM_CHUNKS(c) ((c) & 0x1fff) #define DSI_VID_NULL_SIZE 0x44 #define VID_NULL_SIZE(b) ((b) & 0x1fff) #define DSI_VID_HSA_TIME 0x48 #define DSI_VID_HBP_TIME 0x4c #define DSI_VID_HLINE_TIME 0x50 #define DSI_VID_VSA_LINES 0x54 #define DSI_VID_VBP_LINES 0x58 #define DSI_VID_VFP_LINES 0x5c #define DSI_VID_VACTIVE_LINES 0x60 #define DSI_EDPI_CMD_SIZE 0x64 #define DSI_CMD_MODE_CFG 0x68 #define MAX_RD_PKT_SIZE_LP BIT(24) #define DCS_LW_TX_LP BIT(19) #define DCS_SR_0P_TX_LP BIT(18) #define DCS_SW_1P_TX_LP BIT(17) #define DCS_SW_0P_TX_LP BIT(16) #define GEN_LW_TX_LP BIT(14) #define GEN_SR_2P_TX_LP BIT(13) #define GEN_SR_1P_TX_LP BIT(12) #define GEN_SR_0P_TX_LP BIT(11) #define GEN_SW_2P_TX_LP BIT(10) #define GEN_SW_1P_TX_LP BIT(9) #define GEN_SW_0P_TX_LP BIT(8) #define ACK_RQST_EN BIT(1) #define TEAR_FX_EN BIT(0) #define CMD_MODE_ALL_LP (MAX_RD_PKT_SIZE_LP | \ DCS_LW_TX_LP | \ DCS_SR_0P_TX_LP | \ DCS_SW_1P_TX_LP | \ DCS_SW_0P_TX_LP | \ GEN_LW_TX_LP | \ GEN_SR_2P_TX_LP | \ GEN_SR_1P_TX_LP | \ GEN_SR_0P_TX_LP | \ GEN_SW_2P_TX_LP | \ GEN_SW_1P_TX_LP | \ GEN_SW_0P_TX_LP) #define DSI_GEN_HDR 0x6c #define DSI_GEN_PLD_DATA 0x70 #define DSI_CMD_PKT_STATUS 0x74 #define GEN_RD_CMD_BUSY BIT(6) #define GEN_PLD_R_FULL BIT(5) #define GEN_PLD_R_EMPTY BIT(4) #define GEN_PLD_W_FULL BIT(3) #define 
GEN_PLD_W_EMPTY BIT(2) #define GEN_CMD_FULL BIT(1) #define GEN_CMD_EMPTY BIT(0) #define DSI_TO_CNT_CFG 0x78 #define HSTX_TO_CNT(p) (((p) & 0xffff) << 16) #define LPRX_TO_CNT(p) ((p) & 0xffff) #define DSI_HS_RD_TO_CNT 0x7c #define DSI_LP_RD_TO_CNT 0x80 #define DSI_HS_WR_TO_CNT 0x84 #define DSI_LP_WR_TO_CNT 0x88 #define DSI_BTA_TO_CNT 0x8c #define DSI_LPCLK_CTRL 0x94 #define AUTO_CLKLANE_CTRL BIT(1) #define PHY_TXREQUESTCLKHS BIT(0) #define DSI_PHY_TMR_LPCLK_CFG 0x98 #define PHY_CLKHS2LP_TIME(lbcc) (((lbcc) & 0x3ff) << 16) #define PHY_CLKLP2HS_TIME(lbcc) ((lbcc) & 0x3ff) #define DSI_PHY_TMR_CFG 0x9c #define PHY_HS2LP_TIME(lbcc) (((lbcc) & 0xff) << 24) #define PHY_LP2HS_TIME(lbcc) (((lbcc) & 0xff) << 16) #define MAX_RD_TIME(lbcc) ((lbcc) & 0x7fff) #define PHY_HS2LP_TIME_V131(lbcc) (((lbcc) & 0x3ff) << 16) #define PHY_LP2HS_TIME_V131(lbcc) ((lbcc) & 0x3ff) #define DSI_PHY_RSTZ 0xa0 #define PHY_DISFORCEPLL 0 #define PHY_ENFORCEPLL BIT(3) #define PHY_DISABLECLK 0 #define PHY_ENABLECLK BIT(2) #define PHY_RSTZ 0 #define PHY_UNRSTZ BIT(1) #define PHY_SHUTDOWNZ 0 #define PHY_UNSHUTDOWNZ BIT(0) #define DSI_PHY_IF_CFG 0xa4 #define PHY_STOP_WAIT_TIME(cycle) (((cycle) & 0xff) << 8) #define N_LANES(n) (((n) - 1) & 0x3) #define DSI_PHY_ULPS_CTRL 0xa8 #define DSI_PHY_TX_TRIGGERS 0xac #define DSI_PHY_STATUS 0xb0 #define PHY_STOP_STATE_CLK_LANE BIT(2) #define PHY_LOCK BIT(0) #define DSI_PHY_TST_CTRL0 0xb4 #define PHY_TESTCLK BIT(1) #define PHY_UNTESTCLK 0 #define PHY_TESTCLR BIT(0) #define PHY_UNTESTCLR 0 #define DSI_PHY_TST_CTRL1 0xb8 #define PHY_TESTEN BIT(16) #define PHY_UNTESTEN 0 #define PHY_TESTDOUT(n) (((n) & 0xff) << 8) #define PHY_TESTDIN(n) ((n) & 0xff) #define DSI_INT_ST0 0xbc #define DSI_INT_ST1 0xc0 #define DSI_INT_MSK0 0xc4 #define DSI_INT_MSK1 0xc8 #define DSI_PHY_TMR_RD_CFG 0xf4 #define MAX_RD_TIME_V131(lbcc) ((lbcc) & 0x7fff) #define PHY_STATUS_TIMEOUT_US 10000 #define CMD_PKT_STATUS_TIMEOUT_US 20000 #ifdef CONFIG_DEBUG_FS #define VPG_DEFS(name, dsi) \ ((void __force 
*)&((*dsi).vpg_defs.name)) #define REGISTER(name, mask, dsi) \ { #name, VPG_DEFS(name, dsi), mask, dsi } struct debugfs_entries { const char *name; bool *reg; u32 mask; struct dw_mipi_dsi *dsi; }; #endif /* CONFIG_DEBUG_FS */ struct dw_mipi_dsi { struct drm_bridge bridge; struct mipi_dsi_host dsi_host; struct drm_bridge *panel_bridge; struct device *dev; void __iomem *base; struct clk *pclk; unsigned int lane_mbps; /* per lane */ u32 channel; u32 lanes; u32 format; unsigned long mode_flags; #ifdef CONFIG_DEBUG_FS struct dentry *debugfs; struct debugfs_entries *debugfs_vpg; struct { bool vpg; bool vpg_horizontal; bool vpg_ber_pattern; } vpg_defs; #endif /* CONFIG_DEBUG_FS */ struct dw_mipi_dsi *master; /* dual-dsi master ptr */ struct dw_mipi_dsi *slave; /* dual-dsi slave ptr */ struct drm_display_mode mode; const struct dw_mipi_dsi_plat_data *plat_data; }; /* * Check if either a link to a master or slave is present */ static inline bool dw_mipi_is_dual_mode(struct dw_mipi_dsi *dsi) { return dsi->slave || dsi->master; } /* * The controller should generate 2 frames before * preparing the peripheral. 
*/ static void dw_mipi_dsi_wait_for_two_frames(const struct drm_display_mode *mode) { int refresh, two_frames; refresh = drm_mode_vrefresh(mode); two_frames = DIV_ROUND_UP(MSEC_PER_SEC, refresh) * 2; msleep(two_frames); } static inline struct dw_mipi_dsi *host_to_dsi(struct mipi_dsi_host *host) { return container_of(host, struct dw_mipi_dsi, dsi_host); } static inline struct dw_mipi_dsi *bridge_to_dsi(struct drm_bridge *bridge) { return container_of(bridge, struct dw_mipi_dsi, bridge); } static inline void dsi_write(struct dw_mipi_dsi *dsi, u32 reg, u32 val) { writel(val, dsi->base + reg); } static inline u32 dsi_read(struct dw_mipi_dsi *dsi, u32 reg) { return readl(dsi->base + reg); } static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host, struct mipi_dsi_device *device) { struct dw_mipi_dsi *dsi = host_to_dsi(host); const struct dw_mipi_dsi_plat_data *pdata = dsi->plat_data; struct drm_bridge *bridge; int ret; if (device->lanes > dsi->plat_data->max_data_lanes) { dev_err(dsi->dev, "the number of data lanes(%u) is too many\n", device->lanes); return -EINVAL; } dsi->lanes = device->lanes; dsi->channel = device->channel; dsi->format = device->format; dsi->mode_flags = device->mode_flags; bridge = devm_drm_of_get_bridge(dsi->dev, dsi->dev->of_node, 1, 0); if (IS_ERR(bridge)) return PTR_ERR(bridge); bridge->pre_enable_prev_first = true; dsi->panel_bridge = bridge; drm_bridge_add(&dsi->bridge); if (pdata->host_ops && pdata->host_ops->attach) { ret = pdata->host_ops->attach(pdata->priv_data, device); if (ret < 0) return ret; } return 0; } static int dw_mipi_dsi_host_detach(struct mipi_dsi_host *host, struct mipi_dsi_device *device) { struct dw_mipi_dsi *dsi = host_to_dsi(host); const struct dw_mipi_dsi_plat_data *pdata = dsi->plat_data; int ret; if (pdata->host_ops && pdata->host_ops->detach) { ret = pdata->host_ops->detach(pdata->priv_data, device); if (ret < 0) return ret; } drm_of_panel_bridge_remove(host->dev->of_node, 1, 0); drm_bridge_remove(&dsi->bridge); 
return 0; } static void dw_mipi_message_config(struct dw_mipi_dsi *dsi, const struct mipi_dsi_msg *msg) { bool lpm = msg->flags & MIPI_DSI_MSG_USE_LPM; u32 val = 0; /* * TODO dw drv improvements * largest packet sizes during hfp or during vsa/vpb/vfp * should be computed according to byte lane, lane number and only * if sending lp cmds in high speed is enable (PHY_TXREQUESTCLKHS) */ dsi_write(dsi, DSI_DPI_LP_CMD_TIM, OUTVACT_LPCMD_TIME(16) | INVACT_LPCMD_TIME(4)); if (msg->flags & MIPI_DSI_MSG_REQ_ACK) val |= ACK_RQST_EN; if (lpm) val |= CMD_MODE_ALL_LP; dsi_write(dsi, DSI_CMD_MODE_CFG, val); val = dsi_read(dsi, DSI_VID_MODE_CFG); if (lpm) val |= ENABLE_LOW_POWER_CMD; else val &= ~ENABLE_LOW_POWER_CMD; dsi_write(dsi, DSI_VID_MODE_CFG, val); } static int dw_mipi_dsi_gen_pkt_hdr_write(struct dw_mipi_dsi *dsi, u32 hdr_val) { int ret; u32 val, mask; ret = readl_poll_timeout(dsi->base + DSI_CMD_PKT_STATUS, val, !(val & GEN_CMD_FULL), 1000, CMD_PKT_STATUS_TIMEOUT_US); if (ret) { dev_err(dsi->dev, "failed to get available command FIFO\n"); return ret; } dsi_write(dsi, DSI_GEN_HDR, hdr_val); mask = GEN_CMD_EMPTY | GEN_PLD_W_EMPTY; ret = readl_poll_timeout(dsi->base + DSI_CMD_PKT_STATUS, val, (val & mask) == mask, 1000, CMD_PKT_STATUS_TIMEOUT_US); if (ret) { dev_err(dsi->dev, "failed to write command FIFO\n"); return ret; } return 0; } static int dw_mipi_dsi_write(struct dw_mipi_dsi *dsi, const struct mipi_dsi_packet *packet) { const u8 *tx_buf = packet->payload; int len = packet->payload_length, pld_data_bytes = sizeof(u32), ret; __le32 word; u32 val; while (len) { if (len < pld_data_bytes) { word = 0; memcpy(&word, tx_buf, len); dsi_write(dsi, DSI_GEN_PLD_DATA, le32_to_cpu(word)); len = 0; } else { memcpy(&word, tx_buf, pld_data_bytes); dsi_write(dsi, DSI_GEN_PLD_DATA, le32_to_cpu(word)); tx_buf += pld_data_bytes; len -= pld_data_bytes; } ret = readl_poll_timeout(dsi->base + DSI_CMD_PKT_STATUS, val, !(val & GEN_PLD_W_FULL), 1000, CMD_PKT_STATUS_TIMEOUT_US); if (ret) { 
dev_err(dsi->dev, "failed to get available write payload FIFO\n"); return ret; } } word = 0; memcpy(&word, packet->header, sizeof(packet->header)); return dw_mipi_dsi_gen_pkt_hdr_write(dsi, le32_to_cpu(word)); } static int dw_mipi_dsi_read(struct dw_mipi_dsi *dsi, const struct mipi_dsi_msg *msg) { int i, j, ret, len = msg->rx_len; u8 *buf = msg->rx_buf; u32 val; /* Wait end of the read operation */ ret = readl_poll_timeout(dsi->base + DSI_CMD_PKT_STATUS, val, !(val & GEN_RD_CMD_BUSY), 1000, CMD_PKT_STATUS_TIMEOUT_US); if (ret) { dev_err(dsi->dev, "Timeout during read operation\n"); return ret; } for (i = 0; i < len; i += 4) { /* Read fifo must not be empty before all bytes are read */ ret = readl_poll_timeout(dsi->base + DSI_CMD_PKT_STATUS, val, !(val & GEN_PLD_R_EMPTY), 1000, CMD_PKT_STATUS_TIMEOUT_US); if (ret) { dev_err(dsi->dev, "Read payload FIFO is empty\n"); return ret; } val = dsi_read(dsi, DSI_GEN_PLD_DATA); for (j = 0; j < 4 && j + i < len; j++) buf[i + j] = val >> (8 * j); } return ret; } static ssize_t dw_mipi_dsi_host_transfer(struct mipi_dsi_host *host, const struct mipi_dsi_msg *msg) { struct dw_mipi_dsi *dsi = host_to_dsi(host); struct mipi_dsi_packet packet; int ret, nb_bytes; ret = mipi_dsi_create_packet(&packet, msg); if (ret) { dev_err(dsi->dev, "failed to create packet: %d\n", ret); return ret; } dw_mipi_message_config(dsi, msg); if (dsi->slave) dw_mipi_message_config(dsi->slave, msg); ret = dw_mipi_dsi_write(dsi, &packet); if (ret) return ret; if (dsi->slave) { ret = dw_mipi_dsi_write(dsi->slave, &packet); if (ret) return ret; } if (msg->rx_buf && msg->rx_len) { ret = dw_mipi_dsi_read(dsi, msg); if (ret) return ret; nb_bytes = msg->rx_len; } else { nb_bytes = packet.size; } return nb_bytes; } static const struct mipi_dsi_host_ops dw_mipi_dsi_host_ops = { .attach = dw_mipi_dsi_host_attach, .detach = dw_mipi_dsi_host_detach, .transfer = dw_mipi_dsi_host_transfer, }; static void dw_mipi_dsi_video_mode_config(struct dw_mipi_dsi *dsi) { u32 val; /* 
* TODO dw drv improvements * enabling low power is panel-dependent, we should use the * panel configuration here... */ val = ENABLE_LOW_POWER; if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) val |= VID_MODE_TYPE_BURST; else if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) val |= VID_MODE_TYPE_NON_BURST_SYNC_PULSES; else val |= VID_MODE_TYPE_NON_BURST_SYNC_EVENTS; #ifdef CONFIG_DEBUG_FS if (dsi->vpg_defs.vpg) { val |= VID_MODE_VPG_ENABLE; val |= dsi->vpg_defs.vpg_horizontal ? VID_MODE_VPG_HORIZONTAL : 0; val |= dsi->vpg_defs.vpg_ber_pattern ? VID_MODE_VPG_MODE : 0; } #endif /* CONFIG_DEBUG_FS */ dsi_write(dsi, DSI_VID_MODE_CFG, val); } static void dw_mipi_dsi_set_mode(struct dw_mipi_dsi *dsi, unsigned long mode_flags) { u32 val; dsi_write(dsi, DSI_PWR_UP, RESET); if (mode_flags & MIPI_DSI_MODE_VIDEO) { dsi_write(dsi, DSI_MODE_CFG, ENABLE_VIDEO_MODE); dw_mipi_dsi_video_mode_config(dsi); } else { dsi_write(dsi, DSI_MODE_CFG, ENABLE_CMD_MODE); } val = PHY_TXREQUESTCLKHS; if (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) val |= AUTO_CLKLANE_CTRL; dsi_write(dsi, DSI_LPCLK_CTRL, val); dsi_write(dsi, DSI_PWR_UP, POWERUP); } static void dw_mipi_dsi_disable(struct dw_mipi_dsi *dsi) { dsi_write(dsi, DSI_PWR_UP, RESET); dsi_write(dsi, DSI_PHY_RSTZ, PHY_RSTZ); } static void dw_mipi_dsi_init(struct dw_mipi_dsi *dsi) { const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops; unsigned int esc_rate; /* in MHz */ u32 esc_clk_division; int ret; /* * The maximum permitted escape clock is 20MHz and it is derived from * lanebyteclk, which is running at "lane_mbps / 8". 
*/ if (phy_ops->get_esc_clk_rate) { ret = phy_ops->get_esc_clk_rate(dsi->plat_data->priv_data, &esc_rate); if (ret) DRM_DEBUG_DRIVER("Phy get_esc_clk_rate() failed\n"); } else esc_rate = 20; /* Default to 20MHz */ /* * We want : * (lane_mbps >> 3) / esc_clk_division < X * which is: * (lane_mbps >> 3) / X > esc_clk_division */ esc_clk_division = (dsi->lane_mbps >> 3) / esc_rate + 1; dsi_write(dsi, DSI_PWR_UP, RESET); /* * TODO dw drv improvements * timeout clock division should be computed with the * high speed transmission counter timeout and byte lane... */ dsi_write(dsi, DSI_CLKMGR_CFG, TO_CLK_DIVISION(10) | TX_ESC_CLK_DIVISION(esc_clk_division)); } static void dw_mipi_dsi_dpi_config(struct dw_mipi_dsi *dsi, const struct drm_display_mode *mode) { u32 val = 0, color = 0; switch (dsi->format) { case MIPI_DSI_FMT_RGB888: color = DPI_COLOR_CODING_24BIT; break; case MIPI_DSI_FMT_RGB666: color = DPI_COLOR_CODING_18BIT_2 | LOOSELY18_EN; break; case MIPI_DSI_FMT_RGB666_PACKED: color = DPI_COLOR_CODING_18BIT_1; break; case MIPI_DSI_FMT_RGB565: color = DPI_COLOR_CODING_16BIT_1; break; } if (mode->flags & DRM_MODE_FLAG_NVSYNC) val |= VSYNC_ACTIVE_LOW; if (mode->flags & DRM_MODE_FLAG_NHSYNC) val |= HSYNC_ACTIVE_LOW; dsi_write(dsi, DSI_DPI_VCID, DPI_VCID(dsi->channel)); dsi_write(dsi, DSI_DPI_COLOR_CODING, color); dsi_write(dsi, DSI_DPI_CFG_POL, val); } static void dw_mipi_dsi_packet_handler_config(struct dw_mipi_dsi *dsi) { dsi_write(dsi, DSI_PCKHDL_CFG, CRC_RX_EN | ECC_RX_EN | BTA_EN); } static void dw_mipi_dsi_video_packet_config(struct dw_mipi_dsi *dsi, const struct drm_display_mode *mode) { /* * TODO dw drv improvements * only burst mode is supported here. For non-burst video modes, * we should compute DSI_VID_PKT_SIZE, DSI_VCCR.NUMC & * DSI_VNPCR.NPSIZE... especially because this driver supports * non-burst video modes, see dw_mipi_dsi_video_mode_config()... */ dsi_write(dsi, DSI_VID_PKT_SIZE, dw_mipi_is_dual_mode(dsi) ? 
VID_PKT_SIZE(mode->hdisplay / 2) : VID_PKT_SIZE(mode->hdisplay)); } static void dw_mipi_dsi_command_mode_config(struct dw_mipi_dsi *dsi) { /* * TODO dw drv improvements * compute high speed transmission counter timeout according * to the timeout clock division (TO_CLK_DIVISION) and byte lane... */ dsi_write(dsi, DSI_TO_CNT_CFG, HSTX_TO_CNT(1000) | LPRX_TO_CNT(1000)); /* * TODO dw drv improvements * the Bus-Turn-Around Timeout Counter should be computed * according to byte lane... */ dsi_write(dsi, DSI_BTA_TO_CNT, 0xd00); dsi_write(dsi, DSI_MODE_CFG, ENABLE_CMD_MODE); } /* Get lane byte clock cycles. */ static u32 dw_mipi_dsi_get_hcomponent_lbcc(struct dw_mipi_dsi *dsi, const struct drm_display_mode *mode, u32 hcomponent) { u32 frac, lbcc; lbcc = hcomponent * dsi->lane_mbps * MSEC_PER_SEC / 8; frac = lbcc % mode->clock; lbcc = lbcc / mode->clock; if (frac) lbcc++; return lbcc; } static void dw_mipi_dsi_line_timer_config(struct dw_mipi_dsi *dsi, const struct drm_display_mode *mode) { u32 htotal, hsa, hbp, lbcc; htotal = mode->htotal; hsa = mode->hsync_end - mode->hsync_start; hbp = mode->htotal - mode->hsync_end; /* * TODO dw drv improvements * computations below may be improved... 
*/ lbcc = dw_mipi_dsi_get_hcomponent_lbcc(dsi, mode, htotal); dsi_write(dsi, DSI_VID_HLINE_TIME, lbcc); lbcc = dw_mipi_dsi_get_hcomponent_lbcc(dsi, mode, hsa); dsi_write(dsi, DSI_VID_HSA_TIME, lbcc); lbcc = dw_mipi_dsi_get_hcomponent_lbcc(dsi, mode, hbp); dsi_write(dsi, DSI_VID_HBP_TIME, lbcc); } static void dw_mipi_dsi_vertical_timing_config(struct dw_mipi_dsi *dsi, const struct drm_display_mode *mode) { u32 vactive, vsa, vfp, vbp; vactive = mode->vdisplay; vsa = mode->vsync_end - mode->vsync_start; vfp = mode->vsync_start - mode->vdisplay; vbp = mode->vtotal - mode->vsync_end; dsi_write(dsi, DSI_VID_VACTIVE_LINES, vactive); dsi_write(dsi, DSI_VID_VSA_LINES, vsa); dsi_write(dsi, DSI_VID_VFP_LINES, vfp); dsi_write(dsi, DSI_VID_VBP_LINES, vbp); } static void dw_mipi_dsi_dphy_timing_config(struct dw_mipi_dsi *dsi) { const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops; struct dw_mipi_dsi_dphy_timing timing; u32 hw_version; int ret; ret = phy_ops->get_timing(dsi->plat_data->priv_data, dsi->lane_mbps, &timing); if (ret) DRM_DEV_ERROR(dsi->dev, "Retrieving phy timings failed\n"); /* * TODO dw drv improvements * data & clock lane timers should be computed according to panel * blankings and to the automatic clock lane control mode... 
* note: DSI_PHY_TMR_CFG.MAX_RD_TIME should be in line with * DSI_CMD_MODE_CFG.MAX_RD_PKT_SIZE_LP (see CMD_MODE_ALL_LP) */ hw_version = dsi_read(dsi, DSI_VERSION) & VERSION; if (hw_version >= HWVER_131) { dsi_write(dsi, DSI_PHY_TMR_CFG, PHY_HS2LP_TIME_V131(timing.data_hs2lp) | PHY_LP2HS_TIME_V131(timing.data_lp2hs)); dsi_write(dsi, DSI_PHY_TMR_RD_CFG, MAX_RD_TIME_V131(10000)); } else { dsi_write(dsi, DSI_PHY_TMR_CFG, PHY_HS2LP_TIME(timing.data_hs2lp) | PHY_LP2HS_TIME(timing.data_lp2hs) | MAX_RD_TIME(10000)); } dsi_write(dsi, DSI_PHY_TMR_LPCLK_CFG, PHY_CLKHS2LP_TIME(timing.clk_hs2lp) | PHY_CLKLP2HS_TIME(timing.clk_lp2hs)); } static void dw_mipi_dsi_dphy_interface_config(struct dw_mipi_dsi *dsi) { /* * TODO dw drv improvements * stop wait time should be the maximum between host dsi * and panel stop wait times */ dsi_write(dsi, DSI_PHY_IF_CFG, PHY_STOP_WAIT_TIME(0x20) | N_LANES(dsi->lanes)); } static void dw_mipi_dsi_dphy_init(struct dw_mipi_dsi *dsi) { /* Clear PHY state */ dsi_write(dsi, DSI_PHY_RSTZ, PHY_DISFORCEPLL | PHY_DISABLECLK | PHY_RSTZ | PHY_SHUTDOWNZ); dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_UNTESTCLR); dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_TESTCLR); dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_UNTESTCLR); } static void dw_mipi_dsi_dphy_enable(struct dw_mipi_dsi *dsi) { u32 val; int ret; dsi_write(dsi, DSI_PHY_RSTZ, PHY_ENFORCEPLL | PHY_ENABLECLK | PHY_UNRSTZ | PHY_UNSHUTDOWNZ); ret = readl_poll_timeout(dsi->base + DSI_PHY_STATUS, val, val & PHY_LOCK, 1000, PHY_STATUS_TIMEOUT_US); if (ret) DRM_DEBUG_DRIVER("failed to wait phy lock state\n"); ret = readl_poll_timeout(dsi->base + DSI_PHY_STATUS, val, val & PHY_STOP_STATE_CLK_LANE, 1000, PHY_STATUS_TIMEOUT_US); if (ret) DRM_DEBUG_DRIVER("failed to wait phy clk lane stop state\n"); } static void dw_mipi_dsi_clear_err(struct dw_mipi_dsi *dsi) { dsi_read(dsi, DSI_INT_ST0); dsi_read(dsi, DSI_INT_ST1); dsi_write(dsi, DSI_INT_MSK0, 0); dsi_write(dsi, DSI_INT_MSK1, 0); } static void 
dw_mipi_dsi_bridge_post_atomic_disable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge); const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops; /* * Switch to command mode before panel-bridge post_disable & * panel unprepare. * Note: panel-bridge disable & panel disable has been called * before by the drm framework. */ dw_mipi_dsi_set_mode(dsi, 0); if (phy_ops->power_off) phy_ops->power_off(dsi->plat_data->priv_data); if (dsi->slave) { dw_mipi_dsi_disable(dsi->slave); clk_disable_unprepare(dsi->slave->pclk); pm_runtime_put(dsi->slave->dev); } dw_mipi_dsi_disable(dsi); clk_disable_unprepare(dsi->pclk); pm_runtime_put(dsi->dev); } static unsigned int dw_mipi_dsi_get_lanes(struct dw_mipi_dsi *dsi) { /* this instance is the slave, so add the master's lanes */ if (dsi->master) return dsi->master->lanes + dsi->lanes; /* this instance is the master, so add the slave's lanes */ if (dsi->slave) return dsi->lanes + dsi->slave->lanes; /* single-dsi, so no other instance to consider */ return dsi->lanes; } static void dw_mipi_dsi_mode_set(struct dw_mipi_dsi *dsi, const struct drm_display_mode *adjusted_mode) { const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops; void *priv_data = dsi->plat_data->priv_data; int ret; u32 lanes = dw_mipi_dsi_get_lanes(dsi); clk_prepare_enable(dsi->pclk); ret = phy_ops->get_lane_mbps(priv_data, adjusted_mode, dsi->mode_flags, lanes, dsi->format, &dsi->lane_mbps); if (ret) DRM_DEBUG_DRIVER("Phy get_lane_mbps() failed\n"); pm_runtime_get_sync(dsi->dev); dw_mipi_dsi_init(dsi); dw_mipi_dsi_dpi_config(dsi, adjusted_mode); dw_mipi_dsi_packet_handler_config(dsi); dw_mipi_dsi_video_mode_config(dsi); dw_mipi_dsi_video_packet_config(dsi, adjusted_mode); dw_mipi_dsi_command_mode_config(dsi); dw_mipi_dsi_line_timer_config(dsi, adjusted_mode); dw_mipi_dsi_vertical_timing_config(dsi, adjusted_mode); dw_mipi_dsi_dphy_init(dsi); 
dw_mipi_dsi_dphy_timing_config(dsi); dw_mipi_dsi_dphy_interface_config(dsi); dw_mipi_dsi_clear_err(dsi); ret = phy_ops->init(priv_data); if (ret) DRM_DEBUG_DRIVER("Phy init() failed\n"); dw_mipi_dsi_dphy_enable(dsi); dw_mipi_dsi_wait_for_two_frames(adjusted_mode); /* Switch to cmd mode for panel-bridge pre_enable & panel prepare */ dw_mipi_dsi_set_mode(dsi, 0); if (phy_ops->power_on) phy_ops->power_on(dsi->plat_data->priv_data); } static void dw_mipi_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge); /* Power up the dsi ctl into a command mode */ dw_mipi_dsi_mode_set(dsi, &dsi->mode); if (dsi->slave) dw_mipi_dsi_mode_set(dsi->slave, &dsi->mode); } static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge, const struct drm_display_mode *mode, const struct drm_display_mode *adjusted_mode) { struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge); /* Store the display mode for later use in pre_enable callback */ drm_mode_copy(&dsi->mode, adjusted_mode); } static void dw_mipi_dsi_bridge_atomic_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge); /* Switch to video mode for panel-bridge enable & panel enable */ dw_mipi_dsi_set_mode(dsi, MIPI_DSI_MODE_VIDEO); if (dsi->slave) dw_mipi_dsi_set_mode(dsi->slave, MIPI_DSI_MODE_VIDEO); } static enum drm_mode_status dw_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode) { struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge); const struct dw_mipi_dsi_plat_data *pdata = dsi->plat_data; enum drm_mode_status mode_status = MODE_OK; if (pdata->mode_valid) mode_status = pdata->mode_valid(pdata->priv_data, mode, dsi->mode_flags, dw_mipi_dsi_get_lanes(dsi), dsi->format); return mode_status; } static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { 
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge); if (!bridge->encoder) { DRM_ERROR("Parent encoder object not found\n"); return -ENODEV; } /* Set the encoder type as caller does not know it */ bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI; /* Attach the panel-bridge to the dsi bridge */ return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge, flags); } static const struct drm_bridge_funcs dw_mipi_dsi_bridge_funcs = { .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_reset = drm_atomic_helper_bridge_reset, .atomic_pre_enable = dw_mipi_dsi_bridge_atomic_pre_enable, .atomic_enable = dw_mipi_dsi_bridge_atomic_enable, .atomic_post_disable = dw_mipi_dsi_bridge_post_atomic_disable, .mode_set = dw_mipi_dsi_bridge_mode_set, .mode_valid = dw_mipi_dsi_bridge_mode_valid, .attach = dw_mipi_dsi_bridge_attach, }; #ifdef CONFIG_DEBUG_FS static int dw_mipi_dsi_debugfs_write(void *data, u64 val) { struct debugfs_entries *vpg = data; struct dw_mipi_dsi *dsi; u32 mode_cfg; if (!vpg) return -ENODEV; dsi = vpg->dsi; *vpg->reg = (bool)val; mode_cfg = dsi_read(dsi, DSI_VID_MODE_CFG); if (*vpg->reg) mode_cfg |= vpg->mask; else mode_cfg &= ~vpg->mask; dsi_write(dsi, DSI_VID_MODE_CFG, mode_cfg); return 0; } static int dw_mipi_dsi_debugfs_show(void *data, u64 *val) { struct debugfs_entries *vpg = data; if (!vpg) return -ENODEV; *val = *vpg->reg; return 0; } DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, dw_mipi_dsi_debugfs_show, dw_mipi_dsi_debugfs_write, "%llu\n"); static void debugfs_create_files(void *data) { struct dw_mipi_dsi *dsi = data; struct debugfs_entries debugfs[] = { REGISTER(vpg, VID_MODE_VPG_ENABLE, dsi), REGISTER(vpg_horizontal, VID_MODE_VPG_HORIZONTAL, dsi), REGISTER(vpg_ber_pattern, VID_MODE_VPG_MODE, dsi), }; int i; dsi->debugfs_vpg = kmemdup(debugfs, sizeof(debugfs), GFP_KERNEL); if (!dsi->debugfs_vpg) return; for (i = 0; i < ARRAY_SIZE(debugfs); i++) 
debugfs_create_file(dsi->debugfs_vpg[i].name, 0644, dsi->debugfs, &dsi->debugfs_vpg[i], &fops_x32); } static void dw_mipi_dsi_debugfs_init(struct dw_mipi_dsi *dsi) { dsi->debugfs = debugfs_create_dir(dev_name(dsi->dev), NULL); if (IS_ERR(dsi->debugfs)) { dev_err(dsi->dev, "failed to create debugfs root\n"); return; } debugfs_create_files(dsi); } static void dw_mipi_dsi_debugfs_remove(struct dw_mipi_dsi *dsi) { debugfs_remove_recursive(dsi->debugfs); kfree(dsi->debugfs_vpg); } #else static void dw_mipi_dsi_debugfs_init(struct dw_mipi_dsi *dsi) { } static void dw_mipi_dsi_debugfs_remove(struct dw_mipi_dsi *dsi) { } #endif /* CONFIG_DEBUG_FS */ static struct dw_mipi_dsi * __dw_mipi_dsi_probe(struct platform_device *pdev, const struct dw_mipi_dsi_plat_data *plat_data) { struct device *dev = &pdev->dev; struct reset_control *apb_rst; struct dw_mipi_dsi *dsi; int ret; dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL); if (!dsi) return ERR_PTR(-ENOMEM); dsi->dev = dev; dsi->plat_data = plat_data; if (!plat_data->phy_ops->init || !plat_data->phy_ops->get_lane_mbps || !plat_data->phy_ops->get_timing) { DRM_ERROR("Phy not properly configured\n"); return ERR_PTR(-ENODEV); } if (!plat_data->base) { dsi->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dsi->base)) return ERR_PTR(-ENODEV); } else { dsi->base = plat_data->base; } dsi->pclk = devm_clk_get(dev, "pclk"); if (IS_ERR(dsi->pclk)) { ret = PTR_ERR(dsi->pclk); dev_err(dev, "Unable to get pclk: %d\n", ret); return ERR_PTR(ret); } /* * Note that the reset was not defined in the initial device tree, so * we have to be prepared for it not being found. 
*/ apb_rst = devm_reset_control_get_optional_exclusive(dev, "apb"); if (IS_ERR(apb_rst)) { ret = PTR_ERR(apb_rst); if (ret != -EPROBE_DEFER) dev_err(dev, "Unable to get reset control: %d\n", ret); return ERR_PTR(ret); } if (apb_rst) { ret = clk_prepare_enable(dsi->pclk); if (ret) { dev_err(dev, "%s: Failed to enable pclk\n", __func__); return ERR_PTR(ret); } reset_control_assert(apb_rst); usleep_range(10, 20); reset_control_deassert(apb_rst); clk_disable_unprepare(dsi->pclk); } dw_mipi_dsi_debugfs_init(dsi); pm_runtime_enable(dev); dsi->dsi_host.ops = &dw_mipi_dsi_host_ops; dsi->dsi_host.dev = dev; ret = mipi_dsi_host_register(&dsi->dsi_host); if (ret) { dev_err(dev, "Failed to register MIPI host: %d\n", ret); pm_runtime_disable(dev); dw_mipi_dsi_debugfs_remove(dsi); return ERR_PTR(ret); } dsi->bridge.driver_private = dsi; dsi->bridge.funcs = &dw_mipi_dsi_bridge_funcs; #ifdef CONFIG_OF dsi->bridge.of_node = pdev->dev.of_node; #endif return dsi; } static void __dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi) { mipi_dsi_host_unregister(&dsi->dsi_host); pm_runtime_disable(dsi->dev); dw_mipi_dsi_debugfs_remove(dsi); } void dw_mipi_dsi_set_slave(struct dw_mipi_dsi *dsi, struct dw_mipi_dsi *slave) { /* introduce controllers to each other */ dsi->slave = slave; dsi->slave->master = dsi; /* migrate settings for already attached displays */ dsi->slave->lanes = dsi->lanes; dsi->slave->channel = dsi->channel; dsi->slave->format = dsi->format; dsi->slave->mode_flags = dsi->mode_flags; } EXPORT_SYMBOL_GPL(dw_mipi_dsi_set_slave); /* * Probe/remove API, used from platforms based on the DRM bridge API. 
*/ struct dw_mipi_dsi * dw_mipi_dsi_probe(struct platform_device *pdev, const struct dw_mipi_dsi_plat_data *plat_data) { return __dw_mipi_dsi_probe(pdev, plat_data); } EXPORT_SYMBOL_GPL(dw_mipi_dsi_probe); void dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi) { __dw_mipi_dsi_remove(dsi); } EXPORT_SYMBOL_GPL(dw_mipi_dsi_remove); /* * Bind/unbind API, used from platforms based on the component framework. */ int dw_mipi_dsi_bind(struct dw_mipi_dsi *dsi, struct drm_encoder *encoder) { return drm_bridge_attach(encoder, &dsi->bridge, NULL, 0); } EXPORT_SYMBOL_GPL(dw_mipi_dsi_bind); void dw_mipi_dsi_unbind(struct dw_mipi_dsi *dsi) { } EXPORT_SYMBOL_GPL(dw_mipi_dsi_unbind); MODULE_AUTHOR("Chris Zhong <[email protected]>"); MODULE_AUTHOR("Philippe Cornu <[email protected]>"); MODULE_DESCRIPTION("DW MIPI DSI host controller driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:dw-mipi-dsi");
linux-master
drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * DesignWare High-Definition Multimedia Interface (HDMI) driver * * Copyright (C) 2013-2015 Mentor Graphics Inc. * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. * Copyright (C) 2010, Guennadi Liakhovetski <[email protected]> */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/hdmi.h> #include <linux/i2c.h> #include <linux/irq.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/of.h> #include <linux/pinctrl/consumer.h> #include <linux/regmap.h> #include <linux/dma-mapping.h> #include <linux/spinlock.h> #include <media/cec-notifier.h> #include <uapi/linux/media-bus-format.h> #include <uapi/linux/videodev2.h> #include <drm/bridge/dw_hdmi.h> #include <drm/display/drm_hdmi_helper.h> #include <drm/display/drm_scdc_helper.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_of.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "dw-hdmi-audio.h" #include "dw-hdmi-cec.h" #include "dw-hdmi.h" #define DDC_CI_ADDR 0x37 #define DDC_SEGMENT_ADDR 0x30 #define HDMI_EDID_LEN 512 /* DW-HDMI Controller >= 0x200a are at least compliant with SCDC version 1 */ #define SCDC_MIN_SOURCE_VERSION 0x1 #define HDMI14_MAX_TMDSCLK 340000000 static const u16 csc_coeff_default[3][4] = { { 0x2000, 0x0000, 0x0000, 0x0000 }, { 0x0000, 0x2000, 0x0000, 0x0000 }, { 0x0000, 0x0000, 0x2000, 0x0000 } }; static const u16 csc_coeff_rgb_out_eitu601[3][4] = { { 0x2000, 0x6926, 0x74fd, 0x010e }, { 0x2000, 0x2cdd, 0x0000, 0x7e9a }, { 0x2000, 0x0000, 0x38b4, 0x7e3b } }; static const u16 csc_coeff_rgb_out_eitu709[3][4] = { { 0x2000, 0x7106, 0x7a02, 0x00a7 }, { 0x2000, 0x3264, 0x0000, 0x7e6d }, { 0x2000, 0x0000, 0x3b61, 0x7e25 } }; static const u16 csc_coeff_rgb_in_eitu601[3][4] = { { 0x2591, 0x1322, 0x074b, 0x0000 }, { 0x6535, 0x2000, 0x7acc, 0x0200 }, { 0x6acd, 0x7534, 0x2000, 0x0200 } }; static const u16 
csc_coeff_rgb_in_eitu709[3][4] = { { 0x2dc5, 0x0d9b, 0x049e, 0x0000 }, { 0x62f0, 0x2000, 0x7d11, 0x0200 }, { 0x6756, 0x78ab, 0x2000, 0x0200 } }; static const u16 csc_coeff_rgb_full_to_rgb_limited[3][4] = { { 0x1b7c, 0x0000, 0x0000, 0x0020 }, { 0x0000, 0x1b7c, 0x0000, 0x0020 }, { 0x0000, 0x0000, 0x1b7c, 0x0020 } }; struct hdmi_vmode { bool mdataenablepolarity; unsigned int mpixelclock; unsigned int mpixelrepetitioninput; unsigned int mpixelrepetitionoutput; unsigned int mtmdsclock; }; struct hdmi_data_info { unsigned int enc_in_bus_format; unsigned int enc_out_bus_format; unsigned int enc_in_encoding; unsigned int enc_out_encoding; unsigned int pix_repet_factor; unsigned int hdcp_enable; struct hdmi_vmode video_mode; bool rgb_limited_range; }; struct dw_hdmi_i2c { struct i2c_adapter adap; struct mutex lock; /* used to serialize data transfers */ struct completion cmp; u8 stat; u8 slave_reg; bool is_regaddr; bool is_segment; }; struct dw_hdmi_phy_data { enum dw_hdmi_phy_type type; const char *name; unsigned int gen; bool has_svsret; int (*configure)(struct dw_hdmi *hdmi, const struct dw_hdmi_plat_data *pdata, unsigned long mpixelclock); }; struct dw_hdmi { struct drm_connector connector; struct drm_bridge bridge; struct drm_bridge *next_bridge; unsigned int version; struct platform_device *audio; struct platform_device *cec; struct device *dev; struct clk *isfr_clk; struct clk *iahb_clk; struct clk *cec_clk; struct dw_hdmi_i2c *i2c; struct hdmi_data_info hdmi_data; const struct dw_hdmi_plat_data *plat_data; int vic; u8 edid[HDMI_EDID_LEN]; struct { const struct dw_hdmi_phy_ops *ops; const char *name; void *data; bool enabled; } phy; struct drm_display_mode previous_mode; struct i2c_adapter *ddc; void __iomem *regs; bool sink_is_hdmi; bool sink_has_audio; struct pinctrl *pinctrl; struct pinctrl_state *default_state; struct pinctrl_state *unwedge_state; struct mutex mutex; /* for state below and previous_mode */ enum drm_connector_force force; /* mutex-protected force 
state */ struct drm_connector *curr_conn;/* current connector (only valid when !disabled) */ bool disabled; /* DRM has disabled our bridge */ bool bridge_is_on; /* indicates the bridge is on */ bool rxsense; /* rxsense state */ u8 phy_mask; /* desired phy int mask settings */ u8 mc_clkdis; /* clock disable register */ spinlock_t audio_lock; struct mutex audio_mutex; unsigned int sample_non_pcm; unsigned int sample_width; unsigned int sample_rate; unsigned int channels; unsigned int audio_cts; unsigned int audio_n; bool audio_enable; unsigned int reg_shift; struct regmap *regm; void (*enable_audio)(struct dw_hdmi *hdmi); void (*disable_audio)(struct dw_hdmi *hdmi); struct mutex cec_notifier_mutex; struct cec_notifier *cec_notifier; hdmi_codec_plugged_cb plugged_cb; struct device *codec_dev; enum drm_connector_status last_connector_result; }; #define HDMI_IH_PHY_STAT0_RX_SENSE \ (HDMI_IH_PHY_STAT0_RX_SENSE0 | HDMI_IH_PHY_STAT0_RX_SENSE1 | \ HDMI_IH_PHY_STAT0_RX_SENSE2 | HDMI_IH_PHY_STAT0_RX_SENSE3) #define HDMI_PHY_RX_SENSE \ (HDMI_PHY_RX_SENSE0 | HDMI_PHY_RX_SENSE1 | \ HDMI_PHY_RX_SENSE2 | HDMI_PHY_RX_SENSE3) static inline void hdmi_writeb(struct dw_hdmi *hdmi, u8 val, int offset) { regmap_write(hdmi->regm, offset << hdmi->reg_shift, val); } static inline u8 hdmi_readb(struct dw_hdmi *hdmi, int offset) { unsigned int val = 0; regmap_read(hdmi->regm, offset << hdmi->reg_shift, &val); return val; } static void handle_plugged_change(struct dw_hdmi *hdmi, bool plugged) { if (hdmi->plugged_cb && hdmi->codec_dev) hdmi->plugged_cb(hdmi->codec_dev, plugged); } int dw_hdmi_set_plugged_cb(struct dw_hdmi *hdmi, hdmi_codec_plugged_cb fn, struct device *codec_dev) { bool plugged; mutex_lock(&hdmi->mutex); hdmi->plugged_cb = fn; hdmi->codec_dev = codec_dev; plugged = hdmi->last_connector_result == connector_status_connected; handle_plugged_change(hdmi, plugged); mutex_unlock(&hdmi->mutex); return 0; } EXPORT_SYMBOL_GPL(dw_hdmi_set_plugged_cb); static void hdmi_modb(struct dw_hdmi 
*hdmi, u8 data, u8 mask, unsigned reg) { regmap_update_bits(hdmi->regm, reg << hdmi->reg_shift, mask, data); } static void hdmi_mask_writeb(struct dw_hdmi *hdmi, u8 data, unsigned int reg, u8 shift, u8 mask) { hdmi_modb(hdmi, data << shift, mask, reg); } static void dw_hdmi_i2c_init(struct dw_hdmi *hdmi) { hdmi_writeb(hdmi, HDMI_PHY_I2CM_INT_ADDR_DONE_POL, HDMI_PHY_I2CM_INT_ADDR); hdmi_writeb(hdmi, HDMI_PHY_I2CM_CTLINT_ADDR_NAC_POL | HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_POL, HDMI_PHY_I2CM_CTLINT_ADDR); /* Software reset */ hdmi_writeb(hdmi, 0x00, HDMI_I2CM_SOFTRSTZ); /* Set Standard Mode speed (determined to be 100KHz on iMX6) */ hdmi_writeb(hdmi, 0x00, HDMI_I2CM_DIV); /* Set done, not acknowledged and arbitration interrupt polarities */ hdmi_writeb(hdmi, HDMI_I2CM_INT_DONE_POL, HDMI_I2CM_INT); hdmi_writeb(hdmi, HDMI_I2CM_CTLINT_NAC_POL | HDMI_I2CM_CTLINT_ARB_POL, HDMI_I2CM_CTLINT); /* Clear DONE and ERROR interrupts */ hdmi_writeb(hdmi, HDMI_IH_I2CM_STAT0_ERROR | HDMI_IH_I2CM_STAT0_DONE, HDMI_IH_I2CM_STAT0); /* Mute DONE and ERROR interrupts */ hdmi_writeb(hdmi, HDMI_IH_I2CM_STAT0_ERROR | HDMI_IH_I2CM_STAT0_DONE, HDMI_IH_MUTE_I2CM_STAT0); } static bool dw_hdmi_i2c_unwedge(struct dw_hdmi *hdmi) { /* If no unwedge state then give up */ if (!hdmi->unwedge_state) return false; dev_info(hdmi->dev, "Attempting to unwedge stuck i2c bus\n"); /* * This is a huge hack to workaround a problem where the dw_hdmi i2c * bus could sometimes get wedged. Once wedged there doesn't appear * to be any way to unwedge it (including the HDMI_I2CM_SOFTRSTZ) * other than pulsing the SDA line. * * We appear to be able to pulse the SDA line (in the eyes of dw_hdmi) * by: * 1. Remux the pin as a GPIO output, driven low. * 2. Wait a little while. 1 ms seems to work, but we'll do 10. * 3. Immediately jump to remux the pin as dw_hdmi i2c again. 
* * At the moment of remuxing, the line will still be low due to its * recent stint as an output, but then it will be pulled high by the * (presumed) external pullup. dw_hdmi seems to see this as a rising * edge and that seems to get it out of its jam. * * This wedging was only ever seen on one TV, and only on one of * its HDMI ports. It happened when the TV was powered on while the * device was plugged in. A scope trace shows the TV bringing both SDA * and SCL low, then bringing them both back up at roughly the same * time. Presumably this confuses dw_hdmi because it saw activity but * no real STOP (maybe it thinks there's another master on the bus?). * Giving it a clean rising edge of SDA while SCL is already high * presumably makes dw_hdmi see a STOP which seems to bring dw_hdmi out * of its stupor. * * Note that after coming back alive, transfers seem to immediately * resume, so if we unwedge due to a timeout we should wait a little * longer for our transfer to finish, since it might have just started * now. 
*/ pinctrl_select_state(hdmi->pinctrl, hdmi->unwedge_state); msleep(10); pinctrl_select_state(hdmi->pinctrl, hdmi->default_state); return true; } static int dw_hdmi_i2c_wait(struct dw_hdmi *hdmi) { struct dw_hdmi_i2c *i2c = hdmi->i2c; int stat; stat = wait_for_completion_timeout(&i2c->cmp, HZ / 10); if (!stat) { /* If we can't unwedge, return timeout */ if (!dw_hdmi_i2c_unwedge(hdmi)) return -EAGAIN; /* We tried to unwedge; give it another chance */ stat = wait_for_completion_timeout(&i2c->cmp, HZ / 10); if (!stat) return -EAGAIN; } /* Check for error condition on the bus */ if (i2c->stat & HDMI_IH_I2CM_STAT0_ERROR) return -EIO; return 0; } static int dw_hdmi_i2c_read(struct dw_hdmi *hdmi, unsigned char *buf, unsigned int length) { struct dw_hdmi_i2c *i2c = hdmi->i2c; int ret; if (!i2c->is_regaddr) { dev_dbg(hdmi->dev, "set read register address to 0\n"); i2c->slave_reg = 0x00; i2c->is_regaddr = true; } while (length--) { reinit_completion(&i2c->cmp); hdmi_writeb(hdmi, i2c->slave_reg++, HDMI_I2CM_ADDRESS); if (i2c->is_segment) hdmi_writeb(hdmi, HDMI_I2CM_OPERATION_READ_EXT, HDMI_I2CM_OPERATION); else hdmi_writeb(hdmi, HDMI_I2CM_OPERATION_READ, HDMI_I2CM_OPERATION); ret = dw_hdmi_i2c_wait(hdmi); if (ret) return ret; *buf++ = hdmi_readb(hdmi, HDMI_I2CM_DATAI); } i2c->is_segment = false; return 0; } static int dw_hdmi_i2c_write(struct dw_hdmi *hdmi, unsigned char *buf, unsigned int length) { struct dw_hdmi_i2c *i2c = hdmi->i2c; int ret; if (!i2c->is_regaddr) { /* Use the first write byte as register address */ i2c->slave_reg = buf[0]; length--; buf++; i2c->is_regaddr = true; } while (length--) { reinit_completion(&i2c->cmp); hdmi_writeb(hdmi, *buf++, HDMI_I2CM_DATAO); hdmi_writeb(hdmi, i2c->slave_reg++, HDMI_I2CM_ADDRESS); hdmi_writeb(hdmi, HDMI_I2CM_OPERATION_WRITE, HDMI_I2CM_OPERATION); ret = dw_hdmi_i2c_wait(hdmi); if (ret) return ret; } return 0; } static int dw_hdmi_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct dw_hdmi *hdmi = 
i2c_get_adapdata(adap); struct dw_hdmi_i2c *i2c = hdmi->i2c; u8 addr = msgs[0].addr; int i, ret = 0; if (addr == DDC_CI_ADDR) /* * The internal I2C controller does not support the multi-byte * read and write operations needed for DDC/CI. * TOFIX: Blacklist the DDC/CI address until we filter out * unsupported I2C operations. */ return -EOPNOTSUPP; dev_dbg(hdmi->dev, "xfer: num: %d, addr: %#x\n", num, addr); for (i = 0; i < num; i++) { if (msgs[i].len == 0) { dev_dbg(hdmi->dev, "unsupported transfer %d/%d, no data\n", i + 1, num); return -EOPNOTSUPP; } } mutex_lock(&i2c->lock); /* Unmute DONE and ERROR interrupts */ hdmi_writeb(hdmi, 0x00, HDMI_IH_MUTE_I2CM_STAT0); /* Set slave device address taken from the first I2C message */ hdmi_writeb(hdmi, addr, HDMI_I2CM_SLAVE); /* Set slave device register address on transfer */ i2c->is_regaddr = false; /* Set segment pointer for I2C extended read mode operation */ i2c->is_segment = false; for (i = 0; i < num; i++) { dev_dbg(hdmi->dev, "xfer: num: %d/%d, len: %d, flags: %#x\n", i + 1, num, msgs[i].len, msgs[i].flags); if (msgs[i].addr == DDC_SEGMENT_ADDR && msgs[i].len == 1) { i2c->is_segment = true; hdmi_writeb(hdmi, DDC_SEGMENT_ADDR, HDMI_I2CM_SEGADDR); hdmi_writeb(hdmi, *msgs[i].buf, HDMI_I2CM_SEGPTR); } else { if (msgs[i].flags & I2C_M_RD) ret = dw_hdmi_i2c_read(hdmi, msgs[i].buf, msgs[i].len); else ret = dw_hdmi_i2c_write(hdmi, msgs[i].buf, msgs[i].len); } if (ret < 0) break; } if (!ret) ret = num; /* Mute DONE and ERROR interrupts */ hdmi_writeb(hdmi, HDMI_IH_I2CM_STAT0_ERROR | HDMI_IH_I2CM_STAT0_DONE, HDMI_IH_MUTE_I2CM_STAT0); mutex_unlock(&i2c->lock); return ret; } static u32 dw_hdmi_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm dw_hdmi_algorithm = { .master_xfer = dw_hdmi_i2c_xfer, .functionality = dw_hdmi_i2c_func, }; static struct i2c_adapter *dw_hdmi_i2c_adapter(struct dw_hdmi *hdmi) { struct i2c_adapter *adap; struct dw_hdmi_i2c *i2c; int 
ret; i2c = devm_kzalloc(hdmi->dev, sizeof(*i2c), GFP_KERNEL); if (!i2c) return ERR_PTR(-ENOMEM); mutex_init(&i2c->lock); init_completion(&i2c->cmp); adap = &i2c->adap; adap->class = I2C_CLASS_DDC; adap->owner = THIS_MODULE; adap->dev.parent = hdmi->dev; adap->algo = &dw_hdmi_algorithm; strscpy(adap->name, "DesignWare HDMI", sizeof(adap->name)); i2c_set_adapdata(adap, hdmi); ret = i2c_add_adapter(adap); if (ret) { dev_warn(hdmi->dev, "cannot add %s I2C adapter\n", adap->name); devm_kfree(hdmi->dev, i2c); return ERR_PTR(ret); } hdmi->i2c = i2c; dev_info(hdmi->dev, "registered %s I2C bus driver\n", adap->name); return adap; } static void hdmi_set_cts_n(struct dw_hdmi *hdmi, unsigned int cts, unsigned int n) { /* Must be set/cleared first */ hdmi_modb(hdmi, 0, HDMI_AUD_CTS3_CTS_MANUAL, HDMI_AUD_CTS3); /* nshift factor = 0 */ hdmi_modb(hdmi, 0, HDMI_AUD_CTS3_N_SHIFT_MASK, HDMI_AUD_CTS3); /* Use automatic CTS generation mode when CTS is not set */ if (cts) hdmi_writeb(hdmi, ((cts >> 16) & HDMI_AUD_CTS3_AUDCTS19_16_MASK) | HDMI_AUD_CTS3_CTS_MANUAL, HDMI_AUD_CTS3); else hdmi_writeb(hdmi, 0, HDMI_AUD_CTS3); hdmi_writeb(hdmi, (cts >> 8) & 0xff, HDMI_AUD_CTS2); hdmi_writeb(hdmi, cts & 0xff, HDMI_AUD_CTS1); hdmi_writeb(hdmi, (n >> 16) & 0x0f, HDMI_AUD_N3); hdmi_writeb(hdmi, (n >> 8) & 0xff, HDMI_AUD_N2); hdmi_writeb(hdmi, n & 0xff, HDMI_AUD_N1); } static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk) { unsigned int n = (128 * freq) / 1000; unsigned int mult = 1; while (freq > 48000) { mult *= 2; freq /= 2; } switch (freq) { case 32000: if (pixel_clk == 25175000) n = 4576; else if (pixel_clk == 27027000) n = 4096; else if (pixel_clk == 74176000 || pixel_clk == 148352000) n = 11648; else if (pixel_clk == 297000000) n = 3072; else n = 4096; n *= mult; break; case 44100: if (pixel_clk == 25175000) n = 7007; else if (pixel_clk == 74176000) n = 17836; else if (pixel_clk == 148352000) n = 8918; else if (pixel_clk == 297000000) n = 4704; else n = 6272; n *= 
mult; break; case 48000: if (pixel_clk == 25175000) n = 6864; else if (pixel_clk == 27027000) n = 6144; else if (pixel_clk == 74176000) n = 11648; else if (pixel_clk == 148352000) n = 5824; else if (pixel_clk == 297000000) n = 5120; else n = 6144; n *= mult; break; default: break; } return n; } /* * When transmitting IEC60958 linear PCM audio, these registers allow to * configure the channel status information of all the channel status * bits in the IEC60958 frame. For the moment this configuration is only * used when the I2S audio interface, General Purpose Audio (GPA), * or AHB audio DMA (AHBAUDDMA) interface is active * (for S/PDIF interface this information comes from the stream). */ void dw_hdmi_set_channel_status(struct dw_hdmi *hdmi, u8 *channel_status) { /* * Set channel status register for frequency and word length. * Use default values for other registers. */ hdmi_writeb(hdmi, channel_status[3], HDMI_FC_AUDSCHNLS7); hdmi_writeb(hdmi, channel_status[4], HDMI_FC_AUDSCHNLS8); } EXPORT_SYMBOL_GPL(dw_hdmi_set_channel_status); static void hdmi_set_clk_regenerator(struct dw_hdmi *hdmi, unsigned long pixel_clk, unsigned int sample_rate) { unsigned long ftdms = pixel_clk; unsigned int n, cts; u8 config3; u64 tmp; n = hdmi_compute_n(sample_rate, pixel_clk); config3 = hdmi_readb(hdmi, HDMI_CONFIG3_ID); /* Compute CTS when using internal AHB audio or General Parallel audio*/ if ((config3 & HDMI_CONFIG3_AHBAUDDMA) || (config3 & HDMI_CONFIG3_GPAUD)) { /* * Compute the CTS value from the N value. Note that CTS and N * can be up to 20 bits in total, so we need 64-bit math. Also * note that our TDMS clock is not fully accurate; it is * accurate to kHz. This can introduce an unnecessary remainder * in the calculation below, so we don't try to warn about that. 
*/ tmp = (u64)ftdms * n; do_div(tmp, 128 * sample_rate); cts = tmp; dev_dbg(hdmi->dev, "%s: fs=%uHz ftdms=%lu.%03luMHz N=%d cts=%d\n", __func__, sample_rate, ftdms / 1000000, (ftdms / 1000) % 1000, n, cts); } else { cts = 0; } spin_lock_irq(&hdmi->audio_lock); hdmi->audio_n = n; hdmi->audio_cts = cts; hdmi_set_cts_n(hdmi, cts, hdmi->audio_enable ? n : 0); spin_unlock_irq(&hdmi->audio_lock); } static void hdmi_init_clk_regenerator(struct dw_hdmi *hdmi) { mutex_lock(&hdmi->audio_mutex); hdmi_set_clk_regenerator(hdmi, 74250000, hdmi->sample_rate); mutex_unlock(&hdmi->audio_mutex); } static void hdmi_clk_regenerator_update_pixel_clock(struct dw_hdmi *hdmi) { mutex_lock(&hdmi->audio_mutex); hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mtmdsclock, hdmi->sample_rate); mutex_unlock(&hdmi->audio_mutex); } void dw_hdmi_set_sample_width(struct dw_hdmi *hdmi, unsigned int width) { mutex_lock(&hdmi->audio_mutex); hdmi->sample_width = width; mutex_unlock(&hdmi->audio_mutex); } EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_width); void dw_hdmi_set_sample_non_pcm(struct dw_hdmi *hdmi, unsigned int non_pcm) { mutex_lock(&hdmi->audio_mutex); hdmi->sample_non_pcm = non_pcm; mutex_unlock(&hdmi->audio_mutex); } EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_non_pcm); void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate) { mutex_lock(&hdmi->audio_mutex); hdmi->sample_rate = rate; hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mtmdsclock, hdmi->sample_rate); mutex_unlock(&hdmi->audio_mutex); } EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_rate); void dw_hdmi_set_channel_count(struct dw_hdmi *hdmi, unsigned int cnt) { u8 layout; mutex_lock(&hdmi->audio_mutex); hdmi->channels = cnt; /* * For >2 channel PCM audio, we need to select layout 1 * and set an appropriate channel map. 
*/ if (cnt > 2) layout = HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_LAYOUT1; else layout = HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_LAYOUT0; hdmi_modb(hdmi, layout, HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_MASK, HDMI_FC_AUDSCONF); /* Set the audio infoframes channel count */ hdmi_modb(hdmi, (cnt - 1) << HDMI_FC_AUDICONF0_CC_OFFSET, HDMI_FC_AUDICONF0_CC_MASK, HDMI_FC_AUDICONF0); mutex_unlock(&hdmi->audio_mutex); } EXPORT_SYMBOL_GPL(dw_hdmi_set_channel_count); void dw_hdmi_set_channel_allocation(struct dw_hdmi *hdmi, unsigned int ca) { mutex_lock(&hdmi->audio_mutex); hdmi_writeb(hdmi, ca, HDMI_FC_AUDICONF2); mutex_unlock(&hdmi->audio_mutex); } EXPORT_SYMBOL_GPL(dw_hdmi_set_channel_allocation); static void hdmi_enable_audio_clk(struct dw_hdmi *hdmi, bool enable) { if (enable) hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_AUDCLK_DISABLE; else hdmi->mc_clkdis |= HDMI_MC_CLKDIS_AUDCLK_DISABLE; hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS); } static u8 *hdmi_audio_get_eld(struct dw_hdmi *hdmi) { if (!hdmi->curr_conn) return NULL; return hdmi->curr_conn->eld; } static void dw_hdmi_gp_audio_enable(struct dw_hdmi *hdmi) { const struct dw_hdmi_plat_data *pdata = hdmi->plat_data; int sample_freq = 0x2, org_sample_freq = 0xD; int ch_mask = BIT(hdmi->channels) - 1; switch (hdmi->sample_rate) { case 32000: sample_freq = 0x03; org_sample_freq = 0x0C; break; case 44100: sample_freq = 0x00; org_sample_freq = 0x0F; break; case 48000: sample_freq = 0x02; org_sample_freq = 0x0D; break; case 88200: sample_freq = 0x08; org_sample_freq = 0x07; break; case 96000: sample_freq = 0x0A; org_sample_freq = 0x05; break; case 176400: sample_freq = 0x0C; org_sample_freq = 0x03; break; case 192000: sample_freq = 0x0E; org_sample_freq = 0x01; break; default: break; } hdmi_set_cts_n(hdmi, hdmi->audio_cts, hdmi->audio_n); hdmi_enable_audio_clk(hdmi, true); hdmi_writeb(hdmi, 0x1, HDMI_FC_AUDSCHNLS0); hdmi_writeb(hdmi, hdmi->channels, HDMI_FC_AUDSCHNLS2); hdmi_writeb(hdmi, 0x22, HDMI_FC_AUDSCHNLS3); hdmi_writeb(hdmi, 0x22, 
HDMI_FC_AUDSCHNLS4); hdmi_writeb(hdmi, 0x11, HDMI_FC_AUDSCHNLS5); hdmi_writeb(hdmi, 0x11, HDMI_FC_AUDSCHNLS6); hdmi_writeb(hdmi, (0x3 << 4) | sample_freq, HDMI_FC_AUDSCHNLS7); hdmi_writeb(hdmi, (org_sample_freq << 4) | 0xb, HDMI_FC_AUDSCHNLS8); hdmi_writeb(hdmi, ch_mask, HDMI_GP_CONF1); hdmi_writeb(hdmi, 0x02, HDMI_GP_CONF2); hdmi_writeb(hdmi, 0x01, HDMI_GP_CONF0); hdmi_modb(hdmi, 0x3, 0x3, HDMI_FC_DATAUTO3); /* hbr */ if (hdmi->sample_rate == 192000 && hdmi->channels == 8 && hdmi->sample_width == 32 && hdmi->sample_non_pcm) hdmi_modb(hdmi, 0x01, 0x01, HDMI_GP_CONF2); if (pdata->enable_audio) pdata->enable_audio(hdmi, hdmi->channels, hdmi->sample_width, hdmi->sample_rate, hdmi->sample_non_pcm); } static void dw_hdmi_gp_audio_disable(struct dw_hdmi *hdmi) { const struct dw_hdmi_plat_data *pdata = hdmi->plat_data; hdmi_set_cts_n(hdmi, hdmi->audio_cts, 0); hdmi_modb(hdmi, 0, 0x3, HDMI_FC_DATAUTO3); if (pdata->disable_audio) pdata->disable_audio(hdmi); hdmi_enable_audio_clk(hdmi, false); } static void dw_hdmi_ahb_audio_enable(struct dw_hdmi *hdmi) { hdmi_set_cts_n(hdmi, hdmi->audio_cts, hdmi->audio_n); } static void dw_hdmi_ahb_audio_disable(struct dw_hdmi *hdmi) { hdmi_set_cts_n(hdmi, hdmi->audio_cts, 0); } static void dw_hdmi_i2s_audio_enable(struct dw_hdmi *hdmi) { hdmi_set_cts_n(hdmi, hdmi->audio_cts, hdmi->audio_n); hdmi_enable_audio_clk(hdmi, true); } static void dw_hdmi_i2s_audio_disable(struct dw_hdmi *hdmi) { hdmi_enable_audio_clk(hdmi, false); } void dw_hdmi_audio_enable(struct dw_hdmi *hdmi) { unsigned long flags; spin_lock_irqsave(&hdmi->audio_lock, flags); hdmi->audio_enable = true; if (hdmi->enable_audio) hdmi->enable_audio(hdmi); spin_unlock_irqrestore(&hdmi->audio_lock, flags); } EXPORT_SYMBOL_GPL(dw_hdmi_audio_enable); void dw_hdmi_audio_disable(struct dw_hdmi *hdmi) { unsigned long flags; spin_lock_irqsave(&hdmi->audio_lock, flags); hdmi->audio_enable = false; if (hdmi->disable_audio) hdmi->disable_audio(hdmi); 
spin_unlock_irqrestore(&hdmi->audio_lock, flags);
}
EXPORT_SYMBOL_GPL(dw_hdmi_audio_disable);

/*
 * Format-classification helpers: map a MEDIA_BUS_FMT_* code to the colour
 * family it belongs to. Only the formats this driver handles are listed;
 * anything else falls through to "false" (or depth 0 below).
 */

/* Bus format carries RGB samples (8/10/12/16 bits per component). */
static bool hdmi_bus_fmt_is_rgb(unsigned int bus_format)
{
	switch (bus_format) {
	case MEDIA_BUS_FMT_RGB888_1X24:
	case MEDIA_BUS_FMT_RGB101010_1X30:
	case MEDIA_BUS_FMT_RGB121212_1X36:
	case MEDIA_BUS_FMT_RGB161616_1X48:
		return true;

	default:
		return false;
	}
}

/* Bus format carries fully sampled YUV 4:4:4 data. */
static bool hdmi_bus_fmt_is_yuv444(unsigned int bus_format)
{
	switch (bus_format) {
	case MEDIA_BUS_FMT_YUV8_1X24:
	case MEDIA_BUS_FMT_YUV10_1X30:
	case MEDIA_BUS_FMT_YUV12_1X36:
	case MEDIA_BUS_FMT_YUV16_1X48:
		return true;

	default:
		return false;
	}
}

/* Bus format carries chroma-subsampled YUV 4:2:2 data. */
static bool hdmi_bus_fmt_is_yuv422(unsigned int bus_format)
{
	switch (bus_format) {
	case MEDIA_BUS_FMT_UYVY8_1X16:
	case MEDIA_BUS_FMT_UYVY10_1X20:
	case MEDIA_BUS_FMT_UYVY12_1X24:
		return true;

	default:
		return false;
	}
}

/* Bus format carries chroma-subsampled YUV 4:2:0 data. */
static bool hdmi_bus_fmt_is_yuv420(unsigned int bus_format)
{
	switch (bus_format) {
	case MEDIA_BUS_FMT_UYYVYY8_0_5X24:
	case MEDIA_BUS_FMT_UYYVYY10_0_5X30:
	case MEDIA_BUS_FMT_UYYVYY12_0_5X36:
	case MEDIA_BUS_FMT_UYYVYY16_0_5X48:
		return true;

	default:
		return false;
	}
}

/*
 * Bits per colour component for a bus format, or 0 for a format the
 * driver does not recognise.
 */
static int hdmi_bus_fmt_color_depth(unsigned int bus_format)
{
	switch (bus_format) {
	case MEDIA_BUS_FMT_RGB888_1X24:
	case MEDIA_BUS_FMT_YUV8_1X24:
	case MEDIA_BUS_FMT_UYVY8_1X16:
	case MEDIA_BUS_FMT_UYYVYY8_0_5X24:
		return 8;

	case MEDIA_BUS_FMT_RGB101010_1X30:
	case MEDIA_BUS_FMT_YUV10_1X30:
	case MEDIA_BUS_FMT_UYVY10_1X20:
	case MEDIA_BUS_FMT_UYYVYY10_0_5X30:
		return 10;

	case MEDIA_BUS_FMT_RGB121212_1X36:
	case MEDIA_BUS_FMT_YUV12_1X36:
	case MEDIA_BUS_FMT_UYVY12_1X24:
	case MEDIA_BUS_FMT_UYYVYY12_0_5X36:
		return 12;

	case MEDIA_BUS_FMT_RGB161616_1X48:
	case MEDIA_BUS_FMT_YUV16_1X48:
	case MEDIA_BUS_FMT_UYYVYY16_0_5X48:
		return 16;

	default:
		return 0;
	}
}

/*
 * this submodule is responsible for the video data synchronization.
* for example, for RGB 4:4:4 input, the data map is defined as * pin{47~40} <==> R[7:0] * pin{31~24} <==> G[7:0] * pin{15~8} <==> B[7:0] */ static void hdmi_video_sample(struct dw_hdmi *hdmi) { int color_format = 0; u8 val; switch (hdmi->hdmi_data.enc_in_bus_format) { case MEDIA_BUS_FMT_RGB888_1X24: color_format = 0x01; break; case MEDIA_BUS_FMT_RGB101010_1X30: color_format = 0x03; break; case MEDIA_BUS_FMT_RGB121212_1X36: color_format = 0x05; break; case MEDIA_BUS_FMT_RGB161616_1X48: color_format = 0x07; break; case MEDIA_BUS_FMT_YUV8_1X24: case MEDIA_BUS_FMT_UYYVYY8_0_5X24: color_format = 0x09; break; case MEDIA_BUS_FMT_YUV10_1X30: case MEDIA_BUS_FMT_UYYVYY10_0_5X30: color_format = 0x0B; break; case MEDIA_BUS_FMT_YUV12_1X36: case MEDIA_BUS_FMT_UYYVYY12_0_5X36: color_format = 0x0D; break; case MEDIA_BUS_FMT_YUV16_1X48: case MEDIA_BUS_FMT_UYYVYY16_0_5X48: color_format = 0x0F; break; case MEDIA_BUS_FMT_UYVY8_1X16: color_format = 0x16; break; case MEDIA_BUS_FMT_UYVY10_1X20: color_format = 0x14; break; case MEDIA_BUS_FMT_UYVY12_1X24: color_format = 0x12; break; default: return; } val = HDMI_TX_INVID0_INTERNAL_DE_GENERATOR_DISABLE | ((color_format << HDMI_TX_INVID0_VIDEO_MAPPING_OFFSET) & HDMI_TX_INVID0_VIDEO_MAPPING_MASK); hdmi_writeb(hdmi, val, HDMI_TX_INVID0); /* Enable TX stuffing: When DE is inactive, fix the output data to 0 */ val = HDMI_TX_INSTUFFING_BDBDATA_STUFFING_ENABLE | HDMI_TX_INSTUFFING_RCRDATA_STUFFING_ENABLE | HDMI_TX_INSTUFFING_GYDATA_STUFFING_ENABLE; hdmi_writeb(hdmi, val, HDMI_TX_INSTUFFING); hdmi_writeb(hdmi, 0x0, HDMI_TX_GYDATA0); hdmi_writeb(hdmi, 0x0, HDMI_TX_GYDATA1); hdmi_writeb(hdmi, 0x0, HDMI_TX_RCRDATA0); hdmi_writeb(hdmi, 0x0, HDMI_TX_RCRDATA1); hdmi_writeb(hdmi, 0x0, HDMI_TX_BCBDATA0); hdmi_writeb(hdmi, 0x0, HDMI_TX_BCBDATA1); } static int is_color_space_conversion(struct dw_hdmi *hdmi) { struct hdmi_data_info *hdmi_data = &hdmi->hdmi_data; bool is_input_rgb, is_output_rgb; is_input_rgb = 
hdmi_bus_fmt_is_rgb(hdmi_data->enc_in_bus_format); is_output_rgb = hdmi_bus_fmt_is_rgb(hdmi_data->enc_out_bus_format); return (is_input_rgb != is_output_rgb) || (is_input_rgb && is_output_rgb && hdmi_data->rgb_limited_range); } static int is_color_space_decimation(struct dw_hdmi *hdmi) { if (!hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_out_bus_format)) return 0; if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_in_bus_format) || hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_in_bus_format)) return 1; return 0; } static int is_color_space_interpolation(struct dw_hdmi *hdmi) { if (!hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_in_bus_format)) return 0; if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format) || hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_out_bus_format)) return 1; return 0; } static bool is_csc_needed(struct dw_hdmi *hdmi) { return is_color_space_conversion(hdmi) || is_color_space_decimation(hdmi) || is_color_space_interpolation(hdmi); } static void dw_hdmi_update_csc_coeffs(struct dw_hdmi *hdmi) { const u16 (*csc_coeff)[3][4] = &csc_coeff_default; bool is_input_rgb, is_output_rgb; unsigned i; u32 csc_scale = 1; is_input_rgb = hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_in_bus_format); is_output_rgb = hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format); if (!is_input_rgb && is_output_rgb) { if (hdmi->hdmi_data.enc_out_encoding == V4L2_YCBCR_ENC_601) csc_coeff = &csc_coeff_rgb_out_eitu601; else csc_coeff = &csc_coeff_rgb_out_eitu709; } else if (is_input_rgb && !is_output_rgb) { if (hdmi->hdmi_data.enc_out_encoding == V4L2_YCBCR_ENC_601) csc_coeff = &csc_coeff_rgb_in_eitu601; else csc_coeff = &csc_coeff_rgb_in_eitu709; csc_scale = 0; } else if (is_input_rgb && is_output_rgb && hdmi->hdmi_data.rgb_limited_range) { csc_coeff = &csc_coeff_rgb_full_to_rgb_limited; } /* The CSC registers are sequential, alternating MSB then LSB */ for (i = 0; i < ARRAY_SIZE(csc_coeff_default[0]); i++) { u16 coeff_a = (*csc_coeff)[0][i]; u16 coeff_b = (*csc_coeff)[1][i]; u16 coeff_c = 
(*csc_coeff)[2][i]; hdmi_writeb(hdmi, coeff_a & 0xff, HDMI_CSC_COEF_A1_LSB + i * 2); hdmi_writeb(hdmi, coeff_a >> 8, HDMI_CSC_COEF_A1_MSB + i * 2); hdmi_writeb(hdmi, coeff_b & 0xff, HDMI_CSC_COEF_B1_LSB + i * 2); hdmi_writeb(hdmi, coeff_b >> 8, HDMI_CSC_COEF_B1_MSB + i * 2); hdmi_writeb(hdmi, coeff_c & 0xff, HDMI_CSC_COEF_C1_LSB + i * 2); hdmi_writeb(hdmi, coeff_c >> 8, HDMI_CSC_COEF_C1_MSB + i * 2); } hdmi_modb(hdmi, csc_scale, HDMI_CSC_SCALE_CSCSCALE_MASK, HDMI_CSC_SCALE); } static void hdmi_video_csc(struct dw_hdmi *hdmi) { int color_depth = 0; int interpolation = HDMI_CSC_CFG_INTMODE_DISABLE; int decimation = 0; /* YCC422 interpolation to 444 mode */ if (is_color_space_interpolation(hdmi)) interpolation = HDMI_CSC_CFG_INTMODE_CHROMA_INT_FORMULA1; else if (is_color_space_decimation(hdmi)) decimation = HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA3; switch (hdmi_bus_fmt_color_depth(hdmi->hdmi_data.enc_out_bus_format)) { case 8: color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_24BPP; break; case 10: color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_30BPP; break; case 12: color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_36BPP; break; case 16: color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_48BPP; break; default: return; } /* Configure the CSC registers */ hdmi_writeb(hdmi, interpolation | decimation, HDMI_CSC_CFG); hdmi_modb(hdmi, color_depth, HDMI_CSC_SCALE_CSC_COLORDE_PTH_MASK, HDMI_CSC_SCALE); dw_hdmi_update_csc_coeffs(hdmi); } /* * HDMI video packetizer is used to packetize the data. * for example, if input is YCC422 mode or repeater is used, * data should be repacked this module can be bypassed. 
*/ static void hdmi_video_packetize(struct dw_hdmi *hdmi) { unsigned int color_depth = 0; unsigned int remap_size = HDMI_VP_REMAP_YCC422_16bit; unsigned int output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_PP; struct hdmi_data_info *hdmi_data = &hdmi->hdmi_data; u8 val, vp_conf; u8 clear_gcp_auto = 0; if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format) || hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_out_bus_format) || hdmi_bus_fmt_is_yuv420(hdmi->hdmi_data.enc_out_bus_format)) { switch (hdmi_bus_fmt_color_depth( hdmi->hdmi_data.enc_out_bus_format)) { case 8: color_depth = 4; output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS; clear_gcp_auto = 1; break; case 10: color_depth = 5; break; case 12: color_depth = 6; break; case 16: color_depth = 7; break; default: output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS; } } else if (hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_out_bus_format)) { switch (hdmi_bus_fmt_color_depth( hdmi->hdmi_data.enc_out_bus_format)) { case 0: case 8: remap_size = HDMI_VP_REMAP_YCC422_16bit; clear_gcp_auto = 1; break; case 10: remap_size = HDMI_VP_REMAP_YCC422_20bit; break; case 12: remap_size = HDMI_VP_REMAP_YCC422_24bit; break; default: return; } output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_YCC422; } else { return; } /* set the packetizer registers */ val = ((color_depth << HDMI_VP_PR_CD_COLOR_DEPTH_OFFSET) & HDMI_VP_PR_CD_COLOR_DEPTH_MASK) | ((hdmi_data->pix_repet_factor << HDMI_VP_PR_CD_DESIRED_PR_FACTOR_OFFSET) & HDMI_VP_PR_CD_DESIRED_PR_FACTOR_MASK); hdmi_writeb(hdmi, val, HDMI_VP_PR_CD); /* HDMI1.4b specification section 6.5.3: * Source shall only send GCPs with non-zero CD to sinks * that indicate support for Deep Color. * GCP only transmit CD and do not handle AVMUTE, PP norDefault_Phase (yet). * Disable Auto GCP when 24-bit color for sinks that not support Deep Color. 
*/ val = hdmi_readb(hdmi, HDMI_FC_DATAUTO3); if (clear_gcp_auto == 1) val &= ~HDMI_FC_DATAUTO3_GCP_AUTO; else val |= HDMI_FC_DATAUTO3_GCP_AUTO; hdmi_writeb(hdmi, val, HDMI_FC_DATAUTO3); hdmi_modb(hdmi, HDMI_VP_STUFF_PR_STUFFING_STUFFING_MODE, HDMI_VP_STUFF_PR_STUFFING_MASK, HDMI_VP_STUFF); /* Data from pixel repeater block */ if (hdmi_data->pix_repet_factor > 1) { vp_conf = HDMI_VP_CONF_PR_EN_ENABLE | HDMI_VP_CONF_BYPASS_SELECT_PIX_REPEATER; } else { /* data from packetizer block */ vp_conf = HDMI_VP_CONF_PR_EN_DISABLE | HDMI_VP_CONF_BYPASS_SELECT_VID_PACKETIZER; } hdmi_modb(hdmi, vp_conf, HDMI_VP_CONF_PR_EN_MASK | HDMI_VP_CONF_BYPASS_SELECT_MASK, HDMI_VP_CONF); hdmi_modb(hdmi, 1 << HDMI_VP_STUFF_IDEFAULT_PHASE_OFFSET, HDMI_VP_STUFF_IDEFAULT_PHASE_MASK, HDMI_VP_STUFF); hdmi_writeb(hdmi, remap_size, HDMI_VP_REMAP); if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_PP) { vp_conf = HDMI_VP_CONF_BYPASS_EN_DISABLE | HDMI_VP_CONF_PP_EN_ENABLE | HDMI_VP_CONF_YCC422_EN_DISABLE; } else if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_YCC422) { vp_conf = HDMI_VP_CONF_BYPASS_EN_DISABLE | HDMI_VP_CONF_PP_EN_DISABLE | HDMI_VP_CONF_YCC422_EN_ENABLE; } else if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS) { vp_conf = HDMI_VP_CONF_BYPASS_EN_ENABLE | HDMI_VP_CONF_PP_EN_DISABLE | HDMI_VP_CONF_YCC422_EN_DISABLE; } else { return; } hdmi_modb(hdmi, vp_conf, HDMI_VP_CONF_BYPASS_EN_MASK | HDMI_VP_CONF_PP_EN_ENMASK | HDMI_VP_CONF_YCC422_EN_MASK, HDMI_VP_CONF); hdmi_modb(hdmi, HDMI_VP_STUFF_PP_STUFFING_STUFFING_MODE | HDMI_VP_STUFF_YCC422_STUFFING_STUFFING_MODE, HDMI_VP_STUFF_PP_STUFFING_MASK | HDMI_VP_STUFF_YCC422_STUFFING_MASK, HDMI_VP_STUFF); hdmi_modb(hdmi, output_select, HDMI_VP_CONF_OUTPUT_SELECTOR_MASK, HDMI_VP_CONF); } /* ----------------------------------------------------------------------------- * Synopsys PHY Handling */ static inline void hdmi_phy_test_clear(struct dw_hdmi *hdmi, unsigned char bit) { hdmi_modb(hdmi, bit << HDMI_PHY_TST0_TSTCLR_OFFSET, 
HDMI_PHY_TST0_TSTCLR_MASK, HDMI_PHY_TST0); } static bool hdmi_phy_wait_i2c_done(struct dw_hdmi *hdmi, int msec) { u32 val; while ((val = hdmi_readb(hdmi, HDMI_IH_I2CMPHY_STAT0) & 0x3) == 0) { if (msec-- == 0) return false; udelay(1000); } hdmi_writeb(hdmi, val, HDMI_IH_I2CMPHY_STAT0); return true; } void dw_hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data, unsigned char addr) { hdmi_writeb(hdmi, 0xFF, HDMI_IH_I2CMPHY_STAT0); hdmi_writeb(hdmi, addr, HDMI_PHY_I2CM_ADDRESS_ADDR); hdmi_writeb(hdmi, (unsigned char)(data >> 8), HDMI_PHY_I2CM_DATAO_1_ADDR); hdmi_writeb(hdmi, (unsigned char)(data >> 0), HDMI_PHY_I2CM_DATAO_0_ADDR); hdmi_writeb(hdmi, HDMI_PHY_I2CM_OPERATION_ADDR_WRITE, HDMI_PHY_I2CM_OPERATION_ADDR); hdmi_phy_wait_i2c_done(hdmi, 1000); } EXPORT_SYMBOL_GPL(dw_hdmi_phy_i2c_write); /* Filter out invalid setups to avoid configuring SCDC and scrambling */ static bool dw_hdmi_support_scdc(struct dw_hdmi *hdmi, const struct drm_display_info *display) { /* Completely disable SCDC support for older controllers */ if (hdmi->version < 0x200a) return false; /* Disable if no DDC bus */ if (!hdmi->ddc) return false; /* Disable if SCDC is not supported, or if an HF-VSDB block is absent */ if (!display->hdmi.scdc.supported || !display->hdmi.scdc.scrambling.supported) return false; /* * Disable if display only support low TMDS rates and scrambling * for low rates is not supported either */ if (!display->hdmi.scdc.scrambling.low_rates && display->max_tmds_clock <= 340000) return false; return true; } /* * HDMI2.0 Specifies the following procedure for High TMDS Bit Rates: * - The Source shall suspend transmission of the TMDS clock and data * - The Source shall write to the TMDS_Bit_Clock_Ratio bit to change it * from a 0 to a 1 or from a 1 to a 0 * - The Source shall allow a minimum of 1 ms and a maximum of 100 ms from * the time the TMDS_Bit_Clock_Ratio bit is written until resuming * transmission of TMDS clock and data * * To respect the 100ms maximum delay, the 
dw_hdmi_set_high_tmds_clock_ratio() * helper should called right before enabling the TMDS Clock and Data in * the PHY configuration callback. */ void dw_hdmi_set_high_tmds_clock_ratio(struct dw_hdmi *hdmi, const struct drm_display_info *display) { unsigned long mtmdsclock = hdmi->hdmi_data.video_mode.mtmdsclock; /* Control for TMDS Bit Period/TMDS Clock-Period Ratio */ if (dw_hdmi_support_scdc(hdmi, display)) { if (mtmdsclock > HDMI14_MAX_TMDSCLK) drm_scdc_set_high_tmds_clock_ratio(hdmi->curr_conn, 1); else drm_scdc_set_high_tmds_clock_ratio(hdmi->curr_conn, 0); } } EXPORT_SYMBOL_GPL(dw_hdmi_set_high_tmds_clock_ratio); static void dw_hdmi_phy_enable_powerdown(struct dw_hdmi *hdmi, bool enable) { hdmi_mask_writeb(hdmi, !enable, HDMI_PHY_CONF0, HDMI_PHY_CONF0_PDZ_OFFSET, HDMI_PHY_CONF0_PDZ_MASK); } static void dw_hdmi_phy_enable_tmds(struct dw_hdmi *hdmi, u8 enable) { hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0, HDMI_PHY_CONF0_ENTMDS_OFFSET, HDMI_PHY_CONF0_ENTMDS_MASK); } static void dw_hdmi_phy_enable_svsret(struct dw_hdmi *hdmi, u8 enable) { hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0, HDMI_PHY_CONF0_SVSRET_OFFSET, HDMI_PHY_CONF0_SVSRET_MASK); } void dw_hdmi_phy_gen2_pddq(struct dw_hdmi *hdmi, u8 enable) { hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0, HDMI_PHY_CONF0_GEN2_PDDQ_OFFSET, HDMI_PHY_CONF0_GEN2_PDDQ_MASK); } EXPORT_SYMBOL_GPL(dw_hdmi_phy_gen2_pddq); void dw_hdmi_phy_gen2_txpwron(struct dw_hdmi *hdmi, u8 enable) { hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0, HDMI_PHY_CONF0_GEN2_TXPWRON_OFFSET, HDMI_PHY_CONF0_GEN2_TXPWRON_MASK); } EXPORT_SYMBOL_GPL(dw_hdmi_phy_gen2_txpwron); static void dw_hdmi_phy_sel_data_en_pol(struct dw_hdmi *hdmi, u8 enable) { hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0, HDMI_PHY_CONF0_SELDATAENPOL_OFFSET, HDMI_PHY_CONF0_SELDATAENPOL_MASK); } static void dw_hdmi_phy_sel_interface_control(struct dw_hdmi *hdmi, u8 enable) { hdmi_mask_writeb(hdmi, enable, HDMI_PHY_CONF0, HDMI_PHY_CONF0_SELDIPIF_OFFSET, 
HDMI_PHY_CONF0_SELDIPIF_MASK); } void dw_hdmi_phy_gen1_reset(struct dw_hdmi *hdmi) { /* PHY reset. The reset signal is active low on Gen1 PHYs. */ hdmi_writeb(hdmi, 0, HDMI_MC_PHYRSTZ); hdmi_writeb(hdmi, HDMI_MC_PHYRSTZ_PHYRSTZ, HDMI_MC_PHYRSTZ); } EXPORT_SYMBOL_GPL(dw_hdmi_phy_gen1_reset); void dw_hdmi_phy_gen2_reset(struct dw_hdmi *hdmi) { /* PHY reset. The reset signal is active high on Gen2 PHYs. */ hdmi_writeb(hdmi, HDMI_MC_PHYRSTZ_PHYRSTZ, HDMI_MC_PHYRSTZ); hdmi_writeb(hdmi, 0, HDMI_MC_PHYRSTZ); } EXPORT_SYMBOL_GPL(dw_hdmi_phy_gen2_reset); void dw_hdmi_phy_i2c_set_addr(struct dw_hdmi *hdmi, u8 address) { hdmi_phy_test_clear(hdmi, 1); hdmi_writeb(hdmi, address, HDMI_PHY_I2CM_SLAVE_ADDR); hdmi_phy_test_clear(hdmi, 0); } EXPORT_SYMBOL_GPL(dw_hdmi_phy_i2c_set_addr); static void dw_hdmi_phy_power_off(struct dw_hdmi *hdmi) { const struct dw_hdmi_phy_data *phy = hdmi->phy.data; unsigned int i; u16 val; if (phy->gen == 1) { dw_hdmi_phy_enable_tmds(hdmi, 0); dw_hdmi_phy_enable_powerdown(hdmi, true); return; } dw_hdmi_phy_gen2_txpwron(hdmi, 0); /* * Wait for TX_PHY_LOCK to be deasserted to indicate that the PHY went * to low power mode. */ for (i = 0; i < 5; ++i) { val = hdmi_readb(hdmi, HDMI_PHY_STAT0); if (!(val & HDMI_PHY_TX_PHY_LOCK)) break; usleep_range(1000, 2000); } if (val & HDMI_PHY_TX_PHY_LOCK) dev_warn(hdmi->dev, "PHY failed to power down\n"); else dev_dbg(hdmi->dev, "PHY powered down in %u iterations\n", i); dw_hdmi_phy_gen2_pddq(hdmi, 1); } static int dw_hdmi_phy_power_on(struct dw_hdmi *hdmi) { const struct dw_hdmi_phy_data *phy = hdmi->phy.data; unsigned int i; u8 val; if (phy->gen == 1) { dw_hdmi_phy_enable_powerdown(hdmi, false); /* Toggle TMDS enable. 
*/ dw_hdmi_phy_enable_tmds(hdmi, 0); dw_hdmi_phy_enable_tmds(hdmi, 1); return 0; } dw_hdmi_phy_gen2_txpwron(hdmi, 1); dw_hdmi_phy_gen2_pddq(hdmi, 0); /* Wait for PHY PLL lock */ for (i = 0; i < 5; ++i) { val = hdmi_readb(hdmi, HDMI_PHY_STAT0) & HDMI_PHY_TX_PHY_LOCK; if (val) break; usleep_range(1000, 2000); } if (!val) { dev_err(hdmi->dev, "PHY PLL failed to lock\n"); return -ETIMEDOUT; } dev_dbg(hdmi->dev, "PHY PLL locked %u iterations\n", i); return 0; } /* * PHY configuration function for the DWC HDMI 3D TX PHY. Based on the available * information the DWC MHL PHY has the same register layout and is thus also * supported by this function. */ static int hdmi_phy_configure_dwc_hdmi_3d_tx(struct dw_hdmi *hdmi, const struct dw_hdmi_plat_data *pdata, unsigned long mpixelclock) { const struct dw_hdmi_mpll_config *mpll_config = pdata->mpll_cfg; const struct dw_hdmi_curr_ctrl *curr_ctrl = pdata->cur_ctr; const struct dw_hdmi_phy_config *phy_config = pdata->phy_config; /* TOFIX Will need 420 specific PHY configuration tables */ /* PLL/MPLL Cfg - always match on final entry */ for (; mpll_config->mpixelclock != ~0UL; mpll_config++) if (mpixelclock <= mpll_config->mpixelclock) break; for (; curr_ctrl->mpixelclock != ~0UL; curr_ctrl++) if (mpixelclock <= curr_ctrl->mpixelclock) break; for (; phy_config->mpixelclock != ~0UL; phy_config++) if (mpixelclock <= phy_config->mpixelclock) break; if (mpll_config->mpixelclock == ~0UL || curr_ctrl->mpixelclock == ~0UL || phy_config->mpixelclock == ~0UL) return -EINVAL; dw_hdmi_phy_i2c_write(hdmi, mpll_config->res[0].cpce, HDMI_3D_TX_PHY_CPCE_CTRL); dw_hdmi_phy_i2c_write(hdmi, mpll_config->res[0].gmp, HDMI_3D_TX_PHY_GMPCTRL); dw_hdmi_phy_i2c_write(hdmi, curr_ctrl->curr[0], HDMI_3D_TX_PHY_CURRCTRL); dw_hdmi_phy_i2c_write(hdmi, 0, HDMI_3D_TX_PHY_PLLPHBYCTRL); dw_hdmi_phy_i2c_write(hdmi, HDMI_3D_TX_PHY_MSM_CTRL_CKO_SEL_FB_CLK, HDMI_3D_TX_PHY_MSM_CTRL); dw_hdmi_phy_i2c_write(hdmi, phy_config->term, HDMI_3D_TX_PHY_TXTERM); 
dw_hdmi_phy_i2c_write(hdmi, phy_config->sym_ctr, HDMI_3D_TX_PHY_CKSYMTXCTRL); dw_hdmi_phy_i2c_write(hdmi, phy_config->vlev_ctr, HDMI_3D_TX_PHY_VLEVCTRL); /* Override and disable clock termination. */ dw_hdmi_phy_i2c_write(hdmi, HDMI_3D_TX_PHY_CKCALCTRL_OVERRIDE, HDMI_3D_TX_PHY_CKCALCTRL); return 0; } static int hdmi_phy_configure(struct dw_hdmi *hdmi, const struct drm_display_info *display) { const struct dw_hdmi_phy_data *phy = hdmi->phy.data; const struct dw_hdmi_plat_data *pdata = hdmi->plat_data; unsigned long mpixelclock = hdmi->hdmi_data.video_mode.mpixelclock; unsigned long mtmdsclock = hdmi->hdmi_data.video_mode.mtmdsclock; int ret; dw_hdmi_phy_power_off(hdmi); dw_hdmi_set_high_tmds_clock_ratio(hdmi, display); /* Leave low power consumption mode by asserting SVSRET. */ if (phy->has_svsret) dw_hdmi_phy_enable_svsret(hdmi, 1); dw_hdmi_phy_gen2_reset(hdmi); hdmi_writeb(hdmi, HDMI_MC_HEACPHY_RST_ASSERT, HDMI_MC_HEACPHY_RST); dw_hdmi_phy_i2c_set_addr(hdmi, HDMI_PHY_I2CM_SLAVE_ADDR_PHY_GEN2); /* Write to the PHY as configured by the platform */ if (pdata->configure_phy) ret = pdata->configure_phy(hdmi, pdata->priv_data, mpixelclock); else ret = phy->configure(hdmi, pdata, mpixelclock); if (ret) { dev_err(hdmi->dev, "PHY configuration failed (clock %lu)\n", mpixelclock); return ret; } /* Wait for resuming transmission of TMDS clock and data */ if (mtmdsclock > HDMI14_MAX_TMDSCLK) msleep(100); return dw_hdmi_phy_power_on(hdmi); } static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data, const struct drm_display_info *display, const struct drm_display_mode *mode) { int i, ret; /* HDMI Phy spec says to do the phy initialization sequence twice */ for (i = 0; i < 2; i++) { dw_hdmi_phy_sel_data_en_pol(hdmi, 1); dw_hdmi_phy_sel_interface_control(hdmi, 0); ret = hdmi_phy_configure(hdmi, display); if (ret) return ret; } return 0; } static void dw_hdmi_phy_disable(struct dw_hdmi *hdmi, void *data) { dw_hdmi_phy_power_off(hdmi); } enum drm_connector_status 
dw_hdmi_phy_read_hpd(struct dw_hdmi *hdmi, void *data) { return hdmi_readb(hdmi, HDMI_PHY_STAT0) & HDMI_PHY_HPD ? connector_status_connected : connector_status_disconnected; } EXPORT_SYMBOL_GPL(dw_hdmi_phy_read_hpd); void dw_hdmi_phy_update_hpd(struct dw_hdmi *hdmi, void *data, bool force, bool disabled, bool rxsense) { u8 old_mask = hdmi->phy_mask; if (force || disabled || !rxsense) hdmi->phy_mask |= HDMI_PHY_RX_SENSE; else hdmi->phy_mask &= ~HDMI_PHY_RX_SENSE; if (old_mask != hdmi->phy_mask) hdmi_writeb(hdmi, hdmi->phy_mask, HDMI_PHY_MASK0); } EXPORT_SYMBOL_GPL(dw_hdmi_phy_update_hpd); void dw_hdmi_phy_setup_hpd(struct dw_hdmi *hdmi, void *data) { /* * Configure the PHY RX SENSE and HPD interrupts polarities and clear * any pending interrupt. */ hdmi_writeb(hdmi, HDMI_PHY_HPD | HDMI_PHY_RX_SENSE, HDMI_PHY_POL0); hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE, HDMI_IH_PHY_STAT0); /* Enable cable hot plug irq. */ hdmi_writeb(hdmi, hdmi->phy_mask, HDMI_PHY_MASK0); /* Clear and unmute interrupts. 
*/ hdmi_writeb(hdmi, HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE, HDMI_IH_PHY_STAT0); hdmi_writeb(hdmi, ~(HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE), HDMI_IH_MUTE_PHY_STAT0); } EXPORT_SYMBOL_GPL(dw_hdmi_phy_setup_hpd); static const struct dw_hdmi_phy_ops dw_hdmi_synopsys_phy_ops = { .init = dw_hdmi_phy_init, .disable = dw_hdmi_phy_disable, .read_hpd = dw_hdmi_phy_read_hpd, .update_hpd = dw_hdmi_phy_update_hpd, .setup_hpd = dw_hdmi_phy_setup_hpd, }; /* ----------------------------------------------------------------------------- * HDMI TX Setup */ static void hdmi_tx_hdcp_config(struct dw_hdmi *hdmi) { u8 de; if (hdmi->hdmi_data.video_mode.mdataenablepolarity) de = HDMI_A_VIDPOLCFG_DATAENPOL_ACTIVE_HIGH; else de = HDMI_A_VIDPOLCFG_DATAENPOL_ACTIVE_LOW; /* disable rx detect */ hdmi_modb(hdmi, HDMI_A_HDCPCFG0_RXDETECT_DISABLE, HDMI_A_HDCPCFG0_RXDETECT_MASK, HDMI_A_HDCPCFG0); hdmi_modb(hdmi, de, HDMI_A_VIDPOLCFG_DATAENPOL_MASK, HDMI_A_VIDPOLCFG); hdmi_modb(hdmi, HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_DISABLE, HDMI_A_HDCPCFG1_ENCRYPTIONDISABLE_MASK, HDMI_A_HDCPCFG1); } static void hdmi_config_AVI(struct dw_hdmi *hdmi, const struct drm_connector *connector, const struct drm_display_mode *mode) { struct hdmi_avi_infoframe frame; u8 val; /* Initialise info frame from DRM mode */ drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode); if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) { drm_hdmi_avi_infoframe_quant_range(&frame, connector, mode, hdmi->hdmi_data.rgb_limited_range ? 
HDMI_QUANTIZATION_RANGE_LIMITED : HDMI_QUANTIZATION_RANGE_FULL); } else { frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT; frame.ycc_quantization_range = HDMI_YCC_QUANTIZATION_RANGE_LIMITED; } if (hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_out_bus_format)) frame.colorspace = HDMI_COLORSPACE_YUV444; else if (hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_out_bus_format)) frame.colorspace = HDMI_COLORSPACE_YUV422; else if (hdmi_bus_fmt_is_yuv420(hdmi->hdmi_data.enc_out_bus_format)) frame.colorspace = HDMI_COLORSPACE_YUV420; else frame.colorspace = HDMI_COLORSPACE_RGB; /* Set up colorimetry */ if (!hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) { switch (hdmi->hdmi_data.enc_out_encoding) { case V4L2_YCBCR_ENC_601: if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601) frame.colorimetry = HDMI_COLORIMETRY_EXTENDED; else frame.colorimetry = HDMI_COLORIMETRY_ITU_601; frame.extended_colorimetry = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601; break; case V4L2_YCBCR_ENC_709: if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709) frame.colorimetry = HDMI_COLORIMETRY_EXTENDED; else frame.colorimetry = HDMI_COLORIMETRY_ITU_709; frame.extended_colorimetry = HDMI_EXTENDED_COLORIMETRY_XV_YCC_709; break; default: /* Carries no data */ frame.colorimetry = HDMI_COLORIMETRY_ITU_601; frame.extended_colorimetry = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601; break; } } else { frame.colorimetry = HDMI_COLORIMETRY_NONE; frame.extended_colorimetry = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601; } /* * The Designware IP uses a different byte format from standard * AVI info frames, though generally the bits are in the correct * bytes. */ /* * AVI data byte 1 differences: Colorspace in bits 0,1 rather than 5,6, * scan info in bits 4,5 rather than 0,1 and active aspect present in * bit 6 rather than 4. 
*/ val = (frame.scan_mode & 3) << 4 | (frame.colorspace & 3); if (frame.active_aspect & 15) val |= HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT; if (frame.top_bar || frame.bottom_bar) val |= HDMI_FC_AVICONF0_BAR_DATA_HORIZ_BAR; if (frame.left_bar || frame.right_bar) val |= HDMI_FC_AVICONF0_BAR_DATA_VERT_BAR; hdmi_writeb(hdmi, val, HDMI_FC_AVICONF0); /* AVI data byte 2 differences: none */ val = ((frame.colorimetry & 0x3) << 6) | ((frame.picture_aspect & 0x3) << 4) | (frame.active_aspect & 0xf); hdmi_writeb(hdmi, val, HDMI_FC_AVICONF1); /* AVI data byte 3 differences: none */ val = ((frame.extended_colorimetry & 0x7) << 4) | ((frame.quantization_range & 0x3) << 2) | (frame.nups & 0x3); if (frame.itc) val |= HDMI_FC_AVICONF2_IT_CONTENT_VALID; hdmi_writeb(hdmi, val, HDMI_FC_AVICONF2); /* AVI data byte 4 differences: none */ val = frame.video_code & 0x7f; hdmi_writeb(hdmi, val, HDMI_FC_AVIVID); /* AVI Data Byte 5- set up input and output pixel repetition */ val = (((hdmi->hdmi_data.video_mode.mpixelrepetitioninput + 1) << HDMI_FC_PRCONF_INCOMING_PR_FACTOR_OFFSET) & HDMI_FC_PRCONF_INCOMING_PR_FACTOR_MASK) | ((hdmi->hdmi_data.video_mode.mpixelrepetitionoutput << HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_OFFSET) & HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_MASK); hdmi_writeb(hdmi, val, HDMI_FC_PRCONF); /* * AVI data byte 5 differences: content type in 0,1 rather than 4,5, * ycc range in bits 2,3 rather than 6,7 */ val = ((frame.ycc_quantization_range & 0x3) << 2) | (frame.content_type & 0x3); hdmi_writeb(hdmi, val, HDMI_FC_AVICONF3); /* AVI Data Bytes 6-13 */ hdmi_writeb(hdmi, frame.top_bar & 0xff, HDMI_FC_AVIETB0); hdmi_writeb(hdmi, (frame.top_bar >> 8) & 0xff, HDMI_FC_AVIETB1); hdmi_writeb(hdmi, frame.bottom_bar & 0xff, HDMI_FC_AVISBB0); hdmi_writeb(hdmi, (frame.bottom_bar >> 8) & 0xff, HDMI_FC_AVISBB1); hdmi_writeb(hdmi, frame.left_bar & 0xff, HDMI_FC_AVIELB0); hdmi_writeb(hdmi, (frame.left_bar >> 8) & 0xff, HDMI_FC_AVIELB1); hdmi_writeb(hdmi, frame.right_bar & 0xff, HDMI_FC_AVISRB0); 
hdmi_writeb(hdmi, (frame.right_bar >> 8) & 0xff, HDMI_FC_AVISRB1); } static void hdmi_config_vendor_specific_infoframe(struct dw_hdmi *hdmi, const struct drm_connector *connector, const struct drm_display_mode *mode) { struct hdmi_vendor_infoframe frame; u8 buffer[10]; ssize_t err; err = drm_hdmi_vendor_infoframe_from_display_mode(&frame, connector, mode); if (err < 0) /* * Going into that statement does not means vendor infoframe * fails. It just informed us that vendor infoframe is not * needed for the selected mode. Only 4k or stereoscopic 3D * mode requires vendor infoframe. So just simply return. */ return; err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer)); if (err < 0) { dev_err(hdmi->dev, "Failed to pack vendor infoframe: %zd\n", err); return; } hdmi_mask_writeb(hdmi, 0, HDMI_FC_DATAUTO0, HDMI_FC_DATAUTO0_VSD_OFFSET, HDMI_FC_DATAUTO0_VSD_MASK); /* Set the length of HDMI vendor specific InfoFrame payload */ hdmi_writeb(hdmi, buffer[2], HDMI_FC_VSDSIZE); /* Set 24bit IEEE Registration Identifier */ hdmi_writeb(hdmi, buffer[4], HDMI_FC_VSDIEEEID0); hdmi_writeb(hdmi, buffer[5], HDMI_FC_VSDIEEEID1); hdmi_writeb(hdmi, buffer[6], HDMI_FC_VSDIEEEID2); /* Set HDMI_Video_Format and HDMI_VIC/3D_Structure */ hdmi_writeb(hdmi, buffer[7], HDMI_FC_VSDPAYLOAD0); hdmi_writeb(hdmi, buffer[8], HDMI_FC_VSDPAYLOAD1); if (frame.s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF) hdmi_writeb(hdmi, buffer[9], HDMI_FC_VSDPAYLOAD2); /* Packet frame interpolation */ hdmi_writeb(hdmi, 1, HDMI_FC_DATAUTO1); /* Auto packets per frame and line spacing */ hdmi_writeb(hdmi, 0x11, HDMI_FC_DATAUTO2); /* Configures the Frame Composer On RDRB mode */ hdmi_mask_writeb(hdmi, 1, HDMI_FC_DATAUTO0, HDMI_FC_DATAUTO0_VSD_OFFSET, HDMI_FC_DATAUTO0_VSD_MASK); } static void hdmi_config_drm_infoframe(struct dw_hdmi *hdmi, const struct drm_connector *connector) { const struct drm_connector_state *conn_state = connector->state; struct hdmi_drm_infoframe frame; u8 buffer[30]; ssize_t err; int i; 
if (!hdmi->plat_data->use_drm_infoframe) return; hdmi_modb(hdmi, HDMI_FC_PACKET_TX_EN_DRM_DISABLE, HDMI_FC_PACKET_TX_EN_DRM_MASK, HDMI_FC_PACKET_TX_EN); err = drm_hdmi_infoframe_set_hdr_metadata(&frame, conn_state); if (err < 0) return; err = hdmi_drm_infoframe_pack(&frame, buffer, sizeof(buffer)); if (err < 0) { dev_err(hdmi->dev, "Failed to pack drm infoframe: %zd\n", err); return; } hdmi_writeb(hdmi, frame.version, HDMI_FC_DRM_HB0); hdmi_writeb(hdmi, frame.length, HDMI_FC_DRM_HB1); for (i = 0; i < frame.length; i++) hdmi_writeb(hdmi, buffer[4 + i], HDMI_FC_DRM_PB0 + i); hdmi_writeb(hdmi, 1, HDMI_FC_DRM_UP); hdmi_modb(hdmi, HDMI_FC_PACKET_TX_EN_DRM_ENABLE, HDMI_FC_PACKET_TX_EN_DRM_MASK, HDMI_FC_PACKET_TX_EN); } static void hdmi_av_composer(struct dw_hdmi *hdmi, const struct drm_display_info *display, const struct drm_display_mode *mode) { u8 inv_val, bytes; const struct drm_hdmi_info *hdmi_info = &display->hdmi; struct hdmi_vmode *vmode = &hdmi->hdmi_data.video_mode; int hblank, vblank, h_de_hs, v_de_vs, hsync_len, vsync_len; unsigned int vdisplay, hdisplay; vmode->mpixelclock = mode->clock * 1000; dev_dbg(hdmi->dev, "final pixclk = %d\n", vmode->mpixelclock); vmode->mtmdsclock = vmode->mpixelclock; if (!hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_out_bus_format)) { switch (hdmi_bus_fmt_color_depth( hdmi->hdmi_data.enc_out_bus_format)) { case 16: vmode->mtmdsclock = vmode->mpixelclock * 2; break; case 12: vmode->mtmdsclock = vmode->mpixelclock * 3 / 2; break; case 10: vmode->mtmdsclock = vmode->mpixelclock * 5 / 4; break; } } if (hdmi_bus_fmt_is_yuv420(hdmi->hdmi_data.enc_out_bus_format)) vmode->mtmdsclock /= 2; dev_dbg(hdmi->dev, "final tmdsclock = %d\n", vmode->mtmdsclock); /* Set up HDMI_FC_INVIDCONF */ inv_val = (hdmi->hdmi_data.hdcp_enable || (dw_hdmi_support_scdc(hdmi, display) && (vmode->mtmdsclock > HDMI14_MAX_TMDSCLK || hdmi_info->scdc.scrambling.low_rates)) ? 
HDMI_FC_INVIDCONF_HDCP_KEEPOUT_ACTIVE : HDMI_FC_INVIDCONF_HDCP_KEEPOUT_INACTIVE); inv_val |= mode->flags & DRM_MODE_FLAG_PVSYNC ? HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_HIGH : HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_LOW; inv_val |= mode->flags & DRM_MODE_FLAG_PHSYNC ? HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_HIGH : HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_LOW; inv_val |= (vmode->mdataenablepolarity ? HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_HIGH : HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_LOW); if (hdmi->vic == 39) inv_val |= HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH; else inv_val |= mode->flags & DRM_MODE_FLAG_INTERLACE ? HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH : HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_LOW; inv_val |= mode->flags & DRM_MODE_FLAG_INTERLACE ? HDMI_FC_INVIDCONF_IN_I_P_INTERLACED : HDMI_FC_INVIDCONF_IN_I_P_PROGRESSIVE; inv_val |= hdmi->sink_is_hdmi ? HDMI_FC_INVIDCONF_DVI_MODEZ_HDMI_MODE : HDMI_FC_INVIDCONF_DVI_MODEZ_DVI_MODE; hdmi_writeb(hdmi, inv_val, HDMI_FC_INVIDCONF); hdisplay = mode->hdisplay; hblank = mode->htotal - mode->hdisplay; h_de_hs = mode->hsync_start - mode->hdisplay; hsync_len = mode->hsync_end - mode->hsync_start; /* * When we're setting a YCbCr420 mode, we need * to adjust the horizontal timing to suit. */ if (hdmi_bus_fmt_is_yuv420(hdmi->hdmi_data.enc_out_bus_format)) { hdisplay /= 2; hblank /= 2; h_de_hs /= 2; hsync_len /= 2; } vdisplay = mode->vdisplay; vblank = mode->vtotal - mode->vdisplay; v_de_vs = mode->vsync_start - mode->vdisplay; vsync_len = mode->vsync_end - mode->vsync_start; /* * When we're setting an interlaced mode, we need * to adjust the vertical timing to suit. 
*/ if (mode->flags & DRM_MODE_FLAG_INTERLACE) { vdisplay /= 2; vblank /= 2; v_de_vs /= 2; vsync_len /= 2; } /* Scrambling Control */ if (dw_hdmi_support_scdc(hdmi, display)) { if (vmode->mtmdsclock > HDMI14_MAX_TMDSCLK || hdmi_info->scdc.scrambling.low_rates) { /* * HDMI2.0 Specifies the following procedure: * After the Source Device has determined that * SCDC_Present is set (=1), the Source Device should * write the accurate Version of the Source Device * to the Source Version field in the SCDCS. * Source Devices compliant shall set the * Source Version = 1. */ drm_scdc_readb(hdmi->ddc, SCDC_SINK_VERSION, &bytes); drm_scdc_writeb(hdmi->ddc, SCDC_SOURCE_VERSION, min_t(u8, bytes, SCDC_MIN_SOURCE_VERSION)); /* Enabled Scrambling in the Sink */ drm_scdc_set_scrambling(hdmi->curr_conn, 1); /* * To activate the scrambler feature, you must ensure * that the quasi-static configuration bit * fc_invidconf.HDCP_keepout is set at configuration * time, before the required mc_swrstzreq.tmdsswrst_req * reset request is issued. 
*/ hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ, HDMI_MC_SWRSTZ); hdmi_writeb(hdmi, 1, HDMI_FC_SCRAMBLER_CTRL); } else { hdmi_writeb(hdmi, 0, HDMI_FC_SCRAMBLER_CTRL); hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ, HDMI_MC_SWRSTZ); drm_scdc_set_scrambling(hdmi->curr_conn, 0); } } /* Set up horizontal active pixel width */ hdmi_writeb(hdmi, hdisplay >> 8, HDMI_FC_INHACTV1); hdmi_writeb(hdmi, hdisplay, HDMI_FC_INHACTV0); /* Set up vertical active lines */ hdmi_writeb(hdmi, vdisplay >> 8, HDMI_FC_INVACTV1); hdmi_writeb(hdmi, vdisplay, HDMI_FC_INVACTV0); /* Set up horizontal blanking pixel region width */ hdmi_writeb(hdmi, hblank >> 8, HDMI_FC_INHBLANK1); hdmi_writeb(hdmi, hblank, HDMI_FC_INHBLANK0); /* Set up vertical blanking pixel region width */ hdmi_writeb(hdmi, vblank, HDMI_FC_INVBLANK); /* Set up HSYNC active edge delay width (in pixel clks) */ hdmi_writeb(hdmi, h_de_hs >> 8, HDMI_FC_HSYNCINDELAY1); hdmi_writeb(hdmi, h_de_hs, HDMI_FC_HSYNCINDELAY0); /* Set up VSYNC active edge delay (in lines) */ hdmi_writeb(hdmi, v_de_vs, HDMI_FC_VSYNCINDELAY); /* Set up HSYNC active pulse width (in pixel clks) */ hdmi_writeb(hdmi, hsync_len >> 8, HDMI_FC_HSYNCINWIDTH1); hdmi_writeb(hdmi, hsync_len, HDMI_FC_HSYNCINWIDTH0); /* Set up VSYNC active edge delay (in lines) */ hdmi_writeb(hdmi, vsync_len, HDMI_FC_VSYNCINWIDTH); } /* HDMI Initialization Step B.4 */ static void dw_hdmi_enable_video_path(struct dw_hdmi *hdmi) { /* control period minimum duration */ hdmi_writeb(hdmi, 12, HDMI_FC_CTRLDUR); hdmi_writeb(hdmi, 32, HDMI_FC_EXCTRLDUR); hdmi_writeb(hdmi, 1, HDMI_FC_EXCTRLSPAC); /* Set to fill TMDS data channels */ hdmi_writeb(hdmi, 0x0B, HDMI_FC_CH0PREAM); hdmi_writeb(hdmi, 0x16, HDMI_FC_CH1PREAM); hdmi_writeb(hdmi, 0x21, HDMI_FC_CH2PREAM); /* Enable pixel clock and tmds data path */ hdmi->mc_clkdis |= HDMI_MC_CLKDIS_HDCPCLK_DISABLE | HDMI_MC_CLKDIS_CSCCLK_DISABLE | HDMI_MC_CLKDIS_AUDCLK_DISABLE | HDMI_MC_CLKDIS_PREPCLK_DISABLE | HDMI_MC_CLKDIS_TMDSCLK_DISABLE; 
hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_PIXELCLK_DISABLE; hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS); hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_TMDSCLK_DISABLE; hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS); /* Enable csc path */ if (is_csc_needed(hdmi)) { hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_CSCCLK_DISABLE; hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS); hdmi_writeb(hdmi, HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_IN_PATH, HDMI_MC_FLOWCTRL); } else { hdmi->mc_clkdis |= HDMI_MC_CLKDIS_CSCCLK_DISABLE; hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS); hdmi_writeb(hdmi, HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_BYPASS, HDMI_MC_FLOWCTRL); } } /* Workaround to clear the overflow condition */ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi) { unsigned int count; unsigned int i; u8 val; /* * Under some circumstances the Frame Composer arithmetic unit can miss * an FC register write due to being busy processing the previous one. * The issue can be worked around by issuing a TMDS software reset and * then write one of the FC registers several times. * * The number of iterations matters and depends on the HDMI TX revision * (and possibly on the platform). * 4 iterations for i.MX6Q(v1.30a) and 1 iteration for others. * i.MX6DL (v1.31a), Allwinner SoCs (v1.32a), Rockchip RK3288 SoC (v2.00a), * Amlogic Meson GX SoCs (v2.01a), RK3328/RK3399 SoCs (v2.11a) * and i.MX8MPlus (v2.13a) have been identified as needing the workaround * with a single iteration. 
*/ switch (hdmi->version) { case 0x130a: count = 4; break; default: count = 1; break; } /* TMDS software reset */ hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ, HDMI_MC_SWRSTZ); val = hdmi_readb(hdmi, HDMI_FC_INVIDCONF); for (i = 0; i < count; i++) hdmi_writeb(hdmi, val, HDMI_FC_INVIDCONF); } static void hdmi_disable_overflow_interrupts(struct dw_hdmi *hdmi) { hdmi_writeb(hdmi, HDMI_IH_MUTE_FC_STAT2_OVERFLOW_MASK, HDMI_IH_MUTE_FC_STAT2); } static int dw_hdmi_setup(struct dw_hdmi *hdmi, const struct drm_connector *connector, const struct drm_display_mode *mode) { int ret; hdmi_disable_overflow_interrupts(hdmi); hdmi->vic = drm_match_cea_mode(mode); if (!hdmi->vic) { dev_dbg(hdmi->dev, "Non-CEA mode used in HDMI\n"); } else { dev_dbg(hdmi->dev, "CEA mode used vic=%d\n", hdmi->vic); } if ((hdmi->vic == 6) || (hdmi->vic == 7) || (hdmi->vic == 21) || (hdmi->vic == 22) || (hdmi->vic == 2) || (hdmi->vic == 3) || (hdmi->vic == 17) || (hdmi->vic == 18)) hdmi->hdmi_data.enc_out_encoding = V4L2_YCBCR_ENC_601; else hdmi->hdmi_data.enc_out_encoding = V4L2_YCBCR_ENC_709; hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 0; hdmi->hdmi_data.video_mode.mpixelrepetitioninput = 0; if (hdmi->hdmi_data.enc_in_bus_format == MEDIA_BUS_FMT_FIXED) hdmi->hdmi_data.enc_in_bus_format = MEDIA_BUS_FMT_RGB888_1X24; /* TOFIX: Get input encoding from plat data or fallback to none */ if (hdmi->plat_data->input_bus_encoding) hdmi->hdmi_data.enc_in_encoding = hdmi->plat_data->input_bus_encoding; else hdmi->hdmi_data.enc_in_encoding = V4L2_YCBCR_ENC_DEFAULT; if (hdmi->hdmi_data.enc_out_bus_format == MEDIA_BUS_FMT_FIXED) hdmi->hdmi_data.enc_out_bus_format = MEDIA_BUS_FMT_RGB888_1X24; hdmi->hdmi_data.rgb_limited_range = hdmi->sink_is_hdmi && drm_default_rgb_quant_range(mode) == HDMI_QUANTIZATION_RANGE_LIMITED; hdmi->hdmi_data.pix_repet_factor = 0; hdmi->hdmi_data.hdcp_enable = 0; hdmi->hdmi_data.video_mode.mdataenablepolarity = true; /* HDMI Initialization Step B.1 */ hdmi_av_composer(hdmi, 
&connector->display_info, mode); /* HDMI Initializateion Step B.2 */ ret = hdmi->phy.ops->init(hdmi, hdmi->phy.data, &connector->display_info, &hdmi->previous_mode); if (ret) return ret; hdmi->phy.enabled = true; /* HDMI Initialization Step B.3 */ dw_hdmi_enable_video_path(hdmi); if (hdmi->sink_has_audio) { dev_dbg(hdmi->dev, "sink has audio support\n"); /* HDMI Initialization Step E - Configure audio */ hdmi_clk_regenerator_update_pixel_clock(hdmi); hdmi_enable_audio_clk(hdmi, hdmi->audio_enable); } /* not for DVI mode */ if (hdmi->sink_is_hdmi) { dev_dbg(hdmi->dev, "%s HDMI mode\n", __func__); /* HDMI Initialization Step F - Configure AVI InfoFrame */ hdmi_config_AVI(hdmi, connector, mode); hdmi_config_vendor_specific_infoframe(hdmi, connector, mode); hdmi_config_drm_infoframe(hdmi, connector); } else { dev_dbg(hdmi->dev, "%s DVI mode\n", __func__); } hdmi_video_packetize(hdmi); hdmi_video_csc(hdmi); hdmi_video_sample(hdmi); hdmi_tx_hdcp_config(hdmi); dw_hdmi_clear_overflow(hdmi); return 0; } static void initialize_hdmi_ih_mutes(struct dw_hdmi *hdmi) { u8 ih_mute; /* * Boot up defaults are: * HDMI_IH_MUTE = 0x03 (disabled) * HDMI_IH_MUTE_* = 0x00 (enabled) * * Disable top level interrupt bits in HDMI block */ ih_mute = hdmi_readb(hdmi, HDMI_IH_MUTE) | HDMI_IH_MUTE_MUTE_WAKEUP_INTERRUPT | HDMI_IH_MUTE_MUTE_ALL_INTERRUPT; hdmi_writeb(hdmi, ih_mute, HDMI_IH_MUTE); /* by default mask all interrupts */ hdmi_writeb(hdmi, 0xff, HDMI_VP_MASK); hdmi_writeb(hdmi, 0xff, HDMI_FC_MASK0); hdmi_writeb(hdmi, 0xff, HDMI_FC_MASK1); hdmi_writeb(hdmi, 0xff, HDMI_FC_MASK2); hdmi_writeb(hdmi, 0xff, HDMI_PHY_MASK0); hdmi_writeb(hdmi, 0xff, HDMI_PHY_I2CM_INT_ADDR); hdmi_writeb(hdmi, 0xff, HDMI_PHY_I2CM_CTLINT_ADDR); hdmi_writeb(hdmi, 0xff, HDMI_AUD_INT); hdmi_writeb(hdmi, 0xff, HDMI_AUD_SPDIFINT); hdmi_writeb(hdmi, 0xff, HDMI_AUD_HBR_MASK); hdmi_writeb(hdmi, 0xff, HDMI_GP_MASK); hdmi_writeb(hdmi, 0xff, HDMI_A_APIINTMSK); hdmi_writeb(hdmi, 0xff, HDMI_I2CM_INT); hdmi_writeb(hdmi, 0xff, 
HDMI_I2CM_CTLINT); /* Disable interrupts in the IH_MUTE_* registers */ hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_FC_STAT0); hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_FC_STAT1); hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_FC_STAT2); hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_AS_STAT0); hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_PHY_STAT0); hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_I2CM_STAT0); hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_CEC_STAT0); hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_VP_STAT0); hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_I2CMPHY_STAT0); hdmi_writeb(hdmi, 0xff, HDMI_IH_MUTE_AHBDMAAUD_STAT0); /* Enable top level interrupt bits in HDMI block */ ih_mute &= ~(HDMI_IH_MUTE_MUTE_WAKEUP_INTERRUPT | HDMI_IH_MUTE_MUTE_ALL_INTERRUPT); hdmi_writeb(hdmi, ih_mute, HDMI_IH_MUTE); } static void dw_hdmi_poweron(struct dw_hdmi *hdmi) { hdmi->bridge_is_on = true; /* * The curr_conn field is guaranteed to be valid here, as this function * is only be called when !hdmi->disabled. */ dw_hdmi_setup(hdmi, hdmi->curr_conn, &hdmi->previous_mode); } static void dw_hdmi_poweroff(struct dw_hdmi *hdmi) { if (hdmi->phy.enabled) { hdmi->phy.ops->disable(hdmi, hdmi->phy.data); hdmi->phy.enabled = false; } hdmi->bridge_is_on = false; } static void dw_hdmi_update_power(struct dw_hdmi *hdmi) { int force = hdmi->force; if (hdmi->disabled) { force = DRM_FORCE_OFF; } else if (force == DRM_FORCE_UNSPECIFIED) { if (hdmi->rxsense) force = DRM_FORCE_ON; else force = DRM_FORCE_OFF; } if (force == DRM_FORCE_OFF) { if (hdmi->bridge_is_on) dw_hdmi_poweroff(hdmi); } else { if (!hdmi->bridge_is_on) dw_hdmi_poweron(hdmi); } } /* * Adjust the detection of RXSENSE according to whether we have a forced * connection mode enabled, or whether we have been disabled. There is * no point processing RXSENSE interrupts if we have a forced connection * state, or DRM has us disabled. * * We also disable rxsense interrupts when we think we're disconnected * to avoid floating TDMS signals giving false rxsense interrupts. 
* * Note: we still need to listen for HPD interrupts even when DRM has us * disabled so that we can detect a connect event. */ static void dw_hdmi_update_phy_mask(struct dw_hdmi *hdmi) { if (hdmi->phy.ops->update_hpd) hdmi->phy.ops->update_hpd(hdmi, hdmi->phy.data, hdmi->force, hdmi->disabled, hdmi->rxsense); } static enum drm_connector_status dw_hdmi_detect(struct dw_hdmi *hdmi) { enum drm_connector_status result; result = hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data); hdmi->last_connector_result = result; return result; } static struct edid *dw_hdmi_get_edid(struct dw_hdmi *hdmi, struct drm_connector *connector) { struct edid *edid; if (!hdmi->ddc) return NULL; edid = drm_get_edid(connector, hdmi->ddc); if (!edid) { dev_dbg(hdmi->dev, "failed to get edid\n"); return NULL; } dev_dbg(hdmi->dev, "got edid: width[%d] x height[%d]\n", edid->width_cm, edid->height_cm); hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid); hdmi->sink_has_audio = drm_detect_monitor_audio(edid); return edid; } /* ----------------------------------------------------------------------------- * DRM Connector Operations */ static enum drm_connector_status dw_hdmi_connector_detect(struct drm_connector *connector, bool force) { struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi, connector); return dw_hdmi_detect(hdmi); } static int dw_hdmi_connector_get_modes(struct drm_connector *connector) { struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi, connector); struct edid *edid; int ret; edid = dw_hdmi_get_edid(hdmi, connector); if (!edid) return 0; drm_connector_update_edid_property(connector, edid); cec_notifier_set_phys_addr_from_edid(hdmi->cec_notifier, edid); ret = drm_add_edid_modes(connector, edid); kfree(edid); return ret; } static int dw_hdmi_connector_atomic_check(struct drm_connector *connector, struct drm_atomic_state *state) { struct drm_connector_state *old_state = drm_atomic_get_old_connector_state(state, connector); struct drm_connector_state *new_state = 
drm_atomic_get_new_connector_state(state, connector); struct drm_crtc *crtc = new_state->crtc; struct drm_crtc_state *crtc_state; if (!crtc) return 0; if (!drm_connector_atomic_hdr_metadata_equal(old_state, new_state)) { crtc_state = drm_atomic_get_crtc_state(state, crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); crtc_state->mode_changed = true; } return 0; } static void dw_hdmi_connector_force(struct drm_connector *connector) { struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi, connector); mutex_lock(&hdmi->mutex); hdmi->force = connector->force; dw_hdmi_update_power(hdmi); dw_hdmi_update_phy_mask(hdmi); mutex_unlock(&hdmi->mutex); } static const struct drm_connector_funcs dw_hdmi_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .detect = dw_hdmi_connector_detect, .destroy = drm_connector_cleanup, .force = dw_hdmi_connector_force, .reset = drm_atomic_helper_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = { .get_modes = dw_hdmi_connector_get_modes, .atomic_check = dw_hdmi_connector_atomic_check, }; static int dw_hdmi_connector_create(struct dw_hdmi *hdmi) { struct drm_connector *connector = &hdmi->connector; struct cec_connector_info conn_info; struct cec_notifier *notifier; if (hdmi->version >= 0x200a) connector->ycbcr_420_allowed = hdmi->plat_data->ycbcr_420_allowed; else connector->ycbcr_420_allowed = false; connector->interlace_allowed = 1; connector->polled = DRM_CONNECTOR_POLL_HPD; drm_connector_helper_add(connector, &dw_hdmi_connector_helper_funcs); drm_connector_init_with_ddc(hdmi->bridge.dev, connector, &dw_hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA, hdmi->ddc); /* * drm_connector_attach_max_bpc_property() requires the * connector to have a state. 
*/ drm_atomic_helper_connector_reset(connector); drm_connector_attach_max_bpc_property(connector, 8, 16); if (hdmi->version >= 0x200a && hdmi->plat_data->use_drm_infoframe) drm_connector_attach_hdr_output_metadata_property(connector); drm_connector_attach_encoder(connector, hdmi->bridge.encoder); cec_fill_conn_info_from_drm(&conn_info, connector); notifier = cec_notifier_conn_register(hdmi->dev, NULL, &conn_info); if (!notifier) return -ENOMEM; mutex_lock(&hdmi->cec_notifier_mutex); hdmi->cec_notifier = notifier; mutex_unlock(&hdmi->cec_notifier_mutex); return 0; } /* ----------------------------------------------------------------------------- * DRM Bridge Operations */ /* * Possible output formats : * - MEDIA_BUS_FMT_UYYVYY16_0_5X48, * - MEDIA_BUS_FMT_UYYVYY12_0_5X36, * - MEDIA_BUS_FMT_UYYVYY10_0_5X30, * - MEDIA_BUS_FMT_UYYVYY8_0_5X24, * - MEDIA_BUS_FMT_YUV16_1X48, * - MEDIA_BUS_FMT_RGB161616_1X48, * - MEDIA_BUS_FMT_UYVY12_1X24, * - MEDIA_BUS_FMT_YUV12_1X36, * - MEDIA_BUS_FMT_RGB121212_1X36, * - MEDIA_BUS_FMT_UYVY10_1X20, * - MEDIA_BUS_FMT_YUV10_1X30, * - MEDIA_BUS_FMT_RGB101010_1X30, * - MEDIA_BUS_FMT_UYVY8_1X16, * - MEDIA_BUS_FMT_YUV8_1X24, * - MEDIA_BUS_FMT_RGB888_1X24, */ /* Can return a maximum of 11 possible output formats for a mode/connector */ #define MAX_OUTPUT_SEL_FORMATS 11 static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state, unsigned int *num_output_fmts) { struct drm_connector *conn = conn_state->connector; struct drm_display_info *info = &conn->display_info; struct drm_display_mode *mode = &crtc_state->mode; u8 max_bpc = conn_state->max_requested_bpc; bool is_hdmi2_sink = info->hdmi.scdc.supported || (info->color_formats & DRM_COLOR_FORMAT_YCBCR420); u32 *output_fmts; unsigned int i = 0; *num_output_fmts = 0; output_fmts = kcalloc(MAX_OUTPUT_SEL_FORMATS, sizeof(*output_fmts), GFP_KERNEL); if (!output_fmts) 
return NULL; /* If dw-hdmi is the first or only bridge, avoid negociating with ourselves */ if (list_is_singular(&bridge->encoder->bridge_chain) || list_is_first(&bridge->chain_node, &bridge->encoder->bridge_chain)) { *num_output_fmts = 1; output_fmts[0] = MEDIA_BUS_FMT_FIXED; return output_fmts; } /* * If the current mode enforces 4:2:0, force the output but format * to 4:2:0 and do not add the YUV422/444/RGB formats */ if (conn->ycbcr_420_allowed && (drm_mode_is_420_only(info, mode) || (is_hdmi2_sink && drm_mode_is_420_also(info, mode)))) { /* Order bus formats from 16bit to 8bit if supported */ if (max_bpc >= 16 && info->bpc == 16 && (info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)) output_fmts[i++] = MEDIA_BUS_FMT_UYYVYY16_0_5X48; if (max_bpc >= 12 && info->bpc >= 12 && (info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)) output_fmts[i++] = MEDIA_BUS_FMT_UYYVYY12_0_5X36; if (max_bpc >= 10 && info->bpc >= 10 && (info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)) output_fmts[i++] = MEDIA_BUS_FMT_UYYVYY10_0_5X30; /* Default 8bit fallback */ output_fmts[i++] = MEDIA_BUS_FMT_UYYVYY8_0_5X24; if (drm_mode_is_420_only(info, mode)) { *num_output_fmts = i; return output_fmts; } } /* * Order bus formats from 16bit to 8bit and from YUV422 to RGB * if supported. 
In any case the default RGB888 format is added */ /* Default 8bit RGB fallback */ output_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24; if (max_bpc >= 16 && info->bpc == 16) { if (info->color_formats & DRM_COLOR_FORMAT_YCBCR444) output_fmts[i++] = MEDIA_BUS_FMT_YUV16_1X48; output_fmts[i++] = MEDIA_BUS_FMT_RGB161616_1X48; } if (max_bpc >= 12 && info->bpc >= 12) { if (info->color_formats & DRM_COLOR_FORMAT_YCBCR422) output_fmts[i++] = MEDIA_BUS_FMT_UYVY12_1X24; if (info->color_formats & DRM_COLOR_FORMAT_YCBCR444) output_fmts[i++] = MEDIA_BUS_FMT_YUV12_1X36; output_fmts[i++] = MEDIA_BUS_FMT_RGB121212_1X36; } if (max_bpc >= 10 && info->bpc >= 10) { if (info->color_formats & DRM_COLOR_FORMAT_YCBCR422) output_fmts[i++] = MEDIA_BUS_FMT_UYVY10_1X20; if (info->color_formats & DRM_COLOR_FORMAT_YCBCR444) output_fmts[i++] = MEDIA_BUS_FMT_YUV10_1X30; output_fmts[i++] = MEDIA_BUS_FMT_RGB101010_1X30; } if (info->color_formats & DRM_COLOR_FORMAT_YCBCR422) output_fmts[i++] = MEDIA_BUS_FMT_UYVY8_1X16; if (info->color_formats & DRM_COLOR_FORMAT_YCBCR444) output_fmts[i++] = MEDIA_BUS_FMT_YUV8_1X24; *num_output_fmts = i; return output_fmts; } /* * Possible input formats : * - MEDIA_BUS_FMT_RGB888_1X24 * - MEDIA_BUS_FMT_YUV8_1X24 * - MEDIA_BUS_FMT_UYVY8_1X16 * - MEDIA_BUS_FMT_UYYVYY8_0_5X24 * - MEDIA_BUS_FMT_RGB101010_1X30 * - MEDIA_BUS_FMT_YUV10_1X30 * - MEDIA_BUS_FMT_UYVY10_1X20 * - MEDIA_BUS_FMT_UYYVYY10_0_5X30 * - MEDIA_BUS_FMT_RGB121212_1X36 * - MEDIA_BUS_FMT_YUV12_1X36 * - MEDIA_BUS_FMT_UYVY12_1X24 * - MEDIA_BUS_FMT_UYYVYY12_0_5X36 * - MEDIA_BUS_FMT_RGB161616_1X48 * - MEDIA_BUS_FMT_YUV16_1X48 * - MEDIA_BUS_FMT_UYYVYY16_0_5X48 */ /* Can return a maximum of 3 possible input formats for an output format */ #define MAX_INPUT_SEL_FORMATS 3 static u32 *dw_hdmi_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state, u32 output_fmt, unsigned int *num_input_fmts) { u32 *input_fmts; 
unsigned int i = 0; *num_input_fmts = 0; input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts), GFP_KERNEL); if (!input_fmts) return NULL; switch (output_fmt) { /* If MEDIA_BUS_FMT_FIXED is tested, return default bus format */ case MEDIA_BUS_FMT_FIXED: input_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24; break; /* 8bit */ case MEDIA_BUS_FMT_RGB888_1X24: input_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24; input_fmts[i++] = MEDIA_BUS_FMT_YUV8_1X24; input_fmts[i++] = MEDIA_BUS_FMT_UYVY8_1X16; break; case MEDIA_BUS_FMT_YUV8_1X24: input_fmts[i++] = MEDIA_BUS_FMT_YUV8_1X24; input_fmts[i++] = MEDIA_BUS_FMT_UYVY8_1X16; input_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24; break; case MEDIA_BUS_FMT_UYVY8_1X16: input_fmts[i++] = MEDIA_BUS_FMT_UYVY8_1X16; input_fmts[i++] = MEDIA_BUS_FMT_YUV8_1X24; input_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24; break; /* 10bit */ case MEDIA_BUS_FMT_RGB101010_1X30: input_fmts[i++] = MEDIA_BUS_FMT_RGB101010_1X30; input_fmts[i++] = MEDIA_BUS_FMT_YUV10_1X30; input_fmts[i++] = MEDIA_BUS_FMT_UYVY10_1X20; break; case MEDIA_BUS_FMT_YUV10_1X30: input_fmts[i++] = MEDIA_BUS_FMT_YUV10_1X30; input_fmts[i++] = MEDIA_BUS_FMT_UYVY10_1X20; input_fmts[i++] = MEDIA_BUS_FMT_RGB101010_1X30; break; case MEDIA_BUS_FMT_UYVY10_1X20: input_fmts[i++] = MEDIA_BUS_FMT_UYVY10_1X20; input_fmts[i++] = MEDIA_BUS_FMT_YUV10_1X30; input_fmts[i++] = MEDIA_BUS_FMT_RGB101010_1X30; break; /* 12bit */ case MEDIA_BUS_FMT_RGB121212_1X36: input_fmts[i++] = MEDIA_BUS_FMT_RGB121212_1X36; input_fmts[i++] = MEDIA_BUS_FMT_YUV12_1X36; input_fmts[i++] = MEDIA_BUS_FMT_UYVY12_1X24; break; case MEDIA_BUS_FMT_YUV12_1X36: input_fmts[i++] = MEDIA_BUS_FMT_YUV12_1X36; input_fmts[i++] = MEDIA_BUS_FMT_UYVY12_1X24; input_fmts[i++] = MEDIA_BUS_FMT_RGB121212_1X36; break; case MEDIA_BUS_FMT_UYVY12_1X24: input_fmts[i++] = MEDIA_BUS_FMT_UYVY12_1X24; input_fmts[i++] = MEDIA_BUS_FMT_YUV12_1X36; input_fmts[i++] = MEDIA_BUS_FMT_RGB121212_1X36; break; /* 16bit */ case MEDIA_BUS_FMT_RGB161616_1X48: input_fmts[i++] = 
MEDIA_BUS_FMT_RGB161616_1X48; input_fmts[i++] = MEDIA_BUS_FMT_YUV16_1X48; break; case MEDIA_BUS_FMT_YUV16_1X48: input_fmts[i++] = MEDIA_BUS_FMT_YUV16_1X48; input_fmts[i++] = MEDIA_BUS_FMT_RGB161616_1X48; break; /*YUV 4:2:0 */ case MEDIA_BUS_FMT_UYYVYY8_0_5X24: case MEDIA_BUS_FMT_UYYVYY10_0_5X30: case MEDIA_BUS_FMT_UYYVYY12_0_5X36: case MEDIA_BUS_FMT_UYYVYY16_0_5X48: input_fmts[i++] = output_fmt; break; } *num_input_fmts = i; if (*num_input_fmts == 0) { kfree(input_fmts); input_fmts = NULL; } return input_fmts; } static int dw_hdmi_bridge_atomic_check(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct dw_hdmi *hdmi = bridge->driver_private; hdmi->hdmi_data.enc_out_bus_format = bridge_state->output_bus_cfg.format; hdmi->hdmi_data.enc_in_bus_format = bridge_state->input_bus_cfg.format; dev_dbg(hdmi->dev, "input format 0x%04x, output format 0x%04x\n", bridge_state->input_bus_cfg.format, bridge_state->output_bus_cfg.format); return 0; } static int dw_hdmi_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct dw_hdmi *hdmi = bridge->driver_private; if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) return drm_bridge_attach(bridge->encoder, hdmi->next_bridge, bridge, flags); return dw_hdmi_connector_create(hdmi); } static void dw_hdmi_bridge_detach(struct drm_bridge *bridge) { struct dw_hdmi *hdmi = bridge->driver_private; mutex_lock(&hdmi->cec_notifier_mutex); cec_notifier_conn_unregister(hdmi->cec_notifier); hdmi->cec_notifier = NULL; mutex_unlock(&hdmi->cec_notifier_mutex); } static enum drm_mode_status dw_hdmi_bridge_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode) { struct dw_hdmi *hdmi = bridge->driver_private; const struct dw_hdmi_plat_data *pdata = hdmi->plat_data; enum drm_mode_status mode_status = MODE_OK; /* We don't support double-clocked modes */ if (mode->flags & 
DRM_MODE_FLAG_DBLCLK) return MODE_BAD; if (pdata->mode_valid) mode_status = pdata->mode_valid(hdmi, pdata->priv_data, info, mode); return mode_status; } static void dw_hdmi_bridge_mode_set(struct drm_bridge *bridge, const struct drm_display_mode *orig_mode, const struct drm_display_mode *mode) { struct dw_hdmi *hdmi = bridge->driver_private; mutex_lock(&hdmi->mutex); /* Store the display mode for plugin/DKMS poweron events */ drm_mode_copy(&hdmi->previous_mode, mode); mutex_unlock(&hdmi->mutex); } static void dw_hdmi_bridge_atomic_disable(struct drm_bridge *bridge, struct drm_bridge_state *old_state) { struct dw_hdmi *hdmi = bridge->driver_private; mutex_lock(&hdmi->mutex); hdmi->disabled = true; hdmi->curr_conn = NULL; dw_hdmi_update_power(hdmi); dw_hdmi_update_phy_mask(hdmi); handle_plugged_change(hdmi, false); mutex_unlock(&hdmi->mutex); } static void dw_hdmi_bridge_atomic_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_state) { struct dw_hdmi *hdmi = bridge->driver_private; struct drm_atomic_state *state = old_state->base.state; struct drm_connector *connector; connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); mutex_lock(&hdmi->mutex); hdmi->disabled = false; hdmi->curr_conn = connector; dw_hdmi_update_power(hdmi); dw_hdmi_update_phy_mask(hdmi); handle_plugged_change(hdmi, true); mutex_unlock(&hdmi->mutex); } static enum drm_connector_status dw_hdmi_bridge_detect(struct drm_bridge *bridge) { struct dw_hdmi *hdmi = bridge->driver_private; return dw_hdmi_detect(hdmi); } static struct edid *dw_hdmi_bridge_get_edid(struct drm_bridge *bridge, struct drm_connector *connector) { struct dw_hdmi *hdmi = bridge->driver_private; return dw_hdmi_get_edid(hdmi, connector); } static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = { .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_reset = drm_atomic_helper_bridge_reset, .attach = 
dw_hdmi_bridge_attach, .detach = dw_hdmi_bridge_detach, .atomic_check = dw_hdmi_bridge_atomic_check, .atomic_get_output_bus_fmts = dw_hdmi_bridge_atomic_get_output_bus_fmts, .atomic_get_input_bus_fmts = dw_hdmi_bridge_atomic_get_input_bus_fmts, .atomic_enable = dw_hdmi_bridge_atomic_enable, .atomic_disable = dw_hdmi_bridge_atomic_disable, .mode_set = dw_hdmi_bridge_mode_set, .mode_valid = dw_hdmi_bridge_mode_valid, .detect = dw_hdmi_bridge_detect, .get_edid = dw_hdmi_bridge_get_edid, }; /* ----------------------------------------------------------------------------- * IRQ Handling */ static irqreturn_t dw_hdmi_i2c_irq(struct dw_hdmi *hdmi) { struct dw_hdmi_i2c *i2c = hdmi->i2c; unsigned int stat; stat = hdmi_readb(hdmi, HDMI_IH_I2CM_STAT0); if (!stat) return IRQ_NONE; hdmi_writeb(hdmi, stat, HDMI_IH_I2CM_STAT0); i2c->stat = stat; complete(&i2c->cmp); return IRQ_HANDLED; } static irqreturn_t dw_hdmi_hardirq(int irq, void *dev_id) { struct dw_hdmi *hdmi = dev_id; u8 intr_stat; irqreturn_t ret = IRQ_NONE; if (hdmi->i2c) ret = dw_hdmi_i2c_irq(hdmi); intr_stat = hdmi_readb(hdmi, HDMI_IH_PHY_STAT0); if (intr_stat) { hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0); return IRQ_WAKE_THREAD; } return ret; } void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense) { mutex_lock(&hdmi->mutex); if (!hdmi->force) { /* * If the RX sense status indicates we're disconnected, * clear the software rxsense status. */ if (!rx_sense) hdmi->rxsense = false; /* * Only set the software rxsense status when both * rxsense and hpd indicates we're connected. * This avoids what seems to be bad behaviour in * at least iMX6S versions of the phy. 
*/ if (hpd) hdmi->rxsense = true; dw_hdmi_update_power(hdmi); dw_hdmi_update_phy_mask(hdmi); } mutex_unlock(&hdmi->mutex); } EXPORT_SYMBOL_GPL(dw_hdmi_setup_rx_sense); static irqreturn_t dw_hdmi_irq(int irq, void *dev_id) { struct dw_hdmi *hdmi = dev_id; u8 intr_stat, phy_int_pol, phy_pol_mask, phy_stat; enum drm_connector_status status = connector_status_unknown; intr_stat = hdmi_readb(hdmi, HDMI_IH_PHY_STAT0); phy_int_pol = hdmi_readb(hdmi, HDMI_PHY_POL0); phy_stat = hdmi_readb(hdmi, HDMI_PHY_STAT0); phy_pol_mask = 0; if (intr_stat & HDMI_IH_PHY_STAT0_HPD) phy_pol_mask |= HDMI_PHY_HPD; if (intr_stat & HDMI_IH_PHY_STAT0_RX_SENSE0) phy_pol_mask |= HDMI_PHY_RX_SENSE0; if (intr_stat & HDMI_IH_PHY_STAT0_RX_SENSE1) phy_pol_mask |= HDMI_PHY_RX_SENSE1; if (intr_stat & HDMI_IH_PHY_STAT0_RX_SENSE2) phy_pol_mask |= HDMI_PHY_RX_SENSE2; if (intr_stat & HDMI_IH_PHY_STAT0_RX_SENSE3) phy_pol_mask |= HDMI_PHY_RX_SENSE3; if (phy_pol_mask) hdmi_modb(hdmi, ~phy_int_pol, phy_pol_mask, HDMI_PHY_POL0); /* * RX sense tells us whether the TDMS transmitters are detecting * load - in other words, there's something listening on the * other end of the link. Use this to decide whether we should * power on the phy as HPD may be toggled by the sink to merely * ask the source to re-read the EDID. */ if (intr_stat & (HDMI_IH_PHY_STAT0_RX_SENSE | HDMI_IH_PHY_STAT0_HPD)) { dw_hdmi_setup_rx_sense(hdmi, phy_stat & HDMI_PHY_HPD, phy_stat & HDMI_PHY_RX_SENSE); if ((phy_stat & (HDMI_PHY_RX_SENSE | HDMI_PHY_HPD)) == 0) { mutex_lock(&hdmi->cec_notifier_mutex); cec_notifier_phys_addr_invalidate(hdmi->cec_notifier); mutex_unlock(&hdmi->cec_notifier_mutex); } if (phy_stat & HDMI_PHY_HPD) status = connector_status_connected; if (!(phy_stat & (HDMI_PHY_HPD | HDMI_PHY_RX_SENSE))) status = connector_status_disconnected; } if (status != connector_status_unknown) { dev_dbg(hdmi->dev, "EVENT=%s\n", status == connector_status_connected ? 
"plugin" : "plugout"); if (hdmi->bridge.dev) { drm_helper_hpd_irq_event(hdmi->bridge.dev); drm_bridge_hpd_notify(&hdmi->bridge, status); } } hdmi_writeb(hdmi, intr_stat, HDMI_IH_PHY_STAT0); hdmi_writeb(hdmi, ~(HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE), HDMI_IH_MUTE_PHY_STAT0); return IRQ_HANDLED; } static const struct dw_hdmi_phy_data dw_hdmi_phys[] = { { .type = DW_HDMI_PHY_DWC_HDMI_TX_PHY, .name = "DWC HDMI TX PHY", .gen = 1, }, { .type = DW_HDMI_PHY_DWC_MHL_PHY_HEAC, .name = "DWC MHL PHY + HEAC PHY", .gen = 2, .has_svsret = true, .configure = hdmi_phy_configure_dwc_hdmi_3d_tx, }, { .type = DW_HDMI_PHY_DWC_MHL_PHY, .name = "DWC MHL PHY", .gen = 2, .has_svsret = true, .configure = hdmi_phy_configure_dwc_hdmi_3d_tx, }, { .type = DW_HDMI_PHY_DWC_HDMI_3D_TX_PHY_HEAC, .name = "DWC HDMI 3D TX PHY + HEAC PHY", .gen = 2, .configure = hdmi_phy_configure_dwc_hdmi_3d_tx, }, { .type = DW_HDMI_PHY_DWC_HDMI_3D_TX_PHY, .name = "DWC HDMI 3D TX PHY", .gen = 2, .configure = hdmi_phy_configure_dwc_hdmi_3d_tx, }, { .type = DW_HDMI_PHY_DWC_HDMI20_TX_PHY, .name = "DWC HDMI 2.0 TX PHY", .gen = 2, .has_svsret = true, .configure = hdmi_phy_configure_dwc_hdmi_3d_tx, }, { .type = DW_HDMI_PHY_VENDOR_PHY, .name = "Vendor PHY", } }; static int dw_hdmi_detect_phy(struct dw_hdmi *hdmi) { unsigned int i; u8 phy_type; phy_type = hdmi->plat_data->phy_force_vendor ? DW_HDMI_PHY_VENDOR_PHY : hdmi_readb(hdmi, HDMI_CONFIG2_ID); if (phy_type == DW_HDMI_PHY_VENDOR_PHY) { /* Vendor PHYs require support from the glue layer. */ if (!hdmi->plat_data->phy_ops || !hdmi->plat_data->phy_name) { dev_err(hdmi->dev, "Vendor HDMI PHY not supported by glue layer\n"); return -ENODEV; } hdmi->phy.ops = hdmi->plat_data->phy_ops; hdmi->phy.data = hdmi->plat_data->phy_data; hdmi->phy.name = hdmi->plat_data->phy_name; return 0; } /* Synopsys PHYs are handled internally. 
*/ for (i = 0; i < ARRAY_SIZE(dw_hdmi_phys); ++i) { if (dw_hdmi_phys[i].type == phy_type) { hdmi->phy.ops = &dw_hdmi_synopsys_phy_ops; hdmi->phy.name = dw_hdmi_phys[i].name; hdmi->phy.data = (void *)&dw_hdmi_phys[i]; if (!dw_hdmi_phys[i].configure && !hdmi->plat_data->configure_phy) { dev_err(hdmi->dev, "%s requires platform support\n", hdmi->phy.name); return -ENODEV; } return 0; } } dev_err(hdmi->dev, "Unsupported HDMI PHY type (%02x)\n", phy_type); return -ENODEV; } static void dw_hdmi_cec_enable(struct dw_hdmi *hdmi) { mutex_lock(&hdmi->mutex); hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_CECCLK_DISABLE; hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS); mutex_unlock(&hdmi->mutex); } static void dw_hdmi_cec_disable(struct dw_hdmi *hdmi) { mutex_lock(&hdmi->mutex); hdmi->mc_clkdis |= HDMI_MC_CLKDIS_CECCLK_DISABLE; hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS); mutex_unlock(&hdmi->mutex); } static const struct dw_hdmi_cec_ops dw_hdmi_cec_ops = { .write = hdmi_writeb, .read = hdmi_readb, .enable = dw_hdmi_cec_enable, .disable = dw_hdmi_cec_disable, }; static const struct regmap_config hdmi_regmap_8bit_config = { .reg_bits = 32, .val_bits = 8, .reg_stride = 1, .max_register = HDMI_I2CM_FS_SCL_LCNT_0_ADDR, }; static const struct regmap_config hdmi_regmap_32bit_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, .max_register = HDMI_I2CM_FS_SCL_LCNT_0_ADDR << 2, }; static void dw_hdmi_init_hw(struct dw_hdmi *hdmi) { initialize_hdmi_ih_mutes(hdmi); /* * Reset HDMI DDC I2C master controller and mute I2CM interrupts. * Even if we are using a separate i2c adapter doing this doesn't * hurt. */ dw_hdmi_i2c_init(hdmi); if (hdmi->phy.ops->setup_hpd) hdmi->phy.ops->setup_hpd(hdmi, hdmi->phy.data); } /* ----------------------------------------------------------------------------- * Probe/remove API, used from platforms based on the DRM bridge API. 
*/ static int dw_hdmi_parse_dt(struct dw_hdmi *hdmi) { struct device_node *endpoint; struct device_node *remote; if (!hdmi->plat_data->output_port) return 0; endpoint = of_graph_get_endpoint_by_regs(hdmi->dev->of_node, hdmi->plat_data->output_port, -1); if (!endpoint) { /* * On platforms whose bindings don't make the output port * mandatory (such as Rockchip) the plat_data->output_port * field isn't set, so it's safe to make this a fatal error. */ dev_err(hdmi->dev, "Missing endpoint in port@%u\n", hdmi->plat_data->output_port); return -ENODEV; } remote = of_graph_get_remote_port_parent(endpoint); of_node_put(endpoint); if (!remote) { dev_err(hdmi->dev, "Endpoint in port@%u unconnected\n", hdmi->plat_data->output_port); return -ENODEV; } if (!of_device_is_available(remote)) { dev_err(hdmi->dev, "port@%u remote device is disabled\n", hdmi->plat_data->output_port); of_node_put(remote); return -ENODEV; } hdmi->next_bridge = of_drm_find_bridge(remote); of_node_put(remote); if (!hdmi->next_bridge) return -EPROBE_DEFER; return 0; } bool dw_hdmi_bus_fmt_is_420(struct dw_hdmi *hdmi) { return hdmi_bus_fmt_is_yuv420(hdmi->hdmi_data.enc_out_bus_format); } EXPORT_SYMBOL_GPL(dw_hdmi_bus_fmt_is_420); struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, const struct dw_hdmi_plat_data *plat_data) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct platform_device_info pdevinfo; struct device_node *ddc_node; struct dw_hdmi_cec_data cec; struct dw_hdmi *hdmi; struct resource *iores = NULL; int irq; int ret; u32 val = 1; u8 prod_id0; u8 prod_id1; u8 config0; u8 config3; hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL); if (!hdmi) return ERR_PTR(-ENOMEM); hdmi->plat_data = plat_data; hdmi->dev = dev; hdmi->sample_rate = 48000; hdmi->channels = 2; hdmi->disabled = true; hdmi->rxsense = true; hdmi->phy_mask = (u8)~(HDMI_PHY_HPD | HDMI_PHY_RX_SENSE); hdmi->mc_clkdis = 0x7f; hdmi->last_connector_result = connector_status_disconnected; 
mutex_init(&hdmi->mutex); mutex_init(&hdmi->audio_mutex); mutex_init(&hdmi->cec_notifier_mutex); spin_lock_init(&hdmi->audio_lock); ret = dw_hdmi_parse_dt(hdmi); if (ret < 0) return ERR_PTR(ret); ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0); if (ddc_node) { hdmi->ddc = of_get_i2c_adapter_by_node(ddc_node); of_node_put(ddc_node); if (!hdmi->ddc) { dev_dbg(hdmi->dev, "failed to read ddc node\n"); return ERR_PTR(-EPROBE_DEFER); } } else { dev_dbg(hdmi->dev, "no ddc property found\n"); } if (!plat_data->regm) { const struct regmap_config *reg_config; of_property_read_u32(np, "reg-io-width", &val); switch (val) { case 4: reg_config = &hdmi_regmap_32bit_config; hdmi->reg_shift = 2; break; case 1: reg_config = &hdmi_regmap_8bit_config; break; default: dev_err(dev, "reg-io-width must be 1 or 4\n"); return ERR_PTR(-EINVAL); } iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); hdmi->regs = devm_ioremap_resource(dev, iores); if (IS_ERR(hdmi->regs)) { ret = PTR_ERR(hdmi->regs); goto err_res; } hdmi->regm = devm_regmap_init_mmio(dev, hdmi->regs, reg_config); if (IS_ERR(hdmi->regm)) { dev_err(dev, "Failed to configure regmap\n"); ret = PTR_ERR(hdmi->regm); goto err_res; } } else { hdmi->regm = plat_data->regm; } hdmi->isfr_clk = devm_clk_get(hdmi->dev, "isfr"); if (IS_ERR(hdmi->isfr_clk)) { ret = PTR_ERR(hdmi->isfr_clk); dev_err(hdmi->dev, "Unable to get HDMI isfr clk: %d\n", ret); goto err_res; } ret = clk_prepare_enable(hdmi->isfr_clk); if (ret) { dev_err(hdmi->dev, "Cannot enable HDMI isfr clock: %d\n", ret); goto err_res; } hdmi->iahb_clk = devm_clk_get(hdmi->dev, "iahb"); if (IS_ERR(hdmi->iahb_clk)) { ret = PTR_ERR(hdmi->iahb_clk); dev_err(hdmi->dev, "Unable to get HDMI iahb clk: %d\n", ret); goto err_isfr; } ret = clk_prepare_enable(hdmi->iahb_clk); if (ret) { dev_err(hdmi->dev, "Cannot enable HDMI iahb clock: %d\n", ret); goto err_isfr; } hdmi->cec_clk = devm_clk_get(hdmi->dev, "cec"); if (PTR_ERR(hdmi->cec_clk) == -ENOENT) { hdmi->cec_clk = NULL; } else if 
(IS_ERR(hdmi->cec_clk)) { ret = PTR_ERR(hdmi->cec_clk); if (ret != -EPROBE_DEFER) dev_err(hdmi->dev, "Cannot get HDMI cec clock: %d\n", ret); hdmi->cec_clk = NULL; goto err_iahb; } else { ret = clk_prepare_enable(hdmi->cec_clk); if (ret) { dev_err(hdmi->dev, "Cannot enable HDMI cec clock: %d\n", ret); goto err_iahb; } } /* Product and revision IDs */ hdmi->version = (hdmi_readb(hdmi, HDMI_DESIGN_ID) << 8) | (hdmi_readb(hdmi, HDMI_REVISION_ID) << 0); prod_id0 = hdmi_readb(hdmi, HDMI_PRODUCT_ID0); prod_id1 = hdmi_readb(hdmi, HDMI_PRODUCT_ID1); if (prod_id0 != HDMI_PRODUCT_ID0_HDMI_TX || (prod_id1 & ~HDMI_PRODUCT_ID1_HDCP) != HDMI_PRODUCT_ID1_HDMI_TX) { dev_err(dev, "Unsupported HDMI controller (%04x:%02x:%02x)\n", hdmi->version, prod_id0, prod_id1); ret = -ENODEV; goto err_iahb; } ret = dw_hdmi_detect_phy(hdmi); if (ret < 0) goto err_iahb; dev_info(dev, "Detected HDMI TX controller v%x.%03x %s HDCP (%s)\n", hdmi->version >> 12, hdmi->version & 0xfff, prod_id1 & HDMI_PRODUCT_ID1_HDCP ? "with" : "without", hdmi->phy.name); dw_hdmi_init_hw(hdmi); irq = platform_get_irq(pdev, 0); if (irq < 0) { ret = irq; goto err_iahb; } ret = devm_request_threaded_irq(dev, irq, dw_hdmi_hardirq, dw_hdmi_irq, IRQF_SHARED, dev_name(dev), hdmi); if (ret) goto err_iahb; /* * To prevent overflows in HDMI_IH_FC_STAT2, set the clk regenerator * N and cts values before enabling phy */ hdmi_init_clk_regenerator(hdmi); /* If DDC bus is not specified, try to register HDMI I2C bus */ if (!hdmi->ddc) { /* Look for (optional) stuff related to unwedging */ hdmi->pinctrl = devm_pinctrl_get(dev); if (!IS_ERR(hdmi->pinctrl)) { hdmi->unwedge_state = pinctrl_lookup_state(hdmi->pinctrl, "unwedge"); hdmi->default_state = pinctrl_lookup_state(hdmi->pinctrl, "default"); if (IS_ERR(hdmi->default_state) || IS_ERR(hdmi->unwedge_state)) { if (!IS_ERR(hdmi->unwedge_state)) dev_warn(dev, "Unwedge requires default pinctrl\n"); hdmi->default_state = NULL; hdmi->unwedge_state = NULL; } } hdmi->ddc = 
dw_hdmi_i2c_adapter(hdmi); if (IS_ERR(hdmi->ddc)) hdmi->ddc = NULL; } hdmi->bridge.driver_private = hdmi; hdmi->bridge.funcs = &dw_hdmi_bridge_funcs; hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD; hdmi->bridge.interlace_allowed = true; hdmi->bridge.ddc = hdmi->ddc; #ifdef CONFIG_OF hdmi->bridge.of_node = pdev->dev.of_node; #endif memset(&pdevinfo, 0, sizeof(pdevinfo)); pdevinfo.parent = dev; pdevinfo.id = PLATFORM_DEVID_AUTO; config0 = hdmi_readb(hdmi, HDMI_CONFIG0_ID); config3 = hdmi_readb(hdmi, HDMI_CONFIG3_ID); if (iores && config3 & HDMI_CONFIG3_AHBAUDDMA) { struct dw_hdmi_audio_data audio; audio.phys = iores->start; audio.base = hdmi->regs; audio.irq = irq; audio.hdmi = hdmi; audio.get_eld = hdmi_audio_get_eld; hdmi->enable_audio = dw_hdmi_ahb_audio_enable; hdmi->disable_audio = dw_hdmi_ahb_audio_disable; pdevinfo.name = "dw-hdmi-ahb-audio"; pdevinfo.data = &audio; pdevinfo.size_data = sizeof(audio); pdevinfo.dma_mask = DMA_BIT_MASK(32); hdmi->audio = platform_device_register_full(&pdevinfo); } else if (config0 & HDMI_CONFIG0_I2S) { struct dw_hdmi_i2s_audio_data audio; audio.hdmi = hdmi; audio.get_eld = hdmi_audio_get_eld; audio.write = hdmi_writeb; audio.read = hdmi_readb; hdmi->enable_audio = dw_hdmi_i2s_audio_enable; hdmi->disable_audio = dw_hdmi_i2s_audio_disable; pdevinfo.name = "dw-hdmi-i2s-audio"; pdevinfo.data = &audio; pdevinfo.size_data = sizeof(audio); pdevinfo.dma_mask = DMA_BIT_MASK(32); hdmi->audio = platform_device_register_full(&pdevinfo); } else if (iores && config3 & HDMI_CONFIG3_GPAUD) { struct dw_hdmi_audio_data audio; audio.phys = iores->start; audio.base = hdmi->regs; audio.irq = irq; audio.hdmi = hdmi; audio.get_eld = hdmi_audio_get_eld; hdmi->enable_audio = dw_hdmi_gp_audio_enable; hdmi->disable_audio = dw_hdmi_gp_audio_disable; pdevinfo.name = "dw-hdmi-gp-audio"; pdevinfo.id = PLATFORM_DEVID_NONE; pdevinfo.data = &audio; pdevinfo.size_data = sizeof(audio); pdevinfo.dma_mask = DMA_BIT_MASK(32); 
hdmi->audio = platform_device_register_full(&pdevinfo); } if (!plat_data->disable_cec && (config0 & HDMI_CONFIG0_CEC)) { cec.hdmi = hdmi; cec.ops = &dw_hdmi_cec_ops; cec.irq = irq; pdevinfo.name = "dw-hdmi-cec"; pdevinfo.data = &cec; pdevinfo.size_data = sizeof(cec); pdevinfo.dma_mask = 0; hdmi->cec = platform_device_register_full(&pdevinfo); } drm_bridge_add(&hdmi->bridge); return hdmi; err_iahb: clk_disable_unprepare(hdmi->iahb_clk); clk_disable_unprepare(hdmi->cec_clk); err_isfr: clk_disable_unprepare(hdmi->isfr_clk); err_res: i2c_put_adapter(hdmi->ddc); return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(dw_hdmi_probe); void dw_hdmi_remove(struct dw_hdmi *hdmi) { drm_bridge_remove(&hdmi->bridge); if (hdmi->audio && !IS_ERR(hdmi->audio)) platform_device_unregister(hdmi->audio); if (!IS_ERR(hdmi->cec)) platform_device_unregister(hdmi->cec); /* Disable all interrupts */ hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0); clk_disable_unprepare(hdmi->iahb_clk); clk_disable_unprepare(hdmi->isfr_clk); clk_disable_unprepare(hdmi->cec_clk); if (hdmi->i2c) i2c_del_adapter(&hdmi->i2c->adap); else i2c_put_adapter(hdmi->ddc); } EXPORT_SYMBOL_GPL(dw_hdmi_remove); /* ----------------------------------------------------------------------------- * Bind/unbind API, used from platforms based on the component framework. 
*/ struct dw_hdmi *dw_hdmi_bind(struct platform_device *pdev, struct drm_encoder *encoder, const struct dw_hdmi_plat_data *plat_data) { struct dw_hdmi *hdmi; int ret; hdmi = dw_hdmi_probe(pdev, plat_data); if (IS_ERR(hdmi)) return hdmi; ret = drm_bridge_attach(encoder, &hdmi->bridge, NULL, 0); if (ret) { dw_hdmi_remove(hdmi); return ERR_PTR(ret); } return hdmi; } EXPORT_SYMBOL_GPL(dw_hdmi_bind); void dw_hdmi_unbind(struct dw_hdmi *hdmi) { dw_hdmi_remove(hdmi); } EXPORT_SYMBOL_GPL(dw_hdmi_unbind); void dw_hdmi_resume(struct dw_hdmi *hdmi) { dw_hdmi_init_hw(hdmi); } EXPORT_SYMBOL_GPL(dw_hdmi_resume); MODULE_AUTHOR("Sascha Hauer <[email protected]>"); MODULE_AUTHOR("Andy Yan <[email protected]>"); MODULE_AUTHOR("Yakir Yang <[email protected]>"); MODULE_AUTHOR("Vladimir Zapolskiy <[email protected]>"); MODULE_DESCRIPTION("DW HDMI transmitter driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:dw-hdmi");
linux-master
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
// SPDX-License-Identifier: GPL-2.0
/*
 * dw-hdmi-i2s-audio.c
 *
 * Copyright (c) 2017 Renesas Solutions Corp.
 * Kuninori Morimoto <[email protected]>
 */
#include <linux/dma-mapping.h>
#include <linux/module.h>

#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_crtc.h>

#include <sound/hdmi-codec.h>

#include "dw-hdmi.h"
#include "dw-hdmi-audio.h"

#define DRIVER_NAME "dw-hdmi-i2s-audio"

/* Write a dw-hdmi register through the accessor supplied in platform data. */
static inline void hdmi_write(struct dw_hdmi_i2s_audio_data *audio,
			      u8 val, int offset)
{
	struct dw_hdmi *hdmi = audio->hdmi;

	audio->write(hdmi, val, offset);
}

/* Read a dw-hdmi register through the accessor supplied in platform data. */
static inline u8 hdmi_read(struct dw_hdmi_i2s_audio_data *audio, int offset)
{
	struct dw_hdmi *hdmi = audio->hdmi;

	return audio->read(hdmi, offset);
}

/*
 * hdmi-codec .hw_params callback: program the dw-hdmi I2S audio unit
 * (lane enables, sample width, DAI format) and push the stream
 * parameters (rate, channel status/count/allocation) into the core.
 */
static int dw_hdmi_i2s_hw_params(struct device *dev, void *data,
				 struct hdmi_codec_daifmt *fmt,
				 struct hdmi_codec_params *hparms)
{
	struct dw_hdmi_i2s_audio_data *audio = data;
	struct dw_hdmi *hdmi = audio->hdmi;
	u8 conf0 = 0;
	u8 conf1 = 0;
	u8 inputclkfs = 0;

	/* it cares I2S only: the codec side must not provide bit/frame clocks */
	if (fmt->bit_clk_provider | fmt->frame_clk_provider) {
		dev_err(dev, "unsupported clock settings\n");
		return -EINVAL;
	}

	/* Reset the FIFOs before applying new params */
	hdmi_write(audio, HDMI_AUD_CONF0_SW_RESET, HDMI_AUD_CONF0);
	hdmi_write(audio, (u8)~HDMI_MC_SWRSTZ_I2SSWRST_REQ, HDMI_MC_SWRSTZ);

	inputclkfs = HDMI_AUD_INPUTCLKFS_64FS;
	/* Lane 0 always carries the first stereo pair. */
	conf0 = (HDMI_AUD_CONF0_I2S_SELECT | HDMI_AUD_CONF0_I2S_EN0);

	/* Enable the required i2s lanes (one lane per stereo pair) */
	switch (hparms->channels) {
	case 7 ... 8:
		conf0 |= HDMI_AUD_CONF0_I2S_EN3;
		fallthrough;
	case 5 ... 6:
		conf0 |= HDMI_AUD_CONF0_I2S_EN2;
		fallthrough;
	case 3 ... 4:
		conf0 |= HDMI_AUD_CONF0_I2S_EN1;
		/* last case - nothing left to fall through to */
	}

	switch (hparms->sample_width) {
	case 16:
		conf1 = HDMI_AUD_CONF1_WIDTH_16;
		break;
	case 24:
	case 32:
		conf1 = HDMI_AUD_CONF1_WIDTH_24;
		break;
	}

	/* Map the DAI format onto the controller's input mode. */
	switch (fmt->fmt) {
	case HDMI_I2S:
		conf1 |= HDMI_AUD_CONF1_MODE_I2S;
		break;
	case HDMI_RIGHT_J:
		conf1 |= HDMI_AUD_CONF1_MODE_RIGHT_J;
		break;
	case HDMI_LEFT_J:
		conf1 |= HDMI_AUD_CONF1_MODE_LEFT_J;
		break;
	case HDMI_DSP_A:
		conf1 |= HDMI_AUD_CONF1_MODE_BURST_1;
		break;
	case HDMI_DSP_B:
		conf1 |= HDMI_AUD_CONF1_MODE_BURST_2;
		break;
	default:
		dev_err(dev, "unsupported format\n");
		return -EINVAL;
	}

	dw_hdmi_set_sample_rate(hdmi, hparms->sample_rate);
	dw_hdmi_set_channel_status(hdmi, hparms->iec.status);
	dw_hdmi_set_channel_count(hdmi, hparms->channels);
	dw_hdmi_set_channel_allocation(hdmi, hparms->cea.channel_allocation);

	hdmi_write(audio, inputclkfs, HDMI_AUD_INPUTCLKFS);
	hdmi_write(audio, conf0, HDMI_AUD_CONF0);
	hdmi_write(audio, conf1, HDMI_AUD_CONF1);

	return 0;
}

/* hdmi-codec .audio_startup callback: unmute/enable audio in the core. */
static int dw_hdmi_i2s_audio_startup(struct device *dev, void *data)
{
	struct dw_hdmi_i2s_audio_data *audio = data;
	struct dw_hdmi *hdmi = audio->hdmi;

	dw_hdmi_audio_enable(hdmi);

	return 0;
}

/* hdmi-codec .audio_shutdown callback: mute/disable audio in the core. */
static void dw_hdmi_i2s_audio_shutdown(struct device *dev, void *data)
{
	struct dw_hdmi_i2s_audio_data *audio = data;
	struct dw_hdmi *hdmi = audio->hdmi;

	dw_hdmi_audio_disable(hdmi);
}

/*
 * hdmi-codec .get_eld callback: copy the sink's ELD (up to MAX_ELD_BYTES)
 * into @buf, or zero-fill it when no connector is available.
 */
static int dw_hdmi_i2s_get_eld(struct device *dev, void *data, uint8_t *buf,
			       size_t len)
{
	struct dw_hdmi_i2s_audio_data *audio = data;
	u8 *eld;

	eld = audio->get_eld(audio->hdmi);
	if (eld)
		memcpy(buf, eld, min_t(size_t, MAX_ELD_BYTES, len));
	else
		/* Pass en empty ELD if connector not available */
		memset(buf, 0, len);

	return 0;
}

/*
 * hdmi-codec .get_dai_id callback: map OF graph endpoint to DAI index.
 * Only the endpoint in port 2 is the sound interface.
 */
static int dw_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
				  struct device_node *endpoint)
{
	struct of_endpoint of_ep;
	int ret;

	ret = of_graph_parse_endpoint(endpoint, &of_ep);
	if (ret < 0)
		return ret;

	/*
	 * HDMI sound should be located as reg = <2>
	 * Then, it is sound port 0
	 */
	if (of_ep.port == 2)
		return 0;

	return -EINVAL;
}

/* hdmi-codec .hook_plugged_cb callback: forward HPD notifications. */
static int dw_hdmi_i2s_hook_plugged_cb(struct device *dev, void *data,
				       hdmi_codec_plugged_cb fn,
				       struct device *codec_dev)
{
	struct dw_hdmi_i2s_audio_data *audio = data;
	struct dw_hdmi *hdmi = audio->hdmi;

	return dw_hdmi_set_plugged_cb(hdmi, fn, codec_dev);
}

static const struct hdmi_codec_ops dw_hdmi_i2s_ops = {
	.hw_params	= dw_hdmi_i2s_hw_params,
	.audio_startup	= dw_hdmi_i2s_audio_startup,
	.audio_shutdown	= dw_hdmi_i2s_audio_shutdown,
	.get_eld	= dw_hdmi_i2s_get_eld,
	.get_dai_id	= dw_hdmi_i2s_get_dai_id,
	.hook_plugged_cb = dw_hdmi_i2s_hook_plugged_cb,
};

/*
 * Probe: register an hdmi-codec child device wired to this driver's ops;
 * the codec handle is kept in drvdata for removal.
 */
static int snd_dw_hdmi_probe(struct platform_device *pdev)
{
	struct dw_hdmi_i2s_audio_data *audio = pdev->dev.platform_data;
	struct platform_device_info pdevinfo;
	struct hdmi_codec_pdata pdata;
	struct platform_device *platform;

	memset(&pdata, 0, sizeof(pdata));
	pdata.ops		= &dw_hdmi_i2s_ops;
	pdata.i2s		= 1;
	pdata.max_i2s_channels	= 8;
	pdata.data		= audio;

	memset(&pdevinfo, 0, sizeof(pdevinfo));
	pdevinfo.parent		= pdev->dev.parent;
	pdevinfo.id		= PLATFORM_DEVID_AUTO;
	pdevinfo.name		= HDMI_CODEC_DRV_NAME;
	pdevinfo.data		= &pdata;
	pdevinfo.size_data	= sizeof(pdata);
	pdevinfo.dma_mask	= DMA_BIT_MASK(32);

	platform = platform_device_register_full(&pdevinfo);
	if (IS_ERR(platform))
		return PTR_ERR(platform);

	dev_set_drvdata(&pdev->dev, platform);

	return 0;
}

/* Remove: unregister the hdmi-codec child created in probe. */
static void snd_dw_hdmi_remove(struct platform_device *pdev)
{
	struct platform_device *platform = dev_get_drvdata(&pdev->dev);

	platform_device_unregister(platform);
}

static struct platform_driver snd_dw_hdmi_driver = {
	.probe	= snd_dw_hdmi_probe,
	.remove_new = snd_dw_hdmi_remove,
	.driver	= {
		.name = DRIVER_NAME,
	},
};
module_platform_driver(snd_dw_hdmi_driver);

MODULE_AUTHOR("Kuninori Morimoto <[email protected]>");
MODULE_DESCRIPTION("Synopsis Designware HDMI I2S ALSA SoC interface");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
linux-master
drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * dw-hdmi-gp-audio.c
 *
 * Copyright 2020-2022 NXP
 */
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_edid.h>
#include <drm/drm_connector.h>

#include <sound/hdmi-codec.h>
#include <sound/asoundef.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/pcm.h>
#include <sound/pcm_drm_eld.h>
#include <sound/pcm_iec958.h>
#include <sound/dmaengine_pcm.h>

#include "dw-hdmi-audio.h"

#define DRIVER_NAME "dw-hdmi-gp-audio"
#define DRV_NAME    "hdmi-gp-audio"

/* Per-device state: the dw-hdmi handle plus the registered codec child. */
struct snd_dw_hdmi {
	struct dw_hdmi_audio_data data;
	struct platform_device *audio_pdev;
	unsigned int pos;	/* not used by the callbacks visible here */
};

struct dw_hdmi_channel_conf {
	u8 conf1;
	u8 ca;	/* CEA speaker-allocation code */
};

/*
 * The default mapping of ALSA channels to HDMI channels and speaker
 * allocation bits. Note that we can't do channel remapping here -
 * channels must be in the same order.
 *
 * Mappings for alsa-lib pcm/surround*.conf files:
 *
 *		Front	Sur4.0	Sur4.1	Sur5.0	Sur5.1	Sur7.1
 * Channels	2	4	6	6	6	8
 *
 * Our mapping from ALSA channel to CEA686D speaker name and HDMI channel:
 *
 *				Number of ALSA channels
 * ALSA Channel	2	3	4	5	6	7	8
 * 0		FL:0	=	=	=	=	=	=
 * 1		FR:1	=	=	=	=	=	=
 * 2			FC:3	RL:4	LFE:2	=	=	=
 * 3				RR:5	RL:4	FC:3	=	=
 * 4					RR:5	RL:4	=	=
 * 5						RR:5	=	=
 * 6							RC:6	=
 * 7							RLC/FRC	RLC/FRC
 */
static struct dw_hdmi_channel_conf default_hdmi_channel_config[7] = {
	{ 0x03, 0x00 },	/* FL,FR */
	{ 0x0b, 0x02 },	/* FL,FR,FC */
	{ 0x33, 0x08 },	/* FL,FR,RL,RR */
	{ 0x37, 0x09 },	/* FL,FR,LFE,RL,RR */
	{ 0x3f, 0x0b },	/* FL,FR,LFE,FC,RL,RR */
	{ 0x7f, 0x0f },	/* FL,FR,LFE,FC,RL,RR,RC */
	{ 0xff, 0x13 },	/* FL,FR,LFE,FC,RL,RR,[FR]RC,[FR]LC */
};

/*
 * hdmi-codec .hw_params callback: push rate, channel count/allocation,
 * PCM/non-PCM flag and sample width into the dw-hdmi core.
 * The table above is indexed by (channels - 2); assumes 2..8 channels.
 */
static int audio_hw_params(struct device *dev,  void *data,
			   struct hdmi_codec_daifmt *daifmt,
			   struct hdmi_codec_params *params)
{
	struct snd_dw_hdmi *dw = dev_get_drvdata(dev);
	u8 ca;

	dw_hdmi_set_sample_rate(dw->data.hdmi, params->sample_rate);

	ca = default_hdmi_channel_config[params->channels - 2].ca;

	dw_hdmi_set_channel_count(dw->data.hdmi, params->channels);
	dw_hdmi_set_channel_allocation(dw->data.hdmi, ca);

	dw_hdmi_set_sample_non_pcm(dw->data.hdmi,
				   params->iec.status[0] & IEC958_AES0_NONAUDIO);
	dw_hdmi_set_sample_width(dw->data.hdmi, params->sample_width);

	return 0;
}

/* hdmi-codec .audio_shutdown callback: nothing to tear down here. */
static void audio_shutdown(struct device *dev, void *data)
{
}

/*
 * hdmi-codec .mute_stream callback: @enable == true means "mute",
 * so audio is disabled when muting and enabled when unmuting.
 */
static int audio_mute_stream(struct device *dev, void *data,
			     bool enable, int direction)
{
	struct snd_dw_hdmi *dw = dev_get_drvdata(dev);

	if (!enable)
		dw_hdmi_audio_enable(dw->data.hdmi);
	else
		dw_hdmi_audio_disable(dw->data.hdmi);

	return 0;
}

/*
 * hdmi-codec .get_eld callback: copy the sink's ELD (up to MAX_ELD_BYTES)
 * into @buf, or zero-fill it when no connector is available.
 */
static int audio_get_eld(struct device *dev, void *data, u8 *buf,
			 size_t len)
{
	struct dw_hdmi_audio_data *audio = data;
	u8 *eld;

	eld = audio->get_eld(audio->hdmi);
	if (eld)
		memcpy(buf, eld, min_t(size_t, MAX_ELD_BYTES, len));
	else
		/* Pass en empty ELD if connector not available */
		memset(buf, 0, len);

	return 0;
}

/* hdmi-codec .hook_plugged_cb callback: forward HPD notifications. */
static int audio_hook_plugged_cb(struct device *dev, void *data,
				 hdmi_codec_plugged_cb fn,
				 struct device *codec_dev)
{
	struct snd_dw_hdmi *dw = dev_get_drvdata(dev);

	return dw_hdmi_set_plugged_cb(dw->data.hdmi, fn, codec_dev);
}

static const struct hdmi_codec_ops audio_codec_ops = {
	.hw_params = audio_hw_params,
	.audio_shutdown = audio_shutdown,
	.mute_stream = audio_mute_stream,
	.get_eld = audio_get_eld,
	.hook_plugged_cb = audio_hook_plugged_cb,
};

/*
 * Probe: copy the platform data and register an hdmi-codec child device
 * bound to the ops above.
 */
static int snd_dw_hdmi_probe(struct platform_device *pdev)
{
	struct dw_hdmi_audio_data *data = pdev->dev.platform_data;
	struct snd_dw_hdmi *dw;

	const struct hdmi_codec_pdata codec_data = {
		.i2s = 1,
		.spdif = 0,
		.ops = &audio_codec_ops,
		.max_i2s_channels = 8,
		.data = data,
	};

	dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	dw->data = *data;

	platform_set_drvdata(pdev, dw);

	dw->audio_pdev = platform_device_register_data(&pdev->dev,
						       HDMI_CODEC_DRV_NAME, 1,
						       &codec_data,
						       sizeof(codec_data));

	return PTR_ERR_OR_ZERO(dw->audio_pdev);
}

/* Remove: unregister the hdmi-codec child created in probe. */
static void snd_dw_hdmi_remove(struct platform_device *pdev)
{
	struct snd_dw_hdmi *dw = platform_get_drvdata(pdev);

	platform_device_unregister(dw->audio_pdev);
}

static struct platform_driver snd_dw_hdmi_driver = {
	.probe	= snd_dw_hdmi_probe,
	.remove_new = snd_dw_hdmi_remove,
	.driver	= {
		.name = DRIVER_NAME,
	},
};
module_platform_driver(snd_dw_hdmi_driver);

MODULE_AUTHOR("Shengjiu Wang <[email protected]>");
MODULE_DESCRIPTION("Synopsys Designware HDMI GPA ALSA interface");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
linux-master
drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright(c) 2016, Analogix Semiconductor.
 *
 * Based on anx7808 driver obtained from chromeos with copyright:
 * Copyright(c) 2013, Google Inc.
 */
#include <linux/regmap.h>

#include <drm/display/drm_dp_helper.h>
#include <drm/drm.h>
#include <drm/drm_print.h>

#include "analogix-i2c-dptx.h"

#define AUX_WAIT_TIMEOUT_MS	15
#define AUX_CH_BUFFER_SIZE	16

/* Clear @mask bits in @reg (regmap_update_bits with a zero value). */
static int anx_i2c_dp_clear_bits(struct regmap *map, u8 reg, u8 mask)
{
	return regmap_update_bits(map, reg, mask, 0);
}

/*
 * An AUX operation is finished once the hardware has cleared SP_AUX_EN.
 * A failed register read is treated as "not finished".
 */
static bool anx_dp_aux_op_finished(struct regmap *map_dptx)
{
	unsigned int value;
	int err;

	err = regmap_read(map_dptx, SP_DP_AUX_CH_CTRL2_REG, &value);
	if (err < 0)
		return false;

	return (value & SP_AUX_EN) == 0;
}

/*
 * Poll until the pending AUX operation completes (up to
 * AUX_WAIT_TIMEOUT_MS), then check the AUX channel status register.
 * Returns 0 on success, -ETIMEDOUT on timeout or reported AUX error,
 * or a negative regmap error.
 */
static int anx_dp_aux_wait(struct regmap *map_dptx)
{
	unsigned long timeout;
	unsigned int status;
	int err;

	timeout = jiffies + msecs_to_jiffies(AUX_WAIT_TIMEOUT_MS) + 1;

	while (!anx_dp_aux_op_finished(map_dptx)) {
		if (time_after(jiffies, timeout)) {
			/* Re-check once to avoid a race with completion. */
			if (!anx_dp_aux_op_finished(map_dptx)) {
				DRM_ERROR("Timed out waiting AUX to finish\n");
				return -ETIMEDOUT;
			}

			break;
		}

		usleep_range(1000, 2000);
	}

	/* Read the AUX channel access status */
	err = regmap_read(map_dptx, SP_AUX_CH_STATUS_REG, &status);
	if (err < 0) {
		DRM_ERROR("Failed to read from AUX channel: %d\n", err);
		return err;
	}

	if (status & SP_AUX_STATUS) {
		DRM_ERROR("Failed to wait for AUX channel (status: %02x)\n",
			  status);
		return -ETIMEDOUT;
	}

	return 0;
}

/* Program the 20-bit AUX address across the three address registers. */
static int anx_dp_aux_address(struct regmap *map_dptx, unsigned int addr)
{
	int err;

	err = regmap_write(map_dptx, SP_AUX_ADDR_7_0_REG, addr & 0xff);
	if (err)
		return err;

	err = regmap_write(map_dptx, SP_AUX_ADDR_15_8_REG,
			   (addr & 0xff00) >> 8);
	if (err)
		return err;

	/*
	 * DP AUX CH Address Register #2, only update bits[3:0]
	 * [7:4] RESERVED
	 * [3:0] AUX_ADDR[19:16], Register control AUX CH address.
	 */
	err = regmap_update_bits(map_dptx, SP_AUX_ADDR_19_16_REG,
				 SP_AUX_ADDR_19_16_MASK,
				 (addr & 0xf0000) >> 16);

	if (err)
		return err;

	return 0;
}

/*
 * Execute one drm_dp_aux transaction through the DPTX AUX channel:
 * stage write data (if any), program address/request, start the
 * operation, wait for completion and fetch read data. Returns the
 * number of bytes transferred or a negative error.
 */
ssize_t anx_dp_aux_transfer(struct regmap *map_dptx,
			    struct drm_dp_aux_msg *msg)
{
	u8 ctrl1 = msg->request;
	u8 ctrl2 = SP_AUX_EN;
	u8 *buffer = msg->buffer;
	int err;

	/* The DP AUX transmit and receive buffer has 16 bytes. */
	if (WARN_ON(msg->size > AUX_CH_BUFFER_SIZE))
		return -E2BIG;

	/* Zero-sized messages specify address-only transactions. */
	if (msg->size < 1)
		ctrl2 |= SP_ADDR_ONLY;
	else	/* For non-zero-sized set the length field. */
		ctrl1 |= (msg->size - 1) << SP_AUX_LENGTH_SHIFT;

	if ((msg->size > 0) && ((msg->request & DP_AUX_I2C_READ) == 0)) {
		/* When WRITE | MOT write values to data buffer */
		err = regmap_bulk_write(map_dptx, SP_DP_BUF_DATA0_REG, buffer,
					msg->size);
		if (err)
			return err;
	}

	/* Write address and request */
	err = anx_dp_aux_address(map_dptx, msg->address);
	if (err)
		return err;

	err = regmap_write(map_dptx, SP_DP_AUX_CH_CTRL1_REG, ctrl1);
	if (err)
		return err;

	/* Start transaction */
	err = regmap_update_bits(map_dptx, SP_DP_AUX_CH_CTRL2_REG,
				 SP_ADDR_ONLY | SP_AUX_EN, ctrl2);
	if (err)
		return err;

	err = anx_dp_aux_wait(map_dptx);
	if (err)
		return err;

	msg->reply = DP_AUX_I2C_REPLY_ACK;

	if ((msg->size > 0) && (msg->request & DP_AUX_I2C_READ)) {
		/* Read values from data buffer */
		err = regmap_bulk_read(map_dptx, SP_DP_BUF_DATA0_REG, buffer,
				       msg->size);
		if (err)
			return err;
	}

	/* Clear the address-only flag so later transfers start clean. */
	err = anx_i2c_dp_clear_bits(map_dptx, SP_DP_AUX_CH_CTRL2_REG,
				    SP_ADDR_ONLY);
	if (err)
		return err;

	return msg->size;
}
EXPORT_SYMBOL_GPL(anx_dp_aux_transfer);
linux-master
drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2016, Analogix Semiconductor.
 *
 * Based on anx7808 driver obtained from chromeos with copyright:
 * Copyright(c) 2013, Google Inc.
 */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/types.h>

#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "analogix-anx78xx.h"

#define I2C_NUM_ADDRESSES	5
#define I2C_IDX_TX_P0		0
#define I2C_IDX_TX_P1		1
#define I2C_IDX_TX_P2		2
#define I2C_IDX_RX_P0		3
#define I2C_IDX_RX_P1		4

#define XTAL_CLK		270 /* 27M */

/* Per-variant I2C slave addresses, indexed by the I2C_IDX_* constants. */
static const u8 anx7808_i2c_addresses[] = {
	[I2C_IDX_TX_P0] = 0x78,
	[I2C_IDX_TX_P1] = 0x7a,
	[I2C_IDX_TX_P2] = 0x72,
	[I2C_IDX_RX_P0] = 0x7e,
	[I2C_IDX_RX_P1] = 0x80,
};

static const u8 anx781x_i2c_addresses[] = {
	[I2C_IDX_TX_P0] = 0x70,
	[I2C_IDX_TX_P1] = 0x7a,
	[I2C_IDX_TX_P2] = 0x72,
	[I2C_IDX_RX_P0] = 0x7e,
	[I2C_IDX_RX_P1] = 0x80,
};

struct anx78xx_platform_data {
	struct regulator *dvdd10;	/* 1.0V digital core supply (optional) */
	struct gpio_desc *gpiod_hpd;	/* hot-plug detect input */
	struct gpio_desc *gpiod_pd;	/* chip power-down line */
	struct gpio_desc *gpiod_reset;	/* chip reset line */
	int hpd_irq;
	int intp_irq;
};

struct anx78xx {
	struct drm_dp_aux aux;
	struct drm_bridge bridge;
	struct i2c_client *client;
	struct edid *edid;		/* cached EDID from first get_modes() */
	struct drm_connector connector;
	struct anx78xx_platform_data pdata;
	struct mutex lock;		/* serializes downstream/EDID access in get_modes() */

	/*
	 * I2C Slave addresses of ANX7814 are mapped as TX_P0, TX_P1, TX_P2,
	 * RX_P0 and RX_P1.
	 */
	struct i2c_client *i2c_dummy[I2C_NUM_ADDRESSES];
	struct regmap *map[I2C_NUM_ADDRESSES];

	u16 chipid;
	u8 dpcd[DP_RECEIVER_CAP_SIZE];

	bool powered;
};

static inline struct anx78xx *connector_to_anx78xx(struct drm_connector *c)
{
	return container_of(c, struct anx78xx, connector);
}

static inline struct anx78xx *bridge_to_anx78xx(struct drm_bridge *bridge)
{
	return container_of(bridge, struct anx78xx, bridge);
}

/* Set @mask bits in @reg. */
static int anx78xx_set_bits(struct regmap *map, u8 reg, u8 mask)
{
	return regmap_update_bits(map, reg, mask, mask);
}

/* Clear @mask bits in @reg. */
static int anx78xx_clear_bits(struct regmap *map, u8 reg, u8 mask)
{
	return regmap_update_bits(map, reg, mask, 0);
}

/* drm_dp_aux .transfer hook: delegate to the shared DPTX AUX helper. */
static ssize_t anx78xx_aux_transfer(struct drm_dp_aux *aux,
				    struct drm_dp_aux_msg *msg)
{
	struct anx78xx *anx78xx = container_of(aux, struct anx78xx, aux);

	return anx_dp_aux_transfer(anx78xx->map[I2C_IDX_TX_P0], msg);
}

/* Power up the HDMI RX termination and assert HPD towards the source. */
static int anx78xx_set_hpd(struct anx78xx *anx78xx)
{
	int err;

	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_RX_P0],
				 SP_TMDS_CTRL_BASE + 7, SP_PD_RT);
	if (err)
		return err;

	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL3_REG,
			       SP_HPD_OUT);
	if (err)
		return err;

	return 0;
}

/* Deassert HPD and power down the HDMI RX termination (reverse of above). */
static int anx78xx_clear_hpd(struct anx78xx *anx78xx)
{
	int err;

	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL3_REG,
				 SP_HPD_OUT);
	if (err)
		return err;

	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0],
			       SP_TMDS_CTRL_BASE + 7, SP_PD_RT);
	if (err)
		return err;

	return 0;
}

static const struct reg_sequence tmds_phy_initialization[] = {
	{ SP_TMDS_CTRL_BASE +  1, 0x90 },
	{ SP_TMDS_CTRL_BASE +  2, 0xa9 },
	{ SP_TMDS_CTRL_BASE +  6, 0x92 },
	{ SP_TMDS_CTRL_BASE +  7, 0x80 },
	{ SP_TMDS_CTRL_BASE + 20, 0xf2 },
	{ SP_TMDS_CTRL_BASE + 22, 0xc4 },
	{ SP_TMDS_CTRL_BASE + 23, 0x18 },
};

/*
 * Bring up the HDMI receiver side: mute A/V, pulse the RX soft resets,
 * enable detection/exception logic, program the TMDS PHY and leave HPD
 * deasserted. The register write order follows the vendor sequence and
 * must not be changed.
 */
static int anx78xx_rx_initialization(struct anx78xx *anx78xx)
{
	int err;

	err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_HDMI_MUTE_CTRL_REG,
			   SP_AUD_MUTE | SP_VID_MUTE);
	if (err)
		return err;

	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0], SP_CHIP_CTRL_REG,
			       SP_MAN_HDMI5V_DET | SP_PLLLOCK_CKDT_EN |
			       SP_DIGITAL_CKDT_EN);
	if (err)
		return err;

	/* Pulse the RX soft resets: assert... */
	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0],
			       SP_SOFTWARE_RESET1_REG, SP_HDCP_MAN_RST |
			       SP_SW_MAN_RST | SP_TMDS_RST | SP_VIDEO_RST);
	if (err)
		return err;

	/* ...then release. */
	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_RX_P0],
				 SP_SOFTWARE_RESET1_REG, SP_HDCP_MAN_RST |
				 SP_SW_MAN_RST | SP_TMDS_RST | SP_VIDEO_RST);
	if (err)
		return err;

	/* Sync detect change, GP set mute */
	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0],
			       SP_AUD_EXCEPTION_ENABLE_BASE + 1, BIT(5) |
			       BIT(6));
	if (err)
		return err;

	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0],
			       SP_AUD_EXCEPTION_ENABLE_BASE + 3,
			       SP_AEC_EN21);
	if (err)
		return err;

	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0], SP_AUDVID_CTRL_REG,
			       SP_AVC_EN | SP_AAC_OE | SP_AAC_EN);
	if (err)
		return err;

	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_RX_P0],
				 SP_SYSTEM_POWER_DOWN1_REG, SP_PWDN_CTRL);
	if (err)
		return err;

	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0],
			       SP_VID_DATA_RANGE_CTRL_REG, SP_R2Y_INPUT_LIMIT);
	if (err)
		return err;

	/* Enable DDC stretch */
	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
			   SP_DP_EXTRA_I2C_DEV_ADDR_REG, SP_I2C_EXTRA_ADDR);
	if (err)
		return err;

	/* TMDS phy initialization */
	err = regmap_multi_reg_write(anx78xx->map[I2C_IDX_RX_P0],
				     tmds_phy_initialization,
				     ARRAY_SIZE(tmds_phy_initialization));
	if (err)
		return err;

	err = anx78xx_clear_hpd(anx78xx);
	if (err)
		return err;

	return 0;
}

static const u8 dp_tx_output_precise_tune_bits[20] = {
	0x01, 0x03, 0x07, 0x7f, 0x71, 0x6b, 0x7f,
	0x73, 0x7f, 0x7f, 0x00, 0x00, 0x00, 0x00,
	0x0c, 0x42, 0x1e, 0x3e, 0x72, 0x7e,
};

/* Program DP TX analog settings and the emphasis tuning table. */
static int anx78xx_link_phy_initialization(struct anx78xx *anx78xx)
{
	int err;

	/*
	 * REVISIT : It is writing to a RESERVED bits in Analog Control 0
	 * register.
	 */
	err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_ANALOG_CTRL0_REG,
			   0x02);
	if (err)
		return err;

	/*
	 * Write DP TX output emphasis precise tune bits.
	 */
	err = regmap_bulk_write(anx78xx->map[I2C_IDX_TX_P1],
				SP_DP_TX_LT_CTRL0_REG,
				dp_tx_output_precise_tune_bits,
				ARRAY_SIZE(dp_tx_output_precise_tune_bits));

	if (err)
		return err;

	return 0;
}

/*
 * Configure all timers derived from the 27MHz crystal (XTAL_CLK is the
 * frequency in units of 100kHz).
 */
static int anx78xx_xtal_clk_sel(struct anx78xx *anx78xx)
{
	unsigned int value;
	int err;

	err = regmap_update_bits(anx78xx->map[I2C_IDX_TX_P2],
				 SP_ANALOG_DEBUG2_REG,
				 SP_XTAL_FRQ | SP_FORCE_SW_OFF_BYPASS,
				 SP_XTAL_FRQ_27M);
	if (err)
		return err;

	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL3_REG,
			   XTAL_CLK & SP_WAIT_COUNTER_7_0_MASK);
	if (err)
		return err;

	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL4_REG,
			   ((XTAL_CLK & 0xff00) >> 2) | (XTAL_CLK / 10));
	if (err)
		return err;

	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
			   SP_I2C_GEN_10US_TIMER0_REG, XTAL_CLK & 0xff);
	if (err)
		return err;

	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
			   SP_I2C_GEN_10US_TIMER1_REG,
			   (XTAL_CLK & 0xff00) >> 8);
	if (err)
		return err;

	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_MISC_CTRL_REG,
			   XTAL_CLK / 10 - 1);
	if (err)
		return err;

	err = regmap_read(anx78xx->map[I2C_IDX_RX_P0],
			  SP_HDMI_US_TIMER_CTRL_REG,
			  &value);
	if (err)
		return err;

	err = regmap_write(anx78xx->map[I2C_IDX_RX_P0],
			   SP_HDMI_US_TIMER_CTRL_REG,
			   (value & SP_MS_TIMER_MARGIN_10_8_MASK) |
			   ((((XTAL_CLK / 10) >> 1) - 2) << 3));
	if (err)
		return err;

	return 0;
}

/* Password sequence unlocking access to the OTP HDCP key block. */
static const struct reg_sequence otp_key_protect[] = {
	{ SP_OTP_KEY_PROTECT1_REG, SP_OTP_PSW1 },
	{ SP_OTP_KEY_PROTECT2_REG, SP_OTP_PSW2 },
	{ SP_OTP_KEY_PROTECT3_REG, SP_OTP_PSW3 },
};

/*
 * Bring up the DP transmitter side: AUX analog setup, HDCP policy,
 * crystal-derived timers, polling/training tweaks and the link PHY.
 * Vendor-specified ordering; do not reorder the writes.
 */
static int anx78xx_tx_initialization(struct anx78xx *anx78xx)
{
	int err;

	/* Set terminal resistor to 50 ohm */
	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL2_REG,
			   0x30);
	if (err)
		return err;

	/* Enable aux double diff output */
	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
			       SP_DP_AUX_CH_CTRL2_REG, 0x08);
	if (err)
		return err;

	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0],
				 SP_DP_HDCP_CTRL_REG, SP_AUTO_EN |
				 SP_AUTO_START);
	if (err)
		return err;

	err = regmap_multi_reg_write(anx78xx->map[I2C_IDX_TX_P0],
				     otp_key_protect,
				     ARRAY_SIZE(otp_key_protect));
	if (err)
		return err;

	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
			       SP_HDCP_KEY_COMMAND_REG, SP_DISABLE_SYNC_HDCP);
	if (err)
		return err;

	err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL8_REG,
			   SP_VID_VRES_TH);
	if (err)
		return err;

	/*
	 * DP HDCP auto authentication wait timer (when downstream starts to
	 * auth, DP side will wait for this period then do auth automatically)
	 */
	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_HDCP_AUTO_TIMER_REG,
			   0x00);
	if (err)
		return err;

	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
			       SP_DP_HDCP_CTRL_REG, SP_LINK_POLLING);
	if (err)
		return err;

	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
			       SP_DP_LINK_DEBUG_CTRL_REG, SP_M_VID_DEBUG);
	if (err)
		return err;

	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2],
			       SP_ANALOG_DEBUG2_REG, SP_POWERON_TIME_1P5MS);
	if (err)
		return err;

	err = anx78xx_xtal_clk_sel(anx78xx);
	if (err)
		return err;

	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_DEFER_CTRL_REG,
			   SP_DEFER_CTRL_EN | 0x0c);
	if (err)
		return err;

	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
			       SP_DP_POLLING_CTRL_REG,
			       SP_AUTO_POLLING_DISABLE);
	if (err)
		return err;

	/*
	 * Short the link integrity check timer to speed up bstatus
	 * polling for HDCP CTS item 1A-07
	 */
	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
			   SP_HDCP_LINK_CHECK_TIMER_REG, 0x1d);
	if (err)
		return err;

	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
			       SP_DP_MISC_CTRL_REG, SP_EQ_TRAINING_LOOP);
	if (err)
		return err;

	/* Power down the main link by default */
	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
			       SP_DP_ANALOG_POWER_DOWN_REG, SP_CH0_PD);
	if (err)
		return err;

	err = anx78xx_link_phy_initialization(anx78xx);
	if (err)
		return err;

	/* Gen m_clk with downspreading */
	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
			       SP_DP_M_CALCULATION_CTRL_REG, SP_M_GEN_CLK_SEL);
	if (err)
		return err;

	return 0;
}

/* Unmask the HPD, link-training and clock/sync-detect interrupts. */
static int anx78xx_enable_interrupts(struct anx78xx *anx78xx)
{
	int err;

	/*
	 * BIT0: INT pin assertion polarity: 1 = assert high
	 * BIT1: INT pin output type: 0 = push/pull
	 */
	err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_INT_CTRL_REG, 0x01);
	if (err)
		return err;

	err = regmap_write(anx78xx->map[I2C_IDX_TX_P2],
			   SP_COMMON_INT_MASK4_REG, SP_HPD_LOST | SP_HPD_PLUG);
	if (err)
		return err;

	err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_DP_INT_MASK1_REG,
			   SP_TRAINING_FINISH);
	if (err)
		return err;

	err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_INT_MASK1_REG,
			   SP_CKDT_CHG | SP_SCDT_CHG);
	if (err)
		return err;

	return 0;
}

/*
 * Power-up sequence: regulator on, reset asserted, power-down released,
 * reset released, then enable the register blocks. Delays follow the
 * vendor's timing requirements.
 */
static void anx78xx_poweron(struct anx78xx *anx78xx)
{
	struct anx78xx_platform_data *pdata = &anx78xx->pdata;
	int err;

	if (WARN_ON(anx78xx->powered))
		return;

	if (pdata->dvdd10) {
		err = regulator_enable(pdata->dvdd10);
		if (err) {
			DRM_ERROR("Failed to enable DVDD10 regulator: %d\n",
				  err);
			return;
		}

		usleep_range(1000, 2000);
	}

	gpiod_set_value_cansleep(pdata->gpiod_reset, 1);
	usleep_range(1000, 2000);

	gpiod_set_value_cansleep(pdata->gpiod_pd, 0);
	usleep_range(1000, 2000);

	gpiod_set_value_cansleep(pdata->gpiod_reset, 0);

	/* Power on registers module */
	anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_POWERDOWN_CTRL_REG,
			 SP_HDCP_PD | SP_AUDIO_PD | SP_VIDEO_PD | SP_LINK_PD);
	anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2], SP_POWERDOWN_CTRL_REG,
			   SP_REGISTER_PD | SP_TOTAL_PD);

	anx78xx->powered = true;
}

/* Power-down sequence: reverse of anx78xx_poweron(). */
static void anx78xx_poweroff(struct anx78xx *anx78xx)
{
	struct anx78xx_platform_data *pdata = &anx78xx->pdata;
	int err;

	if (WARN_ON(!anx78xx->powered))
		return;

	gpiod_set_value_cansleep(pdata->gpiod_reset, 1);
	usleep_range(1000, 2000);

	gpiod_set_value_cansleep(pdata->gpiod_pd, 1);
	usleep_range(1000, 2000);

	if (pdata->dvdd10) {
		err = regulator_disable(pdata->dvdd10);
		if (err) {
			DRM_ERROR("Failed to disable DVDD10 regulator: %d\n",
				  err);
			return;
		}

		usleep_range(1000, 2000);
	}

	anx78xx->powered = false;
}

/*
 * Full bring-up after power-on: enable module power domains, interrupts,
 * then the RX and TX init sequences; powers the chip back off on error.
 */
static int anx78xx_start(struct anx78xx *anx78xx)
{
	int err;

	/* Power on all modules */
	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2],
				 SP_POWERDOWN_CTRL_REG,
				 SP_HDCP_PD | SP_AUDIO_PD | SP_VIDEO_PD |
				 SP_LINK_PD);
	/*
	 * NOTE(review): 'err' from the power-on write above is overwritten
	 * without being checked - confirm this is intentional.
	 */
	err = anx78xx_enable_interrupts(anx78xx);
	if (err) {
		DRM_ERROR("Failed to enable interrupts: %d\n", err);
		goto err_poweroff;
	}

	err = anx78xx_rx_initialization(anx78xx);
	if (err) {
		DRM_ERROR("Failed receiver initialization: %d\n", err);
		goto err_poweroff;
	}

	err = anx78xx_tx_initialization(anx78xx);
	if (err) {
		DRM_ERROR("Failed transmitter initialization: %d\n", err);
		goto err_poweroff;
	}

	/*
	 * This delay seems to help keep the hardware in a good state. Without
	 * it, there are times where it fails silently.
	 */
	usleep_range(10000, 15000);

	return 0;

err_poweroff:
	DRM_ERROR("Failed SlimPort transmitter initialization: %d\n", err);
	anx78xx_poweroff(anx78xx);

	return err;
}

/* Acquire the regulator and the HPD/power-down/reset GPIOs. */
static int anx78xx_init_pdata(struct anx78xx *anx78xx)
{
	struct anx78xx_platform_data *pdata = &anx78xx->pdata;
	struct device *dev = &anx78xx->client->dev;

	/* 1.0V digital core power regulator  */
	pdata->dvdd10 = devm_regulator_get(dev, "dvdd10");
	if (IS_ERR(pdata->dvdd10)) {
		if (PTR_ERR(pdata->dvdd10) != -EPROBE_DEFER)
			DRM_ERROR("DVDD10 regulator not found\n");

		return PTR_ERR(pdata->dvdd10);
	}

	/* GPIO for HPD */
	pdata->gpiod_hpd = devm_gpiod_get(dev, "hpd", GPIOD_IN);
	if (IS_ERR(pdata->gpiod_hpd))
		return PTR_ERR(pdata->gpiod_hpd);

	/* GPIO for chip power down */
	pdata->gpiod_pd = devm_gpiod_get(dev, "pd", GPIOD_OUT_HIGH);
	if (IS_ERR(pdata->gpiod_pd))
		return PTR_ERR(pdata->gpiod_pd);

	/* GPIO for chip reset */
	pdata->gpiod_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);

	return PTR_ERR_OR_ZERO(pdata->gpiod_reset);
}

/*
 * Read the sink capabilities over AUX, power the sink up, configure
 * downspread/enhanced framing and the link rate/lane count on both ends,
 * then kick off hardware link training on the source.
 */
static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
{
	u8 dp_bw, dpcd[2];
	int err;

	err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_HDMI_MUTE_CTRL_REG,
			   0x0);
	if (err)
		return err;

	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2],
				 SP_POWERDOWN_CTRL_REG, SP_TOTAL_PD);
	if (err)
		return err;

	err = drm_dp_dpcd_readb(&anx78xx->aux, DP_MAX_LINK_RATE, &dp_bw);
	if (err < 0)
		return err;

	switch (dp_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;

	default:
		DRM_DEBUG_KMS("DP bandwidth (%#02x) not supported\n", dp_bw);
		return -EINVAL;
	}

	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL1_REG,
			       SP_VIDEO_MUTE);
	if (err)
		return err;

	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2],
				 SP_VID_CTRL1_REG, SP_VIDEO_EN);
	if (err)
		return err;

	/* Get DPCD info */
	err = drm_dp_dpcd_read(&anx78xx->aux, DP_DPCD_REV,
			       &anx78xx->dpcd, DP_RECEIVER_CAP_SIZE);
	if (err < 0) {
		DRM_ERROR("Failed to read DPCD: %d\n", err);
		return err;
	}

	/* Clear channel x SERDES power down */
	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0],
				 SP_DP_ANALOG_POWER_DOWN_REG, SP_CH0_PD);
	if (err)
		return err;

	/*
	 * Power up the sink (DP_SET_POWER register is only available on DPCD
	 * v1.1 and later).
	 */
	if (anx78xx->dpcd[DP_DPCD_REV] >= 0x11) {
		err = drm_dp_dpcd_readb(&anx78xx->aux, DP_SET_POWER, &dpcd[0]);
		if (err < 0) {
			DRM_ERROR("Failed to read DP_SET_POWER register: %d\n",
				  err);
			return err;
		}

		dpcd[0] &= ~DP_SET_POWER_MASK;
		dpcd[0] |= DP_SET_POWER_D0;

		err = drm_dp_dpcd_writeb(&anx78xx->aux, DP_SET_POWER, dpcd[0]);
		if (err < 0) {
			DRM_ERROR("Failed to power up DisplayPort link: %d\n",
				  err);
			return err;
		}

		/*
		 * According to the DP 1.1 specification, a "Sink Device must
		 * exit the power saving state within 1 ms" (Section 2.5.3.1,
		 * Table 5-52, "Sink Control Field" (register 0x600).
		 */
		usleep_range(1000, 2000);
	}

	/* Possibly enable downspread on the sink */
	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
			   SP_DP_DOWNSPREAD_CTRL1_REG, 0);
	if (err)
		return err;

	if (anx78xx->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5) {
		DRM_DEBUG("Enable downspread on the sink\n");
		/* 4000PPM */
		err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
				   SP_DP_DOWNSPREAD_CTRL1_REG, 8);
		if (err)
			return err;

		err = drm_dp_dpcd_writeb(&anx78xx->aux, DP_DOWNSPREAD_CTRL,
					 DP_SPREAD_AMP_0_5);
		if (err < 0)
			return err;
	} else {
		err = drm_dp_dpcd_writeb(&anx78xx->aux, DP_DOWNSPREAD_CTRL, 0);
		if (err < 0)
			return err;
	}

	/* Set the lane count and the link rate on the sink */
	if (drm_dp_enhanced_frame_cap(anx78xx->dpcd))
		err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
				       SP_DP_SYSTEM_CTRL_BASE + 4,
				       SP_ENHANCED_MODE);
	else
		err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0],
					 SP_DP_SYSTEM_CTRL_BASE + 4,
					 SP_ENHANCED_MODE);
	if (err)
		return err;

	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
			   SP_DP_MAIN_LINK_BW_SET_REG,
			   anx78xx->dpcd[DP_MAX_LINK_RATE]);
	if (err)
		return err;

	dpcd[1] = drm_dp_max_lane_count(anx78xx->dpcd);

	if (drm_dp_enhanced_frame_cap(anx78xx->dpcd))
		dpcd[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;

	/*
	 * NOTE(review): dpcd[0] is never loaded with a link-rate code before
	 * this two-byte write to DP_LINK_BW_SET; on the DPCD >= 1.1 path it
	 * still holds the DP_SET_POWER value, otherwise it is uninitialized.
	 * Verify against upstream.
	 */
	err = drm_dp_dpcd_write(&anx78xx->aux, DP_LINK_BW_SET, dpcd,
				sizeof(dpcd));
	if (err < 0) {
		DRM_ERROR("Failed to configure link: %d\n", err);
		return err;
	}

	/* Start training on the source */
	err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_LT_CTRL_REG,
			   SP_LT_EN);
	if (err)
		return err;

	return 0;
}

/* Unmute and enable the DP video output. */
static int anx78xx_config_dp_output(struct anx78xx *anx78xx)
{
	int err;

	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL1_REG,
				 SP_VIDEO_MUTE);
	if (err)
		return err;

	/* Enable DP output */
	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL1_REG,
			       SP_VIDEO_EN);
	if (err)
		return err;

	return 0;
}

/*
 * Pack an AVI infoframe and load it into the hardware packet buffer:
 * disable sending, update the payload, flag the update, re-enable.
 */
static int anx78xx_send_video_infoframe(struct anx78xx *anx78xx,
					struct hdmi_avi_infoframe *frame)
{
	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
	int err;

	err = hdmi_avi_infoframe_pack(frame, buffer, sizeof(buffer));
	if (err < 0) {
		DRM_ERROR("Failed to pack AVI infoframe: %d\n", err);
		return err;
	}

	err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0],
				 SP_PACKET_SEND_CTRL_REG, SP_AVI_IF_EN);
	if (err)
		return err;

	err = regmap_bulk_write(anx78xx->map[I2C_IDX_TX_P2],
				SP_INFOFRAME_AVI_DB1_REG, buffer,
				frame->length);
	if (err)
		return err;

	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
			       SP_PACKET_SEND_CTRL_REG, SP_AVI_IF_UD);
	if (err)
		return err;

	err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
			       SP_PACKET_SEND_CTRL_REG, SP_AVI_IF_EN);
	if (err)
		return err;

	return 0;
}

/* Check via DPCD that at least one downstream sink is attached. */
static int anx78xx_get_downstream_info(struct anx78xx *anx78xx)
{
	u8 value;
	int err;

	err = drm_dp_dpcd_readb(&anx78xx->aux, DP_SINK_COUNT, &value);
	if (err < 0) {
		DRM_ERROR("Get sink count failed %d\n", err);
		return err;
	}

	if (!DP_GET_SINK_COUNT(value)) {
		DRM_ERROR("Downstream disconnected\n");
		return -EIO;
	}

	return 0;
}

/*
 * Connector .get_modes: read (and cache) the sink's EDID over AUX and
 * add its modes. Returns 0 on any failure (no modes).
 */
static int anx78xx_get_modes(struct drm_connector *connector)
{
	struct anx78xx *anx78xx = connector_to_anx78xx(connector);
	int err, num_modes = 0;

	if (WARN_ON(!anx78xx->powered))
		return 0;

	if (anx78xx->edid)
		return drm_add_edid_modes(connector, anx78xx->edid);

	mutex_lock(&anx78xx->lock);

	err = anx78xx_get_downstream_info(anx78xx);
	if (err) {
		DRM_ERROR("Failed to get downstream info: %d\n", err);
		goto unlock;
	}

	anx78xx->edid = drm_get_edid(connector, &anx78xx->aux.ddc);
	if (!anx78xx->edid) {
		DRM_ERROR("Failed to read EDID\n");
		goto unlock;
	}

	err = drm_connector_update_edid_property(connector,
						 anx78xx->edid);
	if (err) {
		DRM_ERROR("Failed to update EDID property: %d\n", err);
		goto unlock;
	}

	num_modes = drm_add_edid_modes(connector, anx78xx->edid);

unlock:
	mutex_unlock(&anx78xx->lock);

	return num_modes;
}

static const struct drm_connector_helper_funcs anx78xx_connector_helper_funcs = {
	.get_modes = anx78xx_get_modes,
};

/* Connector .detect: report presence from the HPD GPIO level. */
static enum drm_connector_status anx78xx_detect(struct drm_connector *connector,
						bool force)
{
	struct anx78xx *anx78xx = connector_to_anx78xx(connector);

	if (!gpiod_get_value(anx78xx->pdata.gpiod_hpd))
		return connector_status_disconnected;

	return connector_status_connected;
}

static const struct drm_connector_funcs anx78xx_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.detect = anx78xx_detect,
	.destroy = drm_connector_cleanup,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

/*
 * Bridge .attach: register the AUX channel and create/attach the
 * DisplayPort connector. This driver still creates its own connector,
 * so DRM_BRIDGE_ATTACH_NO_CONNECTOR is rejected.
 * (Definition continues past the end of this chunk.)
 */
static int anx78xx_bridge_attach(struct drm_bridge *bridge,
				 enum drm_bridge_attach_flags flags)
{
	struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
	int err;

	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
		DRM_ERROR("Fix bridge driver to make connector optional!");
		return -EINVAL;
	}

	if (!bridge->encoder) {
		DRM_ERROR("Parent encoder object not found");
		return -ENODEV;
	}

	/* Register aux channel */
	anx78xx->aux.name = "DP-AUX";
	anx78xx->aux.dev = &anx78xx->client->dev;
	anx78xx->aux.drm_dev = bridge->dev;
	anx78xx->aux.transfer = anx78xx_aux_transfer;

	err = drm_dp_aux_register(&anx78xx->aux);
	if (err < 0) {
		DRM_ERROR("Failed to register aux channel: %d\n", err);
		return err;
	}

	err = drm_connector_init(bridge->dev, &anx78xx->connector,
				 &anx78xx_connector_funcs,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (err) {
		DRM_ERROR("Failed to initialize connector: %d\n", err);
		goto aux_unregister;
	}

	drm_connector_helper_add(&anx78xx->connector,
				 &anx78xx_connector_helper_funcs);

	anx78xx->connector.polled = DRM_CONNECTOR_POLL_HPD;

	err = drm_connector_attach_encoder(&anx78xx->connector,
					   bridge->encoder);
	if (err) {
		DRM_ERROR("Failed to link up connector to encoder: %d\n", err);
		goto connector_cleanup;
	}

	err = drm_connector_register(&anx78xx->connector);
	if (err) {
		DRM_ERROR("Failed to register connector: %d\n", err);
		goto connector_cleanup;
	}

	return 0;
connector_cleanup:
	drm_connector_cleanup(&anx78xx->connector);
aux_unregister:
	drm_dp_aux_unregister(&anx78xx->aux);
	return err;
} static void anx78xx_bridge_detach(struct drm_bridge *bridge) { drm_dp_aux_unregister(&bridge_to_anx78xx(bridge)->aux); } static enum drm_mode_status anx78xx_bridge_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode) { if (mode->flags & DRM_MODE_FLAG_INTERLACE) return MODE_NO_INTERLACE; /* Max 1200p at 5.4 Ghz, one lane */ if (mode->clock > 154000) return MODE_CLOCK_HIGH; return MODE_OK; } static void anx78xx_bridge_disable(struct drm_bridge *bridge) { struct anx78xx *anx78xx = bridge_to_anx78xx(bridge); /* Power off all modules except configuration registers access */ anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_POWERDOWN_CTRL_REG, SP_HDCP_PD | SP_AUDIO_PD | SP_VIDEO_PD | SP_LINK_PD); } static void anx78xx_bridge_mode_set(struct drm_bridge *bridge, const struct drm_display_mode *mode, const struct drm_display_mode *adjusted_mode) { struct anx78xx *anx78xx = bridge_to_anx78xx(bridge); struct hdmi_avi_infoframe frame; int err; if (WARN_ON(!anx78xx->powered)) return; mutex_lock(&anx78xx->lock); err = drm_hdmi_avi_infoframe_from_display_mode(&frame, &anx78xx->connector, adjusted_mode); if (err) { DRM_ERROR("Failed to setup AVI infoframe: %d\n", err); goto unlock; } err = anx78xx_send_video_infoframe(anx78xx, &frame); if (err) DRM_ERROR("Failed to send AVI infoframe: %d\n", err); unlock: mutex_unlock(&anx78xx->lock); } static void anx78xx_bridge_enable(struct drm_bridge *bridge) { struct anx78xx *anx78xx = bridge_to_anx78xx(bridge); int err; err = anx78xx_start(anx78xx); if (err) { DRM_ERROR("Failed to initialize: %d\n", err); return; } err = anx78xx_set_hpd(anx78xx); if (err) DRM_ERROR("Failed to set HPD: %d\n", err); } static const struct drm_bridge_funcs anx78xx_bridge_funcs = { .attach = anx78xx_bridge_attach, .detach = anx78xx_bridge_detach, .mode_valid = anx78xx_bridge_mode_valid, .disable = anx78xx_bridge_disable, .mode_set = anx78xx_bridge_mode_set, .enable = anx78xx_bridge_enable, }; static 
irqreturn_t anx78xx_hpd_threaded_handler(int irq, void *data) { struct anx78xx *anx78xx = data; int err; if (anx78xx->powered) return IRQ_HANDLED; mutex_lock(&anx78xx->lock); /* Cable is pulled, power on the chip */ anx78xx_poweron(anx78xx); err = anx78xx_enable_interrupts(anx78xx); if (err) DRM_ERROR("Failed to enable interrupts: %d\n", err); mutex_unlock(&anx78xx->lock); return IRQ_HANDLED; } static int anx78xx_handle_dp_int_1(struct anx78xx *anx78xx, u8 irq) { int err; DRM_DEBUG_KMS("Handle DP interrupt 1: %02x\n", irq); err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_DP_INT_STATUS1_REG, irq); if (err) return err; if (irq & SP_TRAINING_FINISH) { DRM_DEBUG_KMS("IRQ: hardware link training finished\n"); err = anx78xx_config_dp_output(anx78xx); } return err; } static bool anx78xx_handle_common_int_4(struct anx78xx *anx78xx, u8 irq) { bool event = false; int err; DRM_DEBUG_KMS("Handle common interrupt 4: %02x\n", irq); err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_COMMON_INT_STATUS4_REG, irq); if (err) { DRM_ERROR("Failed to write SP_COMMON_INT_STATUS4 %d\n", err); return event; } if (irq & SP_HPD_LOST) { DRM_DEBUG_KMS("IRQ: Hot plug detect - cable is pulled out\n"); event = true; anx78xx_poweroff(anx78xx); /* Free cached EDID */ kfree(anx78xx->edid); anx78xx->edid = NULL; } else if (irq & SP_HPD_PLUG) { DRM_DEBUG_KMS("IRQ: Hot plug detect - cable plug\n"); event = true; } return event; } static void anx78xx_handle_hdmi_int_1(struct anx78xx *anx78xx, u8 irq) { unsigned int value; int err; DRM_DEBUG_KMS("Handle HDMI interrupt 1: %02x\n", irq); err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_INT_STATUS1_REG, irq); if (err) { DRM_ERROR("Write HDMI int 1 failed: %d\n", err); return; } if ((irq & SP_CKDT_CHG) || (irq & SP_SCDT_CHG)) { DRM_DEBUG_KMS("IRQ: HDMI input detected\n"); err = regmap_read(anx78xx->map[I2C_IDX_RX_P0], SP_SYSTEM_STATUS_REG, &value); if (err) { DRM_ERROR("Read system status reg failed: %d\n", err); return; } if (!(value & 
SP_TMDS_CLOCK_DET)) { DRM_DEBUG_KMS("IRQ: *** Waiting for HDMI clock ***\n"); return; } if (!(value & SP_TMDS_DE_DET)) { DRM_DEBUG_KMS("IRQ: *** Waiting for HDMI signal ***\n"); return; } err = anx78xx_dp_link_training(anx78xx); if (err) DRM_ERROR("Failed to start link training: %d\n", err); } } static irqreturn_t anx78xx_intp_threaded_handler(int unused, void *data) { struct anx78xx *anx78xx = data; bool event = false; unsigned int irq; int err; mutex_lock(&anx78xx->lock); err = regmap_read(anx78xx->map[I2C_IDX_TX_P2], SP_DP_INT_STATUS1_REG, &irq); if (err) { DRM_ERROR("Failed to read DP interrupt 1 status: %d\n", err); goto unlock; } if (irq) anx78xx_handle_dp_int_1(anx78xx, irq); err = regmap_read(anx78xx->map[I2C_IDX_TX_P2], SP_COMMON_INT_STATUS4_REG, &irq); if (err) { DRM_ERROR("Failed to read common interrupt 4 status: %d\n", err); goto unlock; } if (irq) event = anx78xx_handle_common_int_4(anx78xx, irq); /* Make sure we are still powered after handle HPD events */ if (!anx78xx->powered) goto unlock; err = regmap_read(anx78xx->map[I2C_IDX_RX_P0], SP_INT_STATUS1_REG, &irq); if (err) { DRM_ERROR("Failed to read HDMI int 1 status: %d\n", err); goto unlock; } if (irq) anx78xx_handle_hdmi_int_1(anx78xx, irq); unlock: mutex_unlock(&anx78xx->lock); if (event) drm_helper_hpd_irq_event(anx78xx->connector.dev); return IRQ_HANDLED; } static void unregister_i2c_dummy_clients(struct anx78xx *anx78xx) { unsigned int i; for (i = 0; i < ARRAY_SIZE(anx78xx->i2c_dummy); i++) i2c_unregister_device(anx78xx->i2c_dummy[i]); } static const struct regmap_config anx78xx_regmap_config = { .reg_bits = 8, .val_bits = 8, }; static const u16 anx78xx_chipid_list[] = { 0x7808, 0x7812, 0x7814, 0x7818, }; static int anx78xx_i2c_probe(struct i2c_client *client) { struct anx78xx *anx78xx; struct anx78xx_platform_data *pdata; unsigned int i, idl, idh, version; const u8 *i2c_addresses; bool found = false; int err; anx78xx = devm_kzalloc(&client->dev, sizeof(*anx78xx), GFP_KERNEL); if (!anx78xx) 
return -ENOMEM; pdata = &anx78xx->pdata; mutex_init(&anx78xx->lock); #if IS_ENABLED(CONFIG_OF) anx78xx->bridge.of_node = client->dev.of_node; #endif anx78xx->client = client; i2c_set_clientdata(client, anx78xx); err = anx78xx_init_pdata(anx78xx); if (err) { if (err != -EPROBE_DEFER) DRM_ERROR("Failed to initialize pdata: %d\n", err); return err; } pdata->hpd_irq = gpiod_to_irq(pdata->gpiod_hpd); if (pdata->hpd_irq < 0) { DRM_ERROR("Failed to get HPD IRQ: %d\n", pdata->hpd_irq); return -ENODEV; } pdata->intp_irq = client->irq; if (!pdata->intp_irq) { DRM_ERROR("Failed to get CABLE_DET and INTP IRQ\n"); return -ENODEV; } /* Map slave addresses of ANX7814 */ i2c_addresses = device_get_match_data(&client->dev); for (i = 0; i < I2C_NUM_ADDRESSES; i++) { struct i2c_client *i2c_dummy; i2c_dummy = i2c_new_dummy_device(client->adapter, i2c_addresses[i] >> 1); if (IS_ERR(i2c_dummy)) { err = PTR_ERR(i2c_dummy); DRM_ERROR("Failed to reserve I2C bus %02x: %d\n", i2c_addresses[i], err); goto err_unregister_i2c; } anx78xx->i2c_dummy[i] = i2c_dummy; anx78xx->map[i] = devm_regmap_init_i2c(anx78xx->i2c_dummy[i], &anx78xx_regmap_config); if (IS_ERR(anx78xx->map[i])) { err = PTR_ERR(anx78xx->map[i]); DRM_ERROR("Failed regmap initialization %02x\n", i2c_addresses[i]); goto err_unregister_i2c; } } /* Look for supported chip ID */ anx78xx_poweron(anx78xx); err = regmap_read(anx78xx->map[I2C_IDX_TX_P2], SP_DEVICE_IDL_REG, &idl); if (err) goto err_poweroff; err = regmap_read(anx78xx->map[I2C_IDX_TX_P2], SP_DEVICE_IDH_REG, &idh); if (err) goto err_poweroff; anx78xx->chipid = (u8)idl | ((u8)idh << 8); err = regmap_read(anx78xx->map[I2C_IDX_TX_P2], SP_DEVICE_VERSION_REG, &version); if (err) goto err_poweroff; for (i = 0; i < ARRAY_SIZE(anx78xx_chipid_list); i++) { if (anx78xx->chipid == anx78xx_chipid_list[i]) { DRM_INFO("Found ANX%x (ver. %d) SlimPort Transmitter\n", anx78xx->chipid, version); found = true; break; } } if (!found) { DRM_ERROR("ANX%x (ver. 
%d) not supported by this driver\n", anx78xx->chipid, version); err = -ENODEV; goto err_poweroff; } err = devm_request_threaded_irq(&client->dev, pdata->hpd_irq, NULL, anx78xx_hpd_threaded_handler, IRQF_TRIGGER_RISING | IRQF_ONESHOT, "anx78xx-hpd", anx78xx); if (err) { DRM_ERROR("Failed to request CABLE_DET threaded IRQ: %d\n", err); goto err_poweroff; } err = devm_request_threaded_irq(&client->dev, pdata->intp_irq, NULL, anx78xx_intp_threaded_handler, IRQF_TRIGGER_RISING | IRQF_ONESHOT, "anx78xx-intp", anx78xx); if (err) { DRM_ERROR("Failed to request INTP threaded IRQ: %d\n", err); goto err_poweroff; } anx78xx->bridge.funcs = &anx78xx_bridge_funcs; drm_bridge_add(&anx78xx->bridge); /* If cable is pulled out, just poweroff and wait for HPD event */ if (!gpiod_get_value(anx78xx->pdata.gpiod_hpd)) anx78xx_poweroff(anx78xx); return 0; err_poweroff: anx78xx_poweroff(anx78xx); err_unregister_i2c: unregister_i2c_dummy_clients(anx78xx); return err; } static void anx78xx_i2c_remove(struct i2c_client *client) { struct anx78xx *anx78xx = i2c_get_clientdata(client); drm_bridge_remove(&anx78xx->bridge); unregister_i2c_dummy_clients(anx78xx); kfree(anx78xx->edid); } static const struct i2c_device_id anx78xx_id[] = { { "anx7814", 0 }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(i2c, anx78xx_id); static const struct of_device_id anx78xx_match_table[] = { { .compatible = "analogix,anx7808", .data = anx7808_i2c_addresses }, { .compatible = "analogix,anx7812", .data = anx781x_i2c_addresses }, { .compatible = "analogix,anx7814", .data = anx781x_i2c_addresses }, { .compatible = "analogix,anx7818", .data = anx781x_i2c_addresses }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, anx78xx_match_table); static struct i2c_driver anx78xx_driver = { .driver = { .name = "anx7814", .of_match_table = anx78xx_match_table, }, .probe = anx78xx_i2c_probe, .remove = anx78xx_i2c_remove, .id_table = anx78xx_id, }; module_i2c_driver(anx78xx_driver); MODULE_DESCRIPTION("ANX78xx SlimPort Transmitter 
driver"); MODULE_AUTHOR("Enric Balletbo i Serra <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Analogix DP (Display port) core register interface driver. * * Copyright (C) 2012 Samsung Electronics Co., Ltd. * Author: Jingoo Han <[email protected]> */ #include <linux/delay.h> #include <linux/device.h> #include <linux/gpio/consumer.h> #include <linux/io.h> #include <linux/iopoll.h> #include <drm/bridge/analogix_dp.h> #include "analogix_dp_core.h" #include "analogix_dp_reg.h" #define COMMON_INT_MASK_1 0 #define COMMON_INT_MASK_2 0 #define COMMON_INT_MASK_3 0 #define COMMON_INT_MASK_4 (HOTPLUG_CHG | HPD_LOST | PLUG) #define INT_STA_MASK INT_HPD void analogix_dp_enable_video_mute(struct analogix_dp_device *dp, bool enable) { u32 reg; if (enable) { reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); reg |= HDCP_VIDEO_MUTE; writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); } else { reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); reg &= ~HDCP_VIDEO_MUTE; writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); } } void analogix_dp_stop_video(struct analogix_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); reg &= ~VIDEO_EN; writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); } void analogix_dp_lane_swap(struct analogix_dp_device *dp, bool enable) { u32 reg; if (enable) reg = LANE3_MAP_LOGIC_LANE_0 | LANE2_MAP_LOGIC_LANE_1 | LANE1_MAP_LOGIC_LANE_2 | LANE0_MAP_LOGIC_LANE_3; else reg = LANE3_MAP_LOGIC_LANE_3 | LANE2_MAP_LOGIC_LANE_2 | LANE1_MAP_LOGIC_LANE_1 | LANE0_MAP_LOGIC_LANE_0; writel(reg, dp->reg_base + ANALOGIX_DP_LANE_MAP); } void analogix_dp_init_analog_param(struct analogix_dp_device *dp) { u32 reg; reg = TX_TERMINAL_CTRL_50_OHM; writel(reg, dp->reg_base + ANALOGIX_DP_ANALOG_CTL_1); reg = SEL_24M | TX_DVDD_BIT_1_0625V; writel(reg, dp->reg_base + ANALOGIX_DP_ANALOG_CTL_2); if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) { reg = REF_CLK_24M; if (dp->plat_data->dev_type == RK3288_DP) reg ^= REF_CLK_MASK; writel(reg, dp->reg_base + ANALOGIX_DP_PLL_REG_1); writel(0x95, 
dp->reg_base + ANALOGIX_DP_PLL_REG_2); writel(0x40, dp->reg_base + ANALOGIX_DP_PLL_REG_3); writel(0x58, dp->reg_base + ANALOGIX_DP_PLL_REG_4); writel(0x22, dp->reg_base + ANALOGIX_DP_PLL_REG_5); } reg = DRIVE_DVDD_BIT_1_0625V | VCO_BIT_600_MICRO; writel(reg, dp->reg_base + ANALOGIX_DP_ANALOG_CTL_3); reg = PD_RING_OSC | AUX_TERMINAL_CTRL_50_OHM | TX_CUR1_2X | TX_CUR_16_MA; writel(reg, dp->reg_base + ANALOGIX_DP_PLL_FILTER_CTL_1); reg = CH3_AMP_400_MV | CH2_AMP_400_MV | CH1_AMP_400_MV | CH0_AMP_400_MV; writel(reg, dp->reg_base + ANALOGIX_DP_TX_AMP_TUNING_CTL); } void analogix_dp_init_interrupt(struct analogix_dp_device *dp) { /* Set interrupt pin assertion polarity as high */ writel(INT_POL1 | INT_POL0, dp->reg_base + ANALOGIX_DP_INT_CTL); /* Clear pending regisers */ writel(0xff, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_1); writel(0x4f, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_2); writel(0xe0, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_3); writel(0xe7, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_4); writel(0x63, dp->reg_base + ANALOGIX_DP_INT_STA); /* 0:mask,1: unmask */ writel(0x00, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_1); writel(0x00, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_2); writel(0x00, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_3); writel(0x00, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4); writel(0x00, dp->reg_base + ANALOGIX_DP_INT_STA_MASK); } void analogix_dp_reset(struct analogix_dp_device *dp) { u32 reg; analogix_dp_stop_video(dp); analogix_dp_enable_video_mute(dp, 0); if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) reg = RK_VID_CAP_FUNC_EN_N | RK_VID_FIFO_FUNC_EN_N | SW_FUNC_EN_N; else reg = MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N | AUD_FIFO_FUNC_EN_N | AUD_FUNC_EN_N | HDCP_FUNC_EN_N | SW_FUNC_EN_N; writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1); reg = SSC_FUNC_EN_N | AUX_FUNC_EN_N | SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N; writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2); usleep_range(20, 30); 
analogix_dp_lane_swap(dp, 0); writel(0x0, dp->reg_base + ANALOGIX_DP_SYS_CTL_1); writel(0x40, dp->reg_base + ANALOGIX_DP_SYS_CTL_2); writel(0x0, dp->reg_base + ANALOGIX_DP_SYS_CTL_3); writel(0x0, dp->reg_base + ANALOGIX_DP_SYS_CTL_4); writel(0x0, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); writel(0x0, dp->reg_base + ANALOGIX_DP_HDCP_CTL); writel(0x5e, dp->reg_base + ANALOGIX_DP_HPD_DEGLITCH_L); writel(0x1a, dp->reg_base + ANALOGIX_DP_HPD_DEGLITCH_H); writel(0x10, dp->reg_base + ANALOGIX_DP_LINK_DEBUG_CTL); writel(0x0, dp->reg_base + ANALOGIX_DP_PHY_TEST); writel(0x0, dp->reg_base + ANALOGIX_DP_VIDEO_FIFO_THRD); writel(0x20, dp->reg_base + ANALOGIX_DP_AUDIO_MARGIN); writel(0x4, dp->reg_base + ANALOGIX_DP_M_VID_GEN_FILTER_TH); writel(0x2, dp->reg_base + ANALOGIX_DP_M_AUD_GEN_FILTER_TH); writel(0x00000101, dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL); } void analogix_dp_swreset(struct analogix_dp_device *dp) { writel(RESET_DP_TX, dp->reg_base + ANALOGIX_DP_TX_SW_RESET); } void analogix_dp_config_interrupt(struct analogix_dp_device *dp) { u32 reg; /* 0: mask, 1: unmask */ reg = COMMON_INT_MASK_1; writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_1); reg = COMMON_INT_MASK_2; writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_2); reg = COMMON_INT_MASK_3; writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_3); reg = COMMON_INT_MASK_4; writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4); reg = INT_STA_MASK; writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA_MASK); } void analogix_dp_mute_hpd_interrupt(struct analogix_dp_device *dp) { u32 reg; /* 0: mask, 1: unmask */ reg = readl(dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4); reg &= ~COMMON_INT_MASK_4; writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4); reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA_MASK); reg &= ~INT_STA_MASK; writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA_MASK); } void analogix_dp_unmute_hpd_interrupt(struct analogix_dp_device *dp) { u32 reg; /* 0: mask, 1: unmask */ reg = 
COMMON_INT_MASK_4; writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4); reg = INT_STA_MASK; writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA_MASK); } enum pll_status analogix_dp_get_pll_lock_status(struct analogix_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + ANALOGIX_DP_DEBUG_CTL); if (reg & PLL_LOCK) return PLL_LOCKED; else return PLL_UNLOCKED; } void analogix_dp_set_pll_power_down(struct analogix_dp_device *dp, bool enable) { u32 reg; u32 mask = DP_PLL_PD; u32 pd_addr = ANALOGIX_DP_PLL_CTL; if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) { pd_addr = ANALOGIX_DP_PD; mask = RK_PLL_PD; } reg = readl(dp->reg_base + pd_addr); if (enable) reg |= mask; else reg &= ~mask; writel(reg, dp->reg_base + pd_addr); } void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp, enum analog_power_block block, bool enable) { u32 reg; u32 phy_pd_addr = ANALOGIX_DP_PHY_PD; u32 mask; if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) phy_pd_addr = ANALOGIX_DP_PD; switch (block) { case AUX_BLOCK: if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) mask = RK_AUX_PD; else mask = AUX_PD; reg = readl(dp->reg_base + phy_pd_addr); if (enable) reg |= mask; else reg &= ~mask; writel(reg, dp->reg_base + phy_pd_addr); break; case CH0_BLOCK: mask = CH0_PD; reg = readl(dp->reg_base + phy_pd_addr); if (enable) reg |= mask; else reg &= ~mask; writel(reg, dp->reg_base + phy_pd_addr); break; case CH1_BLOCK: mask = CH1_PD; reg = readl(dp->reg_base + phy_pd_addr); if (enable) reg |= mask; else reg &= ~mask; writel(reg, dp->reg_base + phy_pd_addr); break; case CH2_BLOCK: mask = CH2_PD; reg = readl(dp->reg_base + phy_pd_addr); if (enable) reg |= mask; else reg &= ~mask; writel(reg, dp->reg_base + phy_pd_addr); break; case CH3_BLOCK: mask = CH3_PD; reg = readl(dp->reg_base + phy_pd_addr); if (enable) reg |= mask; else reg &= ~mask; writel(reg, dp->reg_base + phy_pd_addr); break; case ANALOG_TOTAL: /* * There is no bit named DP_PHY_PD, so We used DP_INC_BG * 
to power off everything instead of DP_PHY_PD in * Rockchip */ if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) mask = DP_INC_BG; else mask = DP_PHY_PD; reg = readl(dp->reg_base + phy_pd_addr); if (enable) reg |= mask; else reg &= ~mask; writel(reg, dp->reg_base + phy_pd_addr); if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) usleep_range(10, 15); break; case POWER_ALL: if (enable) { reg = DP_ALL_PD; writel(reg, dp->reg_base + phy_pd_addr); } else { reg = DP_ALL_PD; writel(reg, dp->reg_base + phy_pd_addr); usleep_range(10, 15); reg &= ~DP_INC_BG; writel(reg, dp->reg_base + phy_pd_addr); usleep_range(10, 15); writel(0x00, dp->reg_base + phy_pd_addr); } break; default: break; } } int analogix_dp_init_analog_func(struct analogix_dp_device *dp) { u32 reg; int timeout_loop = 0; analogix_dp_set_analog_power_down(dp, POWER_ALL, 0); reg = PLL_LOCK_CHG; writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_1); reg = readl(dp->reg_base + ANALOGIX_DP_DEBUG_CTL); reg &= ~(F_PLL_LOCK | PLL_LOCK_CTRL); writel(reg, dp->reg_base + ANALOGIX_DP_DEBUG_CTL); /* Power up PLL */ if (analogix_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) { analogix_dp_set_pll_power_down(dp, 0); while (analogix_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) { timeout_loop++; if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) { dev_err(dp->dev, "failed to get pll lock status\n"); return -ETIMEDOUT; } usleep_range(10, 20); } } /* Enable Serdes FIFO function and Link symbol clock domain module */ reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_2); reg &= ~(SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N | AUX_FUNC_EN_N); writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2); return 0; } void analogix_dp_clear_hotplug_interrupts(struct analogix_dp_device *dp) { u32 reg; if (dp->hpd_gpiod) return; reg = HOTPLUG_CHG | HPD_LOST | PLUG; writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_4); reg = INT_HPD; writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA); } void analogix_dp_init_hpd(struct analogix_dp_device 
*dp) { u32 reg; if (dp->hpd_gpiod) return; analogix_dp_clear_hotplug_interrupts(dp); reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3); reg &= ~(F_HPD | HPD_CTRL); writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_3); } void analogix_dp_force_hpd(struct analogix_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3); reg = (F_HPD | HPD_CTRL); writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_3); } enum dp_irq_type analogix_dp_get_irq_type(struct analogix_dp_device *dp) { u32 reg; if (dp->hpd_gpiod) { reg = gpiod_get_value(dp->hpd_gpiod); if (reg) return DP_IRQ_TYPE_HP_CABLE_IN; else return DP_IRQ_TYPE_HP_CABLE_OUT; } else { /* Parse hotplug interrupt status register */ reg = readl(dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_4); if (reg & PLUG) return DP_IRQ_TYPE_HP_CABLE_IN; if (reg & HPD_LOST) return DP_IRQ_TYPE_HP_CABLE_OUT; if (reg & HOTPLUG_CHG) return DP_IRQ_TYPE_HP_CHANGE; return DP_IRQ_TYPE_UNKNOWN; } } void analogix_dp_reset_aux(struct analogix_dp_device *dp) { u32 reg; /* Disable AUX channel module */ reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_2); reg |= AUX_FUNC_EN_N; writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2); } void analogix_dp_init_aux(struct analogix_dp_device *dp) { u32 reg; /* Clear inerrupts related to AUX channel */ reg = RPLY_RECEIV | AUX_ERR; writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA); analogix_dp_set_analog_power_down(dp, AUX_BLOCK, true); usleep_range(10, 11); analogix_dp_set_analog_power_down(dp, AUX_BLOCK, false); analogix_dp_reset_aux(dp); /* AUX_BIT_PERIOD_EXPECTED_DELAY doesn't apply to Rockchip IP */ if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) reg = 0; else reg = AUX_BIT_PERIOD_EXPECTED_DELAY(3); /* Disable AUX transaction H/W retry */ reg |= AUX_HW_RETRY_COUNT_SEL(0) | AUX_HW_RETRY_INTERVAL_600_MICROSECONDS; writel(reg, dp->reg_base + ANALOGIX_DP_AUX_HW_RETRY_CTL); /* Receive AUX Channel DEFER commands equal to DEFFER_COUNT*64 */ reg = DEFER_CTRL_EN | DEFER_COUNT(1); writel(reg, dp->reg_base + 
ANALOGIX_DP_AUX_CH_DEFER_CTL); /* Enable AUX channel module */ reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_2); reg &= ~AUX_FUNC_EN_N; writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2); } int analogix_dp_get_plug_in_status(struct analogix_dp_device *dp) { u32 reg; if (dp->hpd_gpiod) { if (gpiod_get_value(dp->hpd_gpiod)) return 0; } else { reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3); if (reg & HPD_STATUS) return 0; } return -EINVAL; } void analogix_dp_enable_sw_function(struct analogix_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_1); reg &= ~SW_FUNC_EN_N; writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1); } void analogix_dp_set_link_bandwidth(struct analogix_dp_device *dp, u32 bwtype) { u32 reg; reg = bwtype; if ((bwtype == DP_LINK_BW_2_7) || (bwtype == DP_LINK_BW_1_62)) writel(reg, dp->reg_base + ANALOGIX_DP_LINK_BW_SET); } void analogix_dp_get_link_bandwidth(struct analogix_dp_device *dp, u32 *bwtype) { u32 reg; reg = readl(dp->reg_base + ANALOGIX_DP_LINK_BW_SET); *bwtype = reg; } void analogix_dp_set_lane_count(struct analogix_dp_device *dp, u32 count) { u32 reg; reg = count; writel(reg, dp->reg_base + ANALOGIX_DP_LANE_COUNT_SET); } void analogix_dp_get_lane_count(struct analogix_dp_device *dp, u32 *count) { u32 reg; reg = readl(dp->reg_base + ANALOGIX_DP_LANE_COUNT_SET); *count = reg; } void analogix_dp_enable_enhanced_mode(struct analogix_dp_device *dp, bool enable) { u32 reg; if (enable) { reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_4); reg |= ENHANCED; writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_4); } else { reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_4); reg &= ~ENHANCED; writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_4); } } void analogix_dp_set_training_pattern(struct analogix_dp_device *dp, enum pattern_set pattern) { u32 reg; switch (pattern) { case PRBS7: reg = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_PRBS7; writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); break; case D10_2: reg = SCRAMBLING_ENABLE 
| LINK_QUAL_PATTERN_SET_D10_2; writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); break; case TRAINING_PTN1: reg = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN1; writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); break; case TRAINING_PTN2: reg = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN2; writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); break; case DP_NONE: reg = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_DISABLE | SW_TRAINING_PATTERN_SET_NORMAL; writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); break; default: break; } } void analogix_dp_set_lane0_pre_emphasis(struct analogix_dp_device *dp, u32 level) { u32 reg; reg = readl(dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL); reg &= ~PRE_EMPHASIS_SET_MASK; reg |= level << PRE_EMPHASIS_SET_SHIFT; writel(reg, dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL); } void analogix_dp_set_lane1_pre_emphasis(struct analogix_dp_device *dp, u32 level) { u32 reg; reg = readl(dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL); reg &= ~PRE_EMPHASIS_SET_MASK; reg |= level << PRE_EMPHASIS_SET_SHIFT; writel(reg, dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL); } void analogix_dp_set_lane2_pre_emphasis(struct analogix_dp_device *dp, u32 level) { u32 reg; reg = readl(dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL); reg &= ~PRE_EMPHASIS_SET_MASK; reg |= level << PRE_EMPHASIS_SET_SHIFT; writel(reg, dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL); } void analogix_dp_set_lane3_pre_emphasis(struct analogix_dp_device *dp, u32 level) { u32 reg; reg = readl(dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL); reg &= ~PRE_EMPHASIS_SET_MASK; reg |= level << PRE_EMPHASIS_SET_SHIFT; writel(reg, dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL); } void analogix_dp_set_lane0_link_training(struct analogix_dp_device *dp, u32 training_lane) { u32 reg; reg = training_lane; writel(reg, dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL); } void analogix_dp_set_lane1_link_training(struct analogix_dp_device *dp, 
u32 training_lane) { u32 reg; reg = training_lane; writel(reg, dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL); } void analogix_dp_set_lane2_link_training(struct analogix_dp_device *dp, u32 training_lane) { u32 reg; reg = training_lane; writel(reg, dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL); } void analogix_dp_set_lane3_link_training(struct analogix_dp_device *dp, u32 training_lane) { u32 reg; reg = training_lane; writel(reg, dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL); } u32 analogix_dp_get_lane0_link_training(struct analogix_dp_device *dp) { return readl(dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL); } u32 analogix_dp_get_lane1_link_training(struct analogix_dp_device *dp) { return readl(dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL); } u32 analogix_dp_get_lane2_link_training(struct analogix_dp_device *dp) { return readl(dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL); } u32 analogix_dp_get_lane3_link_training(struct analogix_dp_device *dp) { return readl(dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL); } void analogix_dp_reset_macro(struct analogix_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + ANALOGIX_DP_PHY_TEST); reg |= MACRO_RST; writel(reg, dp->reg_base + ANALOGIX_DP_PHY_TEST); /* 10 us is the minimum reset time. 
*/ usleep_range(10, 20); reg &= ~MACRO_RST; writel(reg, dp->reg_base + ANALOGIX_DP_PHY_TEST); } void analogix_dp_init_video(struct analogix_dp_device *dp) { u32 reg; reg = VSYNC_DET | VID_FORMAT_CHG | VID_CLK_CHG; writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_1); reg = 0x0; writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_1); reg = CHA_CRI(4) | CHA_CTRL; writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_2); reg = 0x0; writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_3); reg = VID_HRES_TH(2) | VID_VRES_TH(0); writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_8); } void analogix_dp_set_video_color_format(struct analogix_dp_device *dp) { u32 reg; /* Configure the input color depth, color space, dynamic range */ reg = (dp->video_info.dynamic_range << IN_D_RANGE_SHIFT) | (dp->video_info.color_depth << IN_BPC_SHIFT) | (dp->video_info.color_space << IN_COLOR_F_SHIFT); writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_2); /* Set Input Color YCbCr Coefficients to ITU601 or ITU709 */ reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3); reg &= ~IN_YC_COEFFI_MASK; if (dp->video_info.ycbcr_coeff) reg |= IN_YC_COEFFI_ITU709; else reg |= IN_YC_COEFFI_ITU601; writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3); } int analogix_dp_is_slave_video_stream_clock_on(struct analogix_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_1); writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_1); reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_1); if (!(reg & DET_STA)) { dev_dbg(dp->dev, "Input stream clock not detected.\n"); return -EINVAL; } reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_2); writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_2); reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_2); dev_dbg(dp->dev, "wait SYS_CTL_2.\n"); if (reg & CHA_STA) { dev_dbg(dp->dev, "Input stream clk is changing\n"); return -EINVAL; } return 0; } void analogix_dp_set_video_cr_mn(struct analogix_dp_device *dp, enum clock_recovery_m_value_type type, u32 m_value, u32 n_value) { u32 reg; if (type 
== REGISTER_M) { reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_4); reg |= FIX_M_VID; writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_4); reg = m_value & 0xff; writel(reg, dp->reg_base + ANALOGIX_DP_M_VID_0); reg = (m_value >> 8) & 0xff; writel(reg, dp->reg_base + ANALOGIX_DP_M_VID_1); reg = (m_value >> 16) & 0xff; writel(reg, dp->reg_base + ANALOGIX_DP_M_VID_2); reg = n_value & 0xff; writel(reg, dp->reg_base + ANALOGIX_DP_N_VID_0); reg = (n_value >> 8) & 0xff; writel(reg, dp->reg_base + ANALOGIX_DP_N_VID_1); reg = (n_value >> 16) & 0xff; writel(reg, dp->reg_base + ANALOGIX_DP_N_VID_2); } else { reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_4); reg &= ~FIX_M_VID; writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_4); writel(0x00, dp->reg_base + ANALOGIX_DP_N_VID_0); writel(0x80, dp->reg_base + ANALOGIX_DP_N_VID_1); writel(0x00, dp->reg_base + ANALOGIX_DP_N_VID_2); } } void analogix_dp_set_video_timing_mode(struct analogix_dp_device *dp, u32 type) { u32 reg; if (type == VIDEO_TIMING_FROM_CAPTURE) { reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); reg &= ~FORMAT_SEL; writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); } else { reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); reg |= FORMAT_SEL; writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); } } void analogix_dp_enable_video_master(struct analogix_dp_device *dp, bool enable) { u32 reg; if (enable) { reg = readl(dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL); reg &= ~VIDEO_MODE_MASK; reg |= VIDEO_MASTER_MODE_EN | VIDEO_MODE_MASTER_MODE; writel(reg, dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL); } else { reg = readl(dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL); reg &= ~VIDEO_MODE_MASK; reg |= VIDEO_MODE_SLAVE_MODE; writel(reg, dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL); } } void analogix_dp_start_video(struct analogix_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); reg |= VIDEO_EN; writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); } int analogix_dp_is_video_stream_on(struct 
analogix_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3); writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_3); reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3); if (!(reg & STRM_VALID)) { dev_dbg(dp->dev, "Input video stream is not detected.\n"); return -EINVAL; } return 0; } void analogix_dp_config_video_slave_mode(struct analogix_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_1); if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) { reg &= ~(RK_VID_CAP_FUNC_EN_N | RK_VID_FIFO_FUNC_EN_N); } else { reg &= ~(MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N); reg |= MASTER_VID_FUNC_EN_N; } writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1); reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); reg &= ~INTERACE_SCAN_CFG; reg |= (dp->video_info.interlaced << 2); writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); reg &= ~VSYNC_POLARITY_CFG; reg |= (dp->video_info.v_sync_polarity << 1); writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); reg &= ~HSYNC_POLARITY_CFG; reg |= (dp->video_info.h_sync_polarity << 0); writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); reg = AUDIO_MODE_SPDIF_MODE | VIDEO_MODE_SLAVE_MODE; writel(reg, dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL); } void analogix_dp_enable_scrambling(struct analogix_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); reg &= ~SCRAMBLING_DISABLE; writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); } void analogix_dp_disable_scrambling(struct analogix_dp_device *dp) { u32 reg; reg = readl(dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); reg |= SCRAMBLING_DISABLE; writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); } void analogix_dp_enable_psr_crc(struct analogix_dp_device *dp) { writel(PSR_VID_CRC_ENABLE, dp->reg_base + ANALOGIX_DP_CRC_CON); } static ssize_t analogix_dp_get_psr_status(struct analogix_dp_device 
*dp) { ssize_t val; u8 status; val = drm_dp_dpcd_readb(&dp->aux, DP_PSR_STATUS, &status); if (val < 0) { dev_err(dp->dev, "PSR_STATUS read failed ret=%zd", val); return val; } return status; } int analogix_dp_send_psr_spd(struct analogix_dp_device *dp, struct dp_sdp *vsc, bool blocking) { unsigned int val; int ret; ssize_t psr_status; /* don't send info frame */ val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); val &= ~IF_EN; writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); /* configure single frame update mode */ writel(PSR_FRAME_UP_TYPE_BURST | PSR_CRC_SEL_HARDWARE, dp->reg_base + ANALOGIX_DP_PSR_FRAME_UPDATE_CTRL); /* configure VSC HB0~HB3 */ writel(vsc->sdp_header.HB0, dp->reg_base + ANALOGIX_DP_SPD_HB0); writel(vsc->sdp_header.HB1, dp->reg_base + ANALOGIX_DP_SPD_HB1); writel(vsc->sdp_header.HB2, dp->reg_base + ANALOGIX_DP_SPD_HB2); writel(vsc->sdp_header.HB3, dp->reg_base + ANALOGIX_DP_SPD_HB3); /* configure reused VSC PB0~PB3, magic number from vendor */ writel(0x00, dp->reg_base + ANALOGIX_DP_SPD_PB0); writel(0x16, dp->reg_base + ANALOGIX_DP_SPD_PB1); writel(0xCE, dp->reg_base + ANALOGIX_DP_SPD_PB2); writel(0x5D, dp->reg_base + ANALOGIX_DP_SPD_PB3); /* configure DB0 / DB1 values */ writel(vsc->db[0], dp->reg_base + ANALOGIX_DP_VSC_SHADOW_DB0); writel(vsc->db[1], dp->reg_base + ANALOGIX_DP_VSC_SHADOW_DB1); /* set reuse spd inforframe */ val = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3); val |= REUSE_SPD_EN; writel(val, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3); /* mark info frame update */ val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); val = (val | IF_UP) & ~IF_EN; writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); /* send info frame */ val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); val |= IF_EN; writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); if (!blocking) return 0; /* * db[1]!=0: entering PSR, wait for fully active remote frame buffer. 
* db[1]==0: exiting PSR, wait for either * (a) ACTIVE_RESYNC - the sink "must display the * incoming active frames from the Source device with no visible * glitches and/or artifacts", even though timings may still be * re-synchronizing; or * (b) INACTIVE - the transition is fully complete. */ ret = readx_poll_timeout(analogix_dp_get_psr_status, dp, psr_status, psr_status >= 0 && ((vsc->db[1] && psr_status == DP_PSR_SINK_ACTIVE_RFB) || (!vsc->db[1] && (psr_status == DP_PSR_SINK_ACTIVE_RESYNC || psr_status == DP_PSR_SINK_INACTIVE))), 1500, DP_TIMEOUT_PSR_LOOP_MS * 1000); if (ret) { dev_warn(dp->dev, "Failed to apply PSR %d\n", ret); return ret; } return 0; } ssize_t analogix_dp_transfer(struct analogix_dp_device *dp, struct drm_dp_aux_msg *msg) { u32 reg; u32 status_reg; u8 *buffer = msg->buffer; unsigned int i; int num_transferred = 0; int ret; /* Buffer size of AUX CH is 16 bytes */ if (WARN_ON(msg->size > 16)) return -E2BIG; /* Clear AUX CH data buffer */ reg = BUF_CLR; writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL); switch (msg->request & ~DP_AUX_I2C_MOT) { case DP_AUX_I2C_WRITE: reg = AUX_TX_COMM_WRITE | AUX_TX_COMM_I2C_TRANSACTION; if (msg->request & DP_AUX_I2C_MOT) reg |= AUX_TX_COMM_MOT; break; case DP_AUX_I2C_READ: reg = AUX_TX_COMM_READ | AUX_TX_COMM_I2C_TRANSACTION; if (msg->request & DP_AUX_I2C_MOT) reg |= AUX_TX_COMM_MOT; break; case DP_AUX_NATIVE_WRITE: reg = AUX_TX_COMM_WRITE | AUX_TX_COMM_DP_TRANSACTION; break; case DP_AUX_NATIVE_READ: reg = AUX_TX_COMM_READ | AUX_TX_COMM_DP_TRANSACTION; break; default: return -EINVAL; } reg |= AUX_LENGTH(msg->size); writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1); /* Select DPCD device address */ reg = AUX_ADDR_7_0(msg->address); writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0); reg = AUX_ADDR_15_8(msg->address); writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8); reg = AUX_ADDR_19_16(msg->address); writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16); if (!(msg->request & DP_AUX_I2C_READ)) { 
for (i = 0; i < msg->size; i++) { reg = buffer[i]; writel(reg, dp->reg_base + ANALOGIX_DP_BUF_DATA_0 + 4 * i); num_transferred++; } } /* Enable AUX CH operation */ reg = AUX_EN; /* Zero-sized messages specify address-only transactions. */ if (msg->size < 1) reg |= ADDR_ONLY; writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2); ret = readx_poll_timeout(readl, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2, reg, !(reg & AUX_EN), 25, 500 * 1000); if (ret) { dev_err(dp->dev, "AUX CH enable timeout!\n"); goto aux_error; } /* TODO: Wait for an interrupt instead of looping? */ /* Is AUX CH command reply received? */ ret = readx_poll_timeout(readl, dp->reg_base + ANALOGIX_DP_INT_STA, reg, reg & RPLY_RECEIV, 10, 20 * 1000); if (ret) { dev_err(dp->dev, "AUX CH cmd reply timeout!\n"); goto aux_error; } /* Clear interrupt source for AUX CH command reply */ writel(RPLY_RECEIV, dp->reg_base + ANALOGIX_DP_INT_STA); /* Clear interrupt source for AUX CH access error */ reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA); status_reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_STA); if ((reg & AUX_ERR) || (status_reg & AUX_STATUS_MASK)) { writel(AUX_ERR, dp->reg_base + ANALOGIX_DP_INT_STA); dev_warn(dp->dev, "AUX CH error happened: %#x (%d)\n", status_reg & AUX_STATUS_MASK, !!(reg & AUX_ERR)); goto aux_error; } if (msg->request & DP_AUX_I2C_READ) { for (i = 0; i < msg->size; i++) { reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0 + 4 * i); buffer[i] = (unsigned char)reg; num_transferred++; } } /* Check if Rx sends defer */ reg = readl(dp->reg_base + ANALOGIX_DP_AUX_RX_COMM); if (reg == AUX_RX_COMM_AUX_DEFER) msg->reply = DP_AUX_NATIVE_REPLY_DEFER; else if (reg == AUX_RX_COMM_I2C_DEFER) msg->reply = DP_AUX_I2C_REPLY_DEFER; else if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_I2C_WRITE || (msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_I2C_READ) msg->reply = DP_AUX_I2C_REPLY_ACK; else if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE || (msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_READ) 
msg->reply = DP_AUX_NATIVE_REPLY_ACK; return num_transferred > 0 ? num_transferred : -EBUSY; aux_error: /* if aux err happen, reset aux */ analogix_dp_init_aux(dp); return -EREMOTEIO; }
/*
 * Dataset provenance marker (not part of the driver source):
 * repo "linux-master",
 * file drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
 */
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright(c) 2016, Analogix Semiconductor. * Copyright(c) 2017, Icenowy Zheng <[email protected]> * * Based on anx7808 driver obtained from chromeos with copyright: * Copyright(c) 2013, Google Inc. */ #include <linux/delay.h> #include <linux/err.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of_platform.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/types.h> #include <drm/display/drm_dp_helper.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "analogix-i2c-dptx.h" #include "analogix-i2c-txcommon.h" #define POLL_DELAY 50000 /* us */ #define POLL_TIMEOUT 5000000 /* us */ #define I2C_IDX_DPTX 0 #define I2C_IDX_TXCOM 1 static const u8 anx6345_i2c_addresses[] = { [I2C_IDX_DPTX] = 0x70, [I2C_IDX_TXCOM] = 0x72, }; #define I2C_NUM_ADDRESSES ARRAY_SIZE(anx6345_i2c_addresses) struct anx6345 { struct drm_dp_aux aux; struct drm_bridge bridge; struct i2c_client *client; struct edid *edid; struct drm_connector connector; struct drm_panel *panel; struct regulator *dvdd12; struct regulator *dvdd25; struct gpio_desc *gpiod_reset; struct mutex lock; /* protect EDID access */ /* I2C Slave addresses of ANX6345 are mapped as DPTX and SYS */ struct i2c_client *i2c_clients[I2C_NUM_ADDRESSES]; struct regmap *map[I2C_NUM_ADDRESSES]; u16 chipid; u8 dpcd[DP_RECEIVER_CAP_SIZE]; bool powered; }; static inline struct anx6345 *connector_to_anx6345(struct drm_connector *c) { return container_of(c, struct anx6345, connector); } static inline struct anx6345 *bridge_to_anx6345(struct drm_bridge *bridge) { return container_of(bridge, struct anx6345, bridge); } static int anx6345_set_bits(struct regmap *map, u8 reg, u8 mask) 
{ return regmap_update_bits(map, reg, mask, mask); } static int anx6345_clear_bits(struct regmap *map, u8 reg, u8 mask) { return regmap_update_bits(map, reg, mask, 0); } static ssize_t anx6345_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { struct anx6345 *anx6345 = container_of(aux, struct anx6345, aux); return anx_dp_aux_transfer(anx6345->map[I2C_IDX_DPTX], msg); } static int anx6345_dp_link_training(struct anx6345 *anx6345) { unsigned int value; u8 dp_bw, dpcd[2]; int err; err = anx6345_clear_bits(anx6345->map[I2C_IDX_TXCOM], SP_POWERDOWN_CTRL_REG, SP_TOTAL_PD); if (err) return err; err = drm_dp_dpcd_readb(&anx6345->aux, DP_MAX_LINK_RATE, &dp_bw); if (err < 0) return err; switch (dp_bw) { case DP_LINK_BW_1_62: case DP_LINK_BW_2_7: break; default: DRM_DEBUG_KMS("DP bandwidth (%#02x) not supported\n", dp_bw); return -EINVAL; } err = anx6345_set_bits(anx6345->map[I2C_IDX_TXCOM], SP_VID_CTRL1_REG, SP_VIDEO_MUTE); if (err) return err; err = anx6345_clear_bits(anx6345->map[I2C_IDX_TXCOM], SP_VID_CTRL1_REG, SP_VIDEO_EN); if (err) return err; /* Get DPCD info */ err = drm_dp_dpcd_read(&anx6345->aux, DP_DPCD_REV, &anx6345->dpcd, DP_RECEIVER_CAP_SIZE); if (err < 0) { DRM_ERROR("Failed to read DPCD: %d\n", err); return err; } /* Clear channel x SERDES power down */ err = anx6345_clear_bits(anx6345->map[I2C_IDX_DPTX], SP_DP_ANALOG_POWER_DOWN_REG, SP_CH0_PD); if (err) return err; /* * Power up the sink (DP_SET_POWER register is only available on DPCD * v1.1 and later). 
*/ if (anx6345->dpcd[DP_DPCD_REV] >= 0x11) { err = drm_dp_dpcd_readb(&anx6345->aux, DP_SET_POWER, &dpcd[0]); if (err < 0) { DRM_ERROR("Failed to read DP_SET_POWER register: %d\n", err); return err; } dpcd[0] &= ~DP_SET_POWER_MASK; dpcd[0] |= DP_SET_POWER_D0; err = drm_dp_dpcd_writeb(&anx6345->aux, DP_SET_POWER, dpcd[0]); if (err < 0) { DRM_ERROR("Failed to power up DisplayPort link: %d\n", err); return err; } /* * According to the DP 1.1 specification, a "Sink Device must * exit the power saving state within 1 ms" (Section 2.5.3.1, * Table 5-52, "Sink Control Field" (register 0x600). */ usleep_range(1000, 2000); } /* Possibly enable downspread on the sink */ err = regmap_write(anx6345->map[I2C_IDX_DPTX], SP_DP_DOWNSPREAD_CTRL1_REG, 0); if (err) return err; if (anx6345->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5) { DRM_DEBUG("Enable downspread on the sink\n"); /* 4000PPM */ err = regmap_write(anx6345->map[I2C_IDX_DPTX], SP_DP_DOWNSPREAD_CTRL1_REG, 8); if (err) return err; err = drm_dp_dpcd_writeb(&anx6345->aux, DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5); if (err < 0) return err; } else { err = drm_dp_dpcd_writeb(&anx6345->aux, DP_DOWNSPREAD_CTRL, 0); if (err < 0) return err; } /* Set the lane count and the link rate on the sink */ if (drm_dp_enhanced_frame_cap(anx6345->dpcd)) err = anx6345_set_bits(anx6345->map[I2C_IDX_DPTX], SP_DP_SYSTEM_CTRL_BASE + 4, SP_ENHANCED_MODE); else err = anx6345_clear_bits(anx6345->map[I2C_IDX_DPTX], SP_DP_SYSTEM_CTRL_BASE + 4, SP_ENHANCED_MODE); if (err) return err; dpcd[0] = dp_bw; err = regmap_write(anx6345->map[I2C_IDX_DPTX], SP_DP_MAIN_LINK_BW_SET_REG, dpcd[0]); if (err) return err; dpcd[1] = drm_dp_max_lane_count(anx6345->dpcd); err = regmap_write(anx6345->map[I2C_IDX_DPTX], SP_DP_LANE_COUNT_SET_REG, dpcd[1]); if (err) return err; if (drm_dp_enhanced_frame_cap(anx6345->dpcd)) dpcd[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; err = drm_dp_dpcd_write(&anx6345->aux, DP_LINK_BW_SET, dpcd, sizeof(dpcd)); if (err < 0) { DRM_ERROR("Failed to 
configure link: %d\n", err); return err; } /* Start training on the source */ err = regmap_write(anx6345->map[I2C_IDX_DPTX], SP_DP_LT_CTRL_REG, SP_LT_EN); if (err) return err; return regmap_read_poll_timeout(anx6345->map[I2C_IDX_DPTX], SP_DP_LT_CTRL_REG, value, !(value & SP_DP_LT_INPROGRESS), POLL_DELAY, POLL_TIMEOUT); } static int anx6345_tx_initialization(struct anx6345 *anx6345) { int err, i; /* FIXME: colordepth is hardcoded for now */ err = regmap_write(anx6345->map[I2C_IDX_TXCOM], SP_VID_CTRL2_REG, SP_IN_BPC_6BIT << SP_IN_BPC_SHIFT); if (err) return err; err = regmap_write(anx6345->map[I2C_IDX_DPTX], SP_DP_PLL_CTRL_REG, 0); if (err) return err; err = regmap_write(anx6345->map[I2C_IDX_TXCOM], SP_ANALOG_DEBUG1_REG, 0); if (err) return err; err = regmap_write(anx6345->map[I2C_IDX_DPTX], SP_DP_LINK_DEBUG_CTRL_REG, SP_NEW_PRBS7 | SP_M_VID_DEBUG); if (err) return err; err = regmap_write(anx6345->map[I2C_IDX_DPTX], SP_DP_ANALOG_POWER_DOWN_REG, 0); if (err) return err; /* Force HPD */ err = anx6345_set_bits(anx6345->map[I2C_IDX_DPTX], SP_DP_SYSTEM_CTRL_BASE + 3, SP_HPD_FORCE | SP_HPD_CTRL); if (err) return err; for (i = 0; i < 4; i++) { /* 4 lanes */ err = regmap_write(anx6345->map[I2C_IDX_DPTX], SP_DP_LANE0_LT_CTRL_REG + i, 0); if (err) return err; } /* Reset AUX */ err = anx6345_set_bits(anx6345->map[I2C_IDX_TXCOM], SP_RESET_CTRL2_REG, SP_AUX_RST); if (err) return err; return anx6345_clear_bits(anx6345->map[I2C_IDX_TXCOM], SP_RESET_CTRL2_REG, SP_AUX_RST); } static void anx6345_poweron(struct anx6345 *anx6345) { int err; /* Ensure reset is asserted before starting power on sequence */ gpiod_set_value_cansleep(anx6345->gpiod_reset, 1); usleep_range(1000, 2000); err = regulator_enable(anx6345->dvdd12); if (err) { DRM_ERROR("Failed to enable dvdd12 regulator: %d\n", err); return; } /* T1 - delay between VDD12 and VDD25 should be 0-2ms */ usleep_range(1000, 2000); err = regulator_enable(anx6345->dvdd25); if (err) { DRM_ERROR("Failed to enable dvdd25 regulator: %d\n", 
err); return; } /* T2 - delay between RESETN and all power rail stable, * should be 2-5ms */ usleep_range(2000, 5000); gpiod_set_value_cansleep(anx6345->gpiod_reset, 0); /* Power on registers module */ anx6345_set_bits(anx6345->map[I2C_IDX_TXCOM], SP_POWERDOWN_CTRL_REG, SP_HDCP_PD | SP_AUDIO_PD | SP_VIDEO_PD | SP_LINK_PD); anx6345_clear_bits(anx6345->map[I2C_IDX_TXCOM], SP_POWERDOWN_CTRL_REG, SP_REGISTER_PD | SP_TOTAL_PD); if (anx6345->panel) drm_panel_prepare(anx6345->panel); anx6345->powered = true; } static void anx6345_poweroff(struct anx6345 *anx6345) { int err; gpiod_set_value_cansleep(anx6345->gpiod_reset, 1); usleep_range(1000, 2000); if (anx6345->panel) drm_panel_unprepare(anx6345->panel); err = regulator_disable(anx6345->dvdd25); if (err) { DRM_ERROR("Failed to disable dvdd25 regulator: %d\n", err); return; } usleep_range(5000, 10000); err = regulator_disable(anx6345->dvdd12); if (err) { DRM_ERROR("Failed to disable dvdd12 regulator: %d\n", err); return; } usleep_range(1000, 2000); anx6345->powered = false; } static int anx6345_start(struct anx6345 *anx6345) { int err; if (!anx6345->powered) anx6345_poweron(anx6345); /* Power on needed modules */ err = anx6345_clear_bits(anx6345->map[I2C_IDX_TXCOM], SP_POWERDOWN_CTRL_REG, SP_VIDEO_PD | SP_LINK_PD); err = anx6345_tx_initialization(anx6345); if (err) { DRM_ERROR("Failed eDP transmitter initialization: %d\n", err); anx6345_poweroff(anx6345); return err; } err = anx6345_dp_link_training(anx6345); if (err) { DRM_ERROR("Failed link training: %d\n", err); anx6345_poweroff(anx6345); return err; } /* * This delay seems to help keep the hardware in a good state. Without * it, there are times where it fails silently. 
*/ usleep_range(10000, 15000); return 0; } static int anx6345_config_dp_output(struct anx6345 *anx6345) { int err; err = anx6345_clear_bits(anx6345->map[I2C_IDX_TXCOM], SP_VID_CTRL1_REG, SP_VIDEO_MUTE); if (err) return err; /* Enable DP output */ err = anx6345_set_bits(anx6345->map[I2C_IDX_TXCOM], SP_VID_CTRL1_REG, SP_VIDEO_EN); if (err) return err; /* Force stream valid */ return anx6345_set_bits(anx6345->map[I2C_IDX_DPTX], SP_DP_SYSTEM_CTRL_BASE + 3, SP_STRM_FORCE | SP_STRM_CTRL); } static int anx6345_get_downstream_info(struct anx6345 *anx6345) { u8 value; int err; err = drm_dp_dpcd_readb(&anx6345->aux, DP_SINK_COUNT, &value); if (err < 0) { DRM_ERROR("Get sink count failed %d\n", err); return err; } if (!DP_GET_SINK_COUNT(value)) { DRM_ERROR("Downstream disconnected\n"); return -EIO; } return 0; } static int anx6345_get_modes(struct drm_connector *connector) { struct anx6345 *anx6345 = connector_to_anx6345(connector); int err, num_modes = 0; bool power_off = false; mutex_lock(&anx6345->lock); if (!anx6345->edid) { if (!anx6345->powered) { anx6345_poweron(anx6345); power_off = true; } err = anx6345_get_downstream_info(anx6345); if (err) { DRM_ERROR("Failed to get downstream info: %d\n", err); goto unlock; } anx6345->edid = drm_get_edid(connector, &anx6345->aux.ddc); if (!anx6345->edid) DRM_ERROR("Failed to read EDID from panel\n"); err = drm_connector_update_edid_property(connector, anx6345->edid); if (err) { DRM_ERROR("Failed to update EDID property: %d\n", err); goto unlock; } } num_modes += drm_add_edid_modes(connector, anx6345->edid); /* Driver currently supports only 6bpc */ connector->display_info.bpc = 6; unlock: if (power_off) anx6345_poweroff(anx6345); mutex_unlock(&anx6345->lock); if (!num_modes && anx6345->panel) num_modes += drm_panel_get_modes(anx6345->panel, connector); return num_modes; } static const struct drm_connector_helper_funcs anx6345_connector_helper_funcs = { .get_modes = anx6345_get_modes, }; static void anx6345_connector_destroy(struct 
drm_connector *connector) { drm_connector_cleanup(connector); } static const struct drm_connector_funcs anx6345_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .destroy = anx6345_connector_destroy, .reset = drm_atomic_helper_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; static int anx6345_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct anx6345 *anx6345 = bridge_to_anx6345(bridge); int err; if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) { DRM_ERROR("Fix bridge driver to make connector optional!"); return -EINVAL; } if (!bridge->encoder) { DRM_ERROR("Parent encoder object not found"); return -ENODEV; } /* Register aux channel */ anx6345->aux.name = "DP-AUX"; anx6345->aux.dev = &anx6345->client->dev; anx6345->aux.drm_dev = bridge->dev; anx6345->aux.transfer = anx6345_aux_transfer; err = drm_dp_aux_register(&anx6345->aux); if (err < 0) { DRM_ERROR("Failed to register aux channel: %d\n", err); return err; } err = drm_connector_init(bridge->dev, &anx6345->connector, &anx6345_connector_funcs, DRM_MODE_CONNECTOR_eDP); if (err) { DRM_ERROR("Failed to initialize connector: %d\n", err); goto aux_unregister; } drm_connector_helper_add(&anx6345->connector, &anx6345_connector_helper_funcs); anx6345->connector.polled = DRM_CONNECTOR_POLL_HPD; err = drm_connector_attach_encoder(&anx6345->connector, bridge->encoder); if (err) { DRM_ERROR("Failed to link up connector to encoder: %d\n", err); goto connector_cleanup; } err = drm_connector_register(&anx6345->connector); if (err) { DRM_ERROR("Failed to register connector: %d\n", err); goto connector_cleanup; } return 0; connector_cleanup: drm_connector_cleanup(&anx6345->connector); aux_unregister: drm_dp_aux_unregister(&anx6345->aux); return err; } static void anx6345_bridge_detach(struct drm_bridge *bridge) { drm_dp_aux_unregister(&bridge_to_anx6345(bridge)->aux); 
} static enum drm_mode_status anx6345_bridge_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode) { if (mode->flags & DRM_MODE_FLAG_INTERLACE) return MODE_NO_INTERLACE; /* Max 1200p at 5.4 Ghz, one lane */ if (mode->clock > 154000) return MODE_CLOCK_HIGH; return MODE_OK; } static void anx6345_bridge_disable(struct drm_bridge *bridge) { struct anx6345 *anx6345 = bridge_to_anx6345(bridge); /* Power off all modules except configuration registers access */ anx6345_set_bits(anx6345->map[I2C_IDX_TXCOM], SP_POWERDOWN_CTRL_REG, SP_HDCP_PD | SP_AUDIO_PD | SP_VIDEO_PD | SP_LINK_PD); if (anx6345->panel) drm_panel_disable(anx6345->panel); if (anx6345->powered) anx6345_poweroff(anx6345); } static void anx6345_bridge_enable(struct drm_bridge *bridge) { struct anx6345 *anx6345 = bridge_to_anx6345(bridge); int err; if (anx6345->panel) drm_panel_enable(anx6345->panel); err = anx6345_start(anx6345); if (err) { DRM_ERROR("Failed to initialize: %d\n", err); return; } err = anx6345_config_dp_output(anx6345); if (err) DRM_ERROR("Failed to enable DP output: %d\n", err); } static const struct drm_bridge_funcs anx6345_bridge_funcs = { .attach = anx6345_bridge_attach, .detach = anx6345_bridge_detach, .mode_valid = anx6345_bridge_mode_valid, .disable = anx6345_bridge_disable, .enable = anx6345_bridge_enable, }; static void unregister_i2c_dummy_clients(struct anx6345 *anx6345) { unsigned int i; for (i = 1; i < ARRAY_SIZE(anx6345->i2c_clients); i++) if (anx6345->i2c_clients[i] && anx6345->i2c_clients[i]->addr != anx6345->client->addr) i2c_unregister_device(anx6345->i2c_clients[i]); } static const struct regmap_config anx6345_regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = 0xff, .cache_type = REGCACHE_NONE, }; static const u16 anx6345_chipid_list[] = { 0x6345, }; static bool anx6345_get_chip_id(struct anx6345 *anx6345) { unsigned int i, idl, idh, version; if (regmap_read(anx6345->map[I2C_IDX_TXCOM], SP_DEVICE_IDL_REG, 
&idl)) return false; if (regmap_read(anx6345->map[I2C_IDX_TXCOM], SP_DEVICE_IDH_REG, &idh)) return false; anx6345->chipid = (u8)idl | ((u8)idh << 8); if (regmap_read(anx6345->map[I2C_IDX_TXCOM], SP_DEVICE_VERSION_REG, &version)) return false; for (i = 0; i < ARRAY_SIZE(anx6345_chipid_list); i++) { if (anx6345->chipid == anx6345_chipid_list[i]) { DRM_INFO("Found ANX%x (ver. %d) eDP Transmitter\n", anx6345->chipid, version); return true; } } DRM_ERROR("ANX%x (ver. %d) not supported by this driver\n", anx6345->chipid, version); return false; } static int anx6345_i2c_probe(struct i2c_client *client) { struct anx6345 *anx6345; struct device *dev; int i, err; anx6345 = devm_kzalloc(&client->dev, sizeof(*anx6345), GFP_KERNEL); if (!anx6345) return -ENOMEM; mutex_init(&anx6345->lock); anx6345->bridge.of_node = client->dev.of_node; anx6345->client = client; i2c_set_clientdata(client, anx6345); dev = &anx6345->client->dev; err = drm_of_find_panel_or_bridge(client->dev.of_node, 1, 0, &anx6345->panel, NULL); if (err == -EPROBE_DEFER) return err; if (err) DRM_DEBUG("No panel found\n"); /* 1.2V digital core power regulator */ anx6345->dvdd12 = devm_regulator_get(dev, "dvdd12"); if (IS_ERR(anx6345->dvdd12)) { if (PTR_ERR(anx6345->dvdd12) != -EPROBE_DEFER) DRM_ERROR("Failed to get dvdd12 supply (%ld)\n", PTR_ERR(anx6345->dvdd12)); return PTR_ERR(anx6345->dvdd12); } /* 2.5V digital core power regulator */ anx6345->dvdd25 = devm_regulator_get(dev, "dvdd25"); if (IS_ERR(anx6345->dvdd25)) { if (PTR_ERR(anx6345->dvdd25) != -EPROBE_DEFER) DRM_ERROR("Failed to get dvdd25 supply (%ld)\n", PTR_ERR(anx6345->dvdd25)); return PTR_ERR(anx6345->dvdd25); } /* GPIO for chip reset */ anx6345->gpiod_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(anx6345->gpiod_reset)) { DRM_ERROR("Reset gpio not found\n"); return PTR_ERR(anx6345->gpiod_reset); } /* Map slave addresses of ANX6345 */ for (i = 0; i < I2C_NUM_ADDRESSES; i++) { if (anx6345_i2c_addresses[i] >> 1 != client->addr) 
anx6345->i2c_clients[i] = i2c_new_dummy_device(client->adapter, anx6345_i2c_addresses[i] >> 1); else anx6345->i2c_clients[i] = client; if (IS_ERR(anx6345->i2c_clients[i])) { err = PTR_ERR(anx6345->i2c_clients[i]); DRM_ERROR("Failed to reserve I2C bus %02x\n", anx6345_i2c_addresses[i]); goto err_unregister_i2c; } anx6345->map[i] = devm_regmap_init_i2c(anx6345->i2c_clients[i], &anx6345_regmap_config); if (IS_ERR(anx6345->map[i])) { err = PTR_ERR(anx6345->map[i]); DRM_ERROR("Failed regmap initialization %02x\n", anx6345_i2c_addresses[i]); goto err_unregister_i2c; } } /* Look for supported chip ID */ anx6345_poweron(anx6345); if (anx6345_get_chip_id(anx6345)) { anx6345->bridge.funcs = &anx6345_bridge_funcs; drm_bridge_add(&anx6345->bridge); return 0; } else { anx6345_poweroff(anx6345); err = -ENODEV; } err_unregister_i2c: unregister_i2c_dummy_clients(anx6345); return err; } static void anx6345_i2c_remove(struct i2c_client *client) { struct anx6345 *anx6345 = i2c_get_clientdata(client); drm_bridge_remove(&anx6345->bridge); unregister_i2c_dummy_clients(anx6345); kfree(anx6345->edid); mutex_destroy(&anx6345->lock); } static const struct i2c_device_id anx6345_id[] = { { "anx6345", 0 }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(i2c, anx6345_id); static const struct of_device_id anx6345_match_table[] = { { .compatible = "analogix,anx6345", }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, anx6345_match_table); static struct i2c_driver anx6345_driver = { .driver = { .name = "anx6345", .of_match_table = anx6345_match_table, }, .probe = anx6345_i2c_probe, .remove = anx6345_i2c_remove, .id_table = anx6345_id, }; module_i2c_driver(anx6345_driver); MODULE_DESCRIPTION("ANX6345 eDP Transmitter driver"); MODULE_AUTHOR("Icenowy Zheng <[email protected]>"); MODULE_LICENSE("GPL v2");
/*
 * Dataset provenance marker (not part of the driver source):
 * repo "linux-master",
 * file drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
 */
// SPDX-License-Identifier: GPL-2.0-or-later /* * Analogix DP (Display Port) core interface driver. * * Copyright (C) 2012 Samsung Electronics Co., Ltd. * Author: Jingoo Han <[email protected]> */ #include <linux/clk.h> #include <linux/component.h> #include <linux/err.h> #include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/module.h> #include <linux/of.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <drm/bridge/analogix_dp.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_crtc.h> #include <drm/drm_device.h> #include <drm/drm_edid.h> #include <drm/drm_panel.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "analogix_dp_core.h" #include "analogix_dp_reg.h" #define to_dp(nm) container_of(nm, struct analogix_dp_device, nm) static const bool verify_fast_training; struct bridge_init { struct i2c_client *client; struct device_node *node; }; static int analogix_dp_init_dp(struct analogix_dp_device *dp) { int ret; analogix_dp_reset(dp); analogix_dp_swreset(dp); analogix_dp_init_analog_param(dp); analogix_dp_init_interrupt(dp); /* SW defined function Normal operation */ analogix_dp_enable_sw_function(dp); analogix_dp_config_interrupt(dp); ret = analogix_dp_init_analog_func(dp); if (ret) return ret; analogix_dp_init_hpd(dp); analogix_dp_init_aux(dp); return 0; } static int analogix_dp_detect_hpd(struct analogix_dp_device *dp) { int timeout_loop = 0; while (timeout_loop < DP_TIMEOUT_LOOP_COUNT) { if (analogix_dp_get_plug_in_status(dp) == 0) return 0; timeout_loop++; usleep_range(1000, 1100); } /* * Some edp screen do not have hpd signal, so we can't just * return failed when hpd plug in detect failed, DT property * "force-hpd" would indicate whether driver need this. 
 */
	if (!dp->force_hpd)
		return -ETIMEDOUT;

	/*
	 * The eDP TRM indicate that if HPD_STATUS(RO) is 0, AUX CH
	 * will not work, so we need to give a force hpd action to
	 * set HPD_STATUS manually.
	 */
	dev_dbg(dp->dev, "failed to get hpd plug status, try to force hpd\n");

	analogix_dp_force_hpd(dp);

	/* Re-check: forcing HPD must make the controller report plug-in. */
	if (analogix_dp_get_plug_in_status(dp) != 0) {
		dev_err(dp->dev, "failed to get hpd plug in status\n");
		return -EINVAL;
	}

	dev_dbg(dp->dev, "success to get plug in status after force hpd\n");

	return 0;
}

/*
 * Read DP_PSR_SUPPORT from the sink's DPCD and report whether the panel
 * advertises PSR.  drm_dp_dpcd_readb() returning anything other than 1
 * (one byte read) is treated as "no PSR".
 */
static bool analogix_dp_detect_sink_psr(struct analogix_dp_device *dp)
{
	unsigned char psr_version;
	int ret;

	ret = drm_dp_dpcd_readb(&dp->aux, DP_PSR_SUPPORT, &psr_version);
	if (ret != 1) {
		dev_err(dp->dev, "failed to get PSR version, disable it\n");
		return false;
	}

	dev_dbg(dp->dev, "Panel PSR version : %x\n", psr_version);

	return psr_version & DP_PSR_IS_SUPPORTED;
}

/*
 * Configure the sink's PSR via DP_PSR_EN_CFG: first disable PSR, then set
 * CRC verification, then enable PSR with CRC, and finally enable CRC
 * checking on the source side.  On success, dp->psr_supported is latched.
 * Any failed DPCD access jumps to the common error exit.
 */
static int analogix_dp_enable_sink_psr(struct analogix_dp_device *dp)
{
	unsigned char psr_en;
	int ret;

	/* Disable psr function */
	ret = drm_dp_dpcd_readb(&dp->aux, DP_PSR_EN_CFG, &psr_en);
	if (ret != 1) {
		dev_err(dp->dev, "failed to get psr config\n");
		goto end;
	}

	psr_en &= ~DP_PSR_ENABLE;
	ret = drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
	if (ret != 1) {
		dev_err(dp->dev, "failed to disable panel psr\n");
		goto end;
	}

	/* Main-Link transmitter remains active during PSR active states */
	psr_en = DP_PSR_CRC_VERIFICATION;
	ret = drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
	if (ret != 1) {
		dev_err(dp->dev, "failed to set panel psr\n");
		goto end;
	}

	/* Enable psr function */
	psr_en = DP_PSR_ENABLE | DP_PSR_CRC_VERIFICATION;
	ret = drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
	if (ret != 1) {
		dev_err(dp->dev, "failed to set panel psr\n");
		goto end;
	}

	analogix_dp_enable_psr_crc(dp);

	dp->psr_supported = true;

	return 0;
end:
	dev_err(dp->dev, "enable psr fail, force to disable psr\n");
	return ret;
}

/*
 * Set or clear the enhanced-framing bit in the sink's DP_LANE_COUNT_SET
 * register, preserving the currently programmed lane count.
 */
static int analogix_dp_enable_rx_to_enhanced_mode(struct analogix_dp_device *dp,
						  bool enable)
{
	u8 data;
	int ret;
	ret = drm_dp_dpcd_readb(&dp->aux, DP_LANE_COUNT_SET, &data);
	if (ret != 1)
		return ret;

	/* Rewrite the lane count with/without the enhanced-frame flag. */
	if (enable)
		ret = drm_dp_dpcd_writeb(&dp->aux, DP_LANE_COUNT_SET,
					 DP_LANE_COUNT_ENHANCED_FRAME_EN |
					 DPCD_LANE_COUNT_SET(data));
	else
		ret = drm_dp_dpcd_writeb(&dp->aux, DP_LANE_COUNT_SET,
					 DPCD_LANE_COUNT_SET(data));

	/* drm_dp_dpcd_writeb() returns bytes written; normalize to 0/-errno. */
	return ret < 0 ? ret : 0;
}

/*
 * Query DP_MAX_LANE_COUNT in the sink's DPCD and extract the enhanced
 * framing capability bit into *enhanced_mode_support (0 on read failure).
 */
static int analogix_dp_is_enhanced_mode_available(struct analogix_dp_device *dp,
						  u8 *enhanced_mode_support)
{
	u8 data;
	int ret;

	ret = drm_dp_dpcd_readb(&dp->aux, DP_MAX_LANE_COUNT, &data);
	if (ret != 1) {
		*enhanced_mode_support = 0;
		return ret;
	}

	*enhanced_mode_support = DPCD_ENHANCED_FRAME_CAP(data);

	return 0;
}

/*
 * Mirror the sink's enhanced-framing capability on both ends of the link:
 * program the sink's LANE_COUNT_SET bit, then the source controller.
 */
static int analogix_dp_set_enhanced_mode(struct analogix_dp_device *dp)
{
	u8 data;
	int ret;

	ret = analogix_dp_is_enhanced_mode_available(dp, &data);
	if (ret < 0)
		return ret;

	ret = analogix_dp_enable_rx_to_enhanced_mode(dp, data);
	if (ret < 0)
		return ret;

	analogix_dp_enable_enhanced_mode(dp, data);

	return 0;
}

/*
 * Stop sending a training pattern: clear it in the source controller and
 * write DP_TRAINING_PATTERN_DISABLE to the sink's DPCD.
 */
static int analogix_dp_training_pattern_dis(struct analogix_dp_device *dp)
{
	int ret;

	analogix_dp_set_training_pattern(dp, DP_NONE);

	ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
				 DP_TRAINING_PATTERN_DISABLE);

	return ret < 0 ?
	       ret : 0;
}

/*
 * Route a pre-emphasis setting to the per-lane register write helper for
 * the given lane index (0..3).  Out-of-range lanes are silently ignored.
 */
static void analogix_dp_set_lane_lane_pre_emphasis(struct analogix_dp_device *dp,
						   int pre_emphasis, int lane)
{
	switch (lane) {
	case 0:
		analogix_dp_set_lane0_pre_emphasis(dp, pre_emphasis);
		break;
	case 1:
		analogix_dp_set_lane1_pre_emphasis(dp, pre_emphasis);
		break;
	case 2:
		analogix_dp_set_lane2_pre_emphasis(dp, pre_emphasis);
		break;
	case 3:
		analogix_dp_set_lane3_pre_emphasis(dp, pre_emphasis);
		break;
	}
}

/*
 * Kick off full link training: reset the per-lane CR counters, program
 * link rate/lane count on both source and sink, enable enhanced framing
 * if available, drop TX pre-emphasis to minimum, wait for PLL lock, and
 * start training pattern 1 on both ends.  Moves the link-training state
 * machine into CLOCK_RECOVERY.  Returns 0 or a negative error code.
 */
static int analogix_dp_link_start(struct analogix_dp_device *dp)
{
	u8 buf[4];
	int lane, lane_count, pll_tries, retval;

	lane_count = dp->link_train.lane_count;

	dp->link_train.lt_state = CLOCK_RECOVERY;
	dp->link_train.eq_loop = 0;

	for (lane = 0; lane < lane_count; lane++)
		dp->link_train.cr_loop[lane] = 0;

	/* Set link rate and count as you want to establish*/
	analogix_dp_set_link_bandwidth(dp, dp->link_train.link_rate);
	analogix_dp_set_lane_count(dp, dp->link_train.lane_count);

	/* Setup RX configuration */
	buf[0] = dp->link_train.link_rate;
	buf[1] = dp->link_train.lane_count;
	retval = drm_dp_dpcd_write(&dp->aux, DP_LINK_BW_SET, buf, 2);
	if (retval < 0)
		return retval;

	/* set enhanced mode if available */
	retval = analogix_dp_set_enhanced_mode(dp);
	if (retval < 0) {
		dev_err(dp->dev, "failed to set enhance mode\n");
		return retval;
	}

	/* Set TX pre-emphasis to minimum */
	for (lane = 0; lane < lane_count; lane++)
		analogix_dp_set_lane_lane_pre_emphasis(dp,
						       PRE_EMPHASIS_LEVEL_0,
						       lane);

	/* Wait for PLL lock */
	pll_tries = 0;
	while (analogix_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
		if (pll_tries == DP_TIMEOUT_LOOP_COUNT) {
			dev_err(dp->dev, "Wait for PLL lock timed out\n");
			return -ETIMEDOUT;
		}

		pll_tries++;
		usleep_range(90, 120);
	}

	/* Set training pattern 1 */
	analogix_dp_set_training_pattern(dp, TRAINING_PTN1);

	/* Set RX training pattern */
	retval = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
				    DP_LINK_SCRAMBLING_DISABLE |
					DP_TRAINING_PATTERN_1);
	if (retval < 0)
		return retval;

	/* Minimum swing and pre-emphasis on every RX training lane. */
	for (lane = 0; lane < lane_count; lane++)
		buf[lane] =
DP_TRAIN_PRE_EMPH_LEVEL_0 | DP_TRAIN_VOLTAGE_SWING_LEVEL_0; retval = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, buf, lane_count); if (retval < 0) return retval; return 0; } static unsigned char analogix_dp_get_lane_status(u8 link_status[2], int lane) { int shift = (lane & 1) * 4; u8 link_value = link_status[lane >> 1]; return (link_value >> shift) & 0xf; } static int analogix_dp_clock_recovery_ok(u8 link_status[2], int lane_count) { int lane; u8 lane_status; for (lane = 0; lane < lane_count; lane++) { lane_status = analogix_dp_get_lane_status(link_status, lane); if ((lane_status & DP_LANE_CR_DONE) == 0) return -EINVAL; } return 0; } static int analogix_dp_channel_eq_ok(u8 link_status[2], u8 link_align, int lane_count) { int lane; u8 lane_status; if ((link_align & DP_INTERLANE_ALIGN_DONE) == 0) return -EINVAL; for (lane = 0; lane < lane_count; lane++) { lane_status = analogix_dp_get_lane_status(link_status, lane); lane_status &= DP_CHANNEL_EQ_BITS; if (lane_status != DP_CHANNEL_EQ_BITS) return -EINVAL; } return 0; } static unsigned char analogix_dp_get_adjust_request_voltage(u8 adjust_request[2], int lane) { int shift = (lane & 1) * 4; u8 link_value = adjust_request[lane >> 1]; return (link_value >> shift) & 0x3; } static unsigned char analogix_dp_get_adjust_request_pre_emphasis( u8 adjust_request[2], int lane) { int shift = (lane & 1) * 4; u8 link_value = adjust_request[lane >> 1]; return ((link_value >> shift) & 0xc) >> 2; } static void analogix_dp_set_lane_link_training(struct analogix_dp_device *dp, u8 training_lane_set, int lane) { switch (lane) { case 0: analogix_dp_set_lane0_link_training(dp, training_lane_set); break; case 1: analogix_dp_set_lane1_link_training(dp, training_lane_set); break; case 2: analogix_dp_set_lane2_link_training(dp, training_lane_set); break; case 3: analogix_dp_set_lane3_link_training(dp, training_lane_set); break; } } static unsigned int analogix_dp_get_lane_link_training(struct analogix_dp_device *dp, int lane) { u32 reg; 
switch (lane) { case 0: reg = analogix_dp_get_lane0_link_training(dp); break; case 1: reg = analogix_dp_get_lane1_link_training(dp); break; case 2: reg = analogix_dp_get_lane2_link_training(dp); break; case 3: reg = analogix_dp_get_lane3_link_training(dp); break; default: WARN_ON(1); return 0; } return reg; } static void analogix_dp_reduce_link_rate(struct analogix_dp_device *dp) { analogix_dp_training_pattern_dis(dp); analogix_dp_set_enhanced_mode(dp); dp->link_train.lt_state = FAILED; } static void analogix_dp_get_adjust_training_lane(struct analogix_dp_device *dp, u8 adjust_request[2]) { int lane, lane_count; u8 voltage_swing, pre_emphasis, training_lane; lane_count = dp->link_train.lane_count; for (lane = 0; lane < lane_count; lane++) { voltage_swing = analogix_dp_get_adjust_request_voltage( adjust_request, lane); pre_emphasis = analogix_dp_get_adjust_request_pre_emphasis( adjust_request, lane); training_lane = DPCD_VOLTAGE_SWING_SET(voltage_swing) | DPCD_PRE_EMPHASIS_SET(pre_emphasis); if (voltage_swing == VOLTAGE_LEVEL_3) training_lane |= DP_TRAIN_MAX_SWING_REACHED; if (pre_emphasis == PRE_EMPHASIS_LEVEL_3) training_lane |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; dp->link_train.training_lane[lane] = training_lane; } } static int analogix_dp_process_clock_recovery(struct analogix_dp_device *dp) { int lane, lane_count, retval; u8 voltage_swing, pre_emphasis, training_lane; u8 link_status[2], adjust_request[2]; usleep_range(100, 101); lane_count = dp->link_train.lane_count; retval = drm_dp_dpcd_read(&dp->aux, DP_LANE0_1_STATUS, link_status, 2); if (retval < 0) return retval; retval = drm_dp_dpcd_read(&dp->aux, DP_ADJUST_REQUEST_LANE0_1, adjust_request, 2); if (retval < 0) return retval; if (analogix_dp_clock_recovery_ok(link_status, lane_count) == 0) { /* set training pattern 2 for EQ */ analogix_dp_set_training_pattern(dp, TRAINING_PTN2); retval = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET, DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_2); if (retval < 
0) return retval; dev_dbg(dp->dev, "Link Training Clock Recovery success\n"); dp->link_train.lt_state = EQUALIZER_TRAINING; } else { for (lane = 0; lane < lane_count; lane++) { training_lane = analogix_dp_get_lane_link_training( dp, lane); voltage_swing = analogix_dp_get_adjust_request_voltage( adjust_request, lane); pre_emphasis = analogix_dp_get_adjust_request_pre_emphasis( adjust_request, lane); if (DPCD_VOLTAGE_SWING_GET(training_lane) == voltage_swing && DPCD_PRE_EMPHASIS_GET(training_lane) == pre_emphasis) dp->link_train.cr_loop[lane]++; if (dp->link_train.cr_loop[lane] == MAX_CR_LOOP || voltage_swing == VOLTAGE_LEVEL_3 || pre_emphasis == PRE_EMPHASIS_LEVEL_3) { dev_err(dp->dev, "CR Max reached (%d,%d,%d)\n", dp->link_train.cr_loop[lane], voltage_swing, pre_emphasis); analogix_dp_reduce_link_rate(dp); return -EIO; } } } analogix_dp_get_adjust_training_lane(dp, adjust_request); for (lane = 0; lane < lane_count; lane++) analogix_dp_set_lane_link_training(dp, dp->link_train.training_lane[lane], lane); retval = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, dp->link_train.training_lane, lane_count); if (retval < 0) return retval; return 0; } static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp) { int lane, lane_count, retval; u32 reg; u8 link_align, link_status[2], adjust_request[2]; usleep_range(400, 401); lane_count = dp->link_train.lane_count; retval = drm_dp_dpcd_read(&dp->aux, DP_LANE0_1_STATUS, link_status, 2); if (retval < 0) return retval; if (analogix_dp_clock_recovery_ok(link_status, lane_count)) { analogix_dp_reduce_link_rate(dp); return -EIO; } retval = drm_dp_dpcd_read(&dp->aux, DP_ADJUST_REQUEST_LANE0_1, adjust_request, 2); if (retval < 0) return retval; retval = drm_dp_dpcd_readb(&dp->aux, DP_LANE_ALIGN_STATUS_UPDATED, &link_align); if (retval < 0) return retval; analogix_dp_get_adjust_training_lane(dp, adjust_request); if (!analogix_dp_channel_eq_ok(link_status, link_align, lane_count)) { /* traing pattern Set to 
Normal */ retval = analogix_dp_training_pattern_dis(dp); if (retval < 0) return retval; dev_dbg(dp->dev, "Link Training success!\n"); analogix_dp_get_link_bandwidth(dp, &reg); dp->link_train.link_rate = reg; dev_dbg(dp->dev, "final bandwidth = %.2x\n", dp->link_train.link_rate); analogix_dp_get_lane_count(dp, &reg); dp->link_train.lane_count = reg; dev_dbg(dp->dev, "final lane count = %.2x\n", dp->link_train.lane_count); dp->link_train.lt_state = FINISHED; return 0; } /* not all locked */ dp->link_train.eq_loop++; if (dp->link_train.eq_loop > MAX_EQ_LOOP) { dev_err(dp->dev, "EQ Max loop\n"); analogix_dp_reduce_link_rate(dp); return -EIO; } for (lane = 0; lane < lane_count; lane++) analogix_dp_set_lane_link_training(dp, dp->link_train.training_lane[lane], lane); retval = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, dp->link_train.training_lane, lane_count); if (retval < 0) return retval; return 0; } static void analogix_dp_get_max_rx_bandwidth(struct analogix_dp_device *dp, u8 *bandwidth) { u8 data; /* * For DP rev.1.1, Maximum link rate of Main Link lanes * 0x06 = 1.62 Gbps, 0x0a = 2.7 Gbps * For DP rev.1.2, Maximum link rate of Main Link lanes * 0x06 = 1.62 Gbps, 0x0a = 2.7 Gbps, 0x14 = 5.4Gbps */ drm_dp_dpcd_readb(&dp->aux, DP_MAX_LINK_RATE, &data); *bandwidth = data; } static void analogix_dp_get_max_rx_lane_count(struct analogix_dp_device *dp, u8 *lane_count) { u8 data; /* * For DP rev.1.1, Maximum number of Main Link lanes * 0x01 = 1 lane, 0x02 = 2 lanes, 0x04 = 4 lanes */ drm_dp_dpcd_readb(&dp->aux, DP_MAX_LANE_COUNT, &data); *lane_count = DPCD_MAX_LANE_COUNT(data); } static int analogix_dp_full_link_train(struct analogix_dp_device *dp, u32 max_lanes, u32 max_rate) { int retval = 0; bool training_finished = false; /* * MACRO_RST must be applied after the PLL_LOCK to avoid * the DP inter pair skew issue for at least 10 us */ analogix_dp_reset_macro(dp); /* Initialize by reading RX's DPCD */ analogix_dp_get_max_rx_bandwidth(dp, &dp->link_train.link_rate); 
analogix_dp_get_max_rx_lane_count(dp, &dp->link_train.lane_count); if ((dp->link_train.link_rate != DP_LINK_BW_1_62) && (dp->link_train.link_rate != DP_LINK_BW_2_7) && (dp->link_train.link_rate != DP_LINK_BW_5_4)) { dev_err(dp->dev, "Rx Max Link Rate is abnormal :%x !\n", dp->link_train.link_rate); dp->link_train.link_rate = DP_LINK_BW_1_62; } if (dp->link_train.lane_count == 0) { dev_err(dp->dev, "Rx Max Lane count is abnormal :%x !\n", dp->link_train.lane_count); dp->link_train.lane_count = (u8)LANE_COUNT1; } /* Setup TX lane count & rate */ if (dp->link_train.lane_count > max_lanes) dp->link_train.lane_count = max_lanes; if (dp->link_train.link_rate > max_rate) dp->link_train.link_rate = max_rate; /* All DP analog module power up */ analogix_dp_set_analog_power_down(dp, POWER_ALL, 0); dp->link_train.lt_state = START; /* Process here */ while (!retval && !training_finished) { switch (dp->link_train.lt_state) { case START: retval = analogix_dp_link_start(dp); if (retval) dev_err(dp->dev, "LT link start failed!\n"); break; case CLOCK_RECOVERY: retval = analogix_dp_process_clock_recovery(dp); if (retval) dev_err(dp->dev, "LT CR failed!\n"); break; case EQUALIZER_TRAINING: retval = analogix_dp_process_equalizer_training(dp); if (retval) dev_err(dp->dev, "LT EQ failed!\n"); break; case FINISHED: training_finished = 1; break; case FAILED: return -EREMOTEIO; } } if (retval) dev_err(dp->dev, "eDP link training failed (%d)\n", retval); return retval; } static int analogix_dp_fast_link_train(struct analogix_dp_device *dp) { int i, ret; u8 link_align, link_status[2]; enum pll_status status; analogix_dp_reset_macro(dp); analogix_dp_set_link_bandwidth(dp, dp->link_train.link_rate); analogix_dp_set_lane_count(dp, dp->link_train.lane_count); for (i = 0; i < dp->link_train.lane_count; i++) { analogix_dp_set_lane_link_training(dp, dp->link_train.training_lane[i], i); } ret = readx_poll_timeout(analogix_dp_get_pll_lock_status, dp, status, status != PLL_UNLOCKED, 120, 120 * 
DP_TIMEOUT_LOOP_COUNT); if (ret) { DRM_DEV_ERROR(dp->dev, "Wait for pll lock failed %d\n", ret); return ret; } /* source Set training pattern 1 */ analogix_dp_set_training_pattern(dp, TRAINING_PTN1); /* From DP spec, pattern must be on-screen for a minimum 500us */ usleep_range(500, 600); analogix_dp_set_training_pattern(dp, TRAINING_PTN2); /* From DP spec, pattern must be on-screen for a minimum 500us */ usleep_range(500, 600); /* TODO: enhanced_mode?*/ analogix_dp_set_training_pattern(dp, DP_NONE); /* * Useful for debugging issues with fast link training, disable for more * speed */ if (verify_fast_training) { ret = drm_dp_dpcd_readb(&dp->aux, DP_LANE_ALIGN_STATUS_UPDATED, &link_align); if (ret < 0) { DRM_DEV_ERROR(dp->dev, "Read align status failed %d\n", ret); return ret; } ret = drm_dp_dpcd_read(&dp->aux, DP_LANE0_1_STATUS, link_status, 2); if (ret < 0) { DRM_DEV_ERROR(dp->dev, "Read link status failed %d\n", ret); return ret; } if (analogix_dp_clock_recovery_ok(link_status, dp->link_train.lane_count)) { DRM_DEV_ERROR(dp->dev, "Clock recovery failed\n"); analogix_dp_reduce_link_rate(dp); return -EIO; } if (analogix_dp_channel_eq_ok(link_status, link_align, dp->link_train.lane_count)) { DRM_DEV_ERROR(dp->dev, "Channel EQ failed\n"); analogix_dp_reduce_link_rate(dp); return -EIO; } } return 0; } static int analogix_dp_train_link(struct analogix_dp_device *dp) { if (dp->fast_train_enable) return analogix_dp_fast_link_train(dp); return analogix_dp_full_link_train(dp, dp->video_info.max_lane_count, dp->video_info.max_link_rate); } static int analogix_dp_config_video(struct analogix_dp_device *dp) { int timeout_loop = 0; int done_count = 0; analogix_dp_config_video_slave_mode(dp); analogix_dp_set_video_color_format(dp); if (analogix_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) { dev_err(dp->dev, "PLL is not locked yet.\n"); return -EINVAL; } for (;;) { timeout_loop++; if (analogix_dp_is_slave_video_stream_clock_on(dp) == 0) break; if (timeout_loop > 
DP_TIMEOUT_LOOP_COUNT) { dev_err(dp->dev, "Timeout of slave video streamclk ok\n"); return -ETIMEDOUT; } usleep_range(1000, 1001); } /* Set to use the register calculated M/N video */ analogix_dp_set_video_cr_mn(dp, CALCULATED_M, 0, 0); /* For video bist, Video timing must be generated by register */ analogix_dp_set_video_timing_mode(dp, VIDEO_TIMING_FROM_CAPTURE); /* Disable video mute */ analogix_dp_enable_video_mute(dp, 0); /* Configure video slave mode */ analogix_dp_enable_video_master(dp, 0); /* Enable video */ analogix_dp_start_video(dp); timeout_loop = 0; for (;;) { timeout_loop++; if (analogix_dp_is_video_stream_on(dp) == 0) { done_count++; if (done_count > 10) break; } else if (done_count) { done_count = 0; } if (timeout_loop > DP_TIMEOUT_LOOP_COUNT) { dev_warn(dp->dev, "Ignoring timeout of video streamclk ok\n"); break; } usleep_range(1000, 1001); } return 0; } static int analogix_dp_enable_scramble(struct analogix_dp_device *dp, bool enable) { u8 data; int ret; if (enable) { analogix_dp_enable_scrambling(dp); ret = drm_dp_dpcd_readb(&dp->aux, DP_TRAINING_PATTERN_SET, &data); if (ret != 1) return ret; ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET, (u8)(data & ~DP_LINK_SCRAMBLING_DISABLE)); } else { analogix_dp_disable_scrambling(dp); ret = drm_dp_dpcd_readb(&dp->aux, DP_TRAINING_PATTERN_SET, &data); if (ret != 1) return ret; ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET, (u8)(data | DP_LINK_SCRAMBLING_DISABLE)); } return ret < 0 ? 
	       ret : 0;
}

/*
 * Hard-IRQ half of the hotplug handler: if the controller latched a known
 * interrupt type, mask further HPD interrupts and defer the real work to
 * the threaded handler (IRQ_WAKE_THREAD); otherwise report IRQ_NONE.
 */
static irqreturn_t analogix_dp_hardirq(int irq, void *arg)
{
	struct analogix_dp_device *dp = arg;
	irqreturn_t ret = IRQ_NONE;
	enum dp_irq_type irq_type;

	irq_type = analogix_dp_get_irq_type(dp);
	if (irq_type != DP_IRQ_TYPE_UNKNOWN) {
		analogix_dp_mute_hpd_interrupt(dp);
		ret = IRQ_WAKE_THREAD;
	}

	return ret;
}

/*
 * Threaded half of the hotplug handler: on cable in/out, notify DRM of a
 * hotplug event; then ack the hotplug bits and unmask HPD interrupts that
 * the hard-IRQ half muted.
 */
static irqreturn_t analogix_dp_irq_thread(int irq, void *arg)
{
	struct analogix_dp_device *dp = arg;
	enum dp_irq_type irq_type;

	irq_type = analogix_dp_get_irq_type(dp);
	if (irq_type & DP_IRQ_TYPE_HP_CABLE_IN ||
	    irq_type & DP_IRQ_TYPE_HP_CABLE_OUT) {
		dev_dbg(dp->dev, "Detected cable status changed!\n");
		if (dp->drm_dev)
			drm_helper_hpd_irq_event(dp->drm_dev);
	}

	if (irq_type != DP_IRQ_TYPE_UNKNOWN) {
		analogix_dp_clear_hotplug_interrupts(dp);
		analogix_dp_unmute_hpd_interrupt(dp);
	}

	return IRQ_HANDLED;
}

/*
 * Read DP_MAX_DOWNSPREAD from the sink and latch whether it supports
 * link training without AUX handshake (fast link training) into
 * dp->fast_train_enable.
 */
static int analogix_dp_fast_link_train_detection(struct analogix_dp_device *dp)
{
	int ret;
	u8 spread;

	ret = drm_dp_dpcd_readb(&dp->aux, DP_MAX_DOWNSPREAD, &spread);
	if (ret != 1) {
		dev_err(dp->dev, "failed to read downspread %d\n", ret);
		return ret;
	}
	dp->fast_train_enable = !!(spread & DP_NO_AUX_HANDSHAKE_LINK_TRAINING);
	dev_dbg(dp->dev, "fast link training %s\n",
		dp->fast_train_enable ?
"supported" : "unsupported"); return 0; } static int analogix_dp_commit(struct analogix_dp_device *dp) { int ret; /* Keep the panel disabled while we configure video */ if (dp->plat_data->panel) { if (drm_panel_disable(dp->plat_data->panel)) DRM_ERROR("failed to disable the panel\n"); } ret = analogix_dp_train_link(dp); if (ret) { dev_err(dp->dev, "unable to do link train, ret=%d\n", ret); return ret; } ret = analogix_dp_enable_scramble(dp, 1); if (ret < 0) { dev_err(dp->dev, "can not enable scramble\n"); return ret; } analogix_dp_init_video(dp); ret = analogix_dp_config_video(dp); if (ret) { dev_err(dp->dev, "unable to config video\n"); return ret; } /* Safe to enable the panel now */ if (dp->plat_data->panel) { ret = drm_panel_enable(dp->plat_data->panel); if (ret) { DRM_ERROR("failed to enable the panel\n"); return ret; } } /* Check whether panel supports fast training */ ret = analogix_dp_fast_link_train_detection(dp); if (ret) return ret; if (analogix_dp_detect_sink_psr(dp)) { ret = analogix_dp_enable_sink_psr(dp); if (ret) return ret; } return ret; } static int analogix_dp_enable_psr(struct analogix_dp_device *dp) { struct dp_sdp psr_vsc; int ret; u8 sink; ret = drm_dp_dpcd_readb(&dp->aux, DP_PSR_STATUS, &sink); if (ret != 1) DRM_DEV_ERROR(dp->dev, "Failed to read psr status %d\n", ret); else if (sink == DP_PSR_SINK_ACTIVE_RFB) return 0; /* Prepare VSC packet as per EDP 1.4 spec, Table 6.9 */ memset(&psr_vsc, 0, sizeof(psr_vsc)); psr_vsc.sdp_header.HB0 = 0; psr_vsc.sdp_header.HB1 = 0x7; psr_vsc.sdp_header.HB2 = 0x2; psr_vsc.sdp_header.HB3 = 0x8; psr_vsc.db[0] = 0; psr_vsc.db[1] = EDP_VSC_PSR_STATE_ACTIVE | EDP_VSC_PSR_CRC_VALUES_VALID; ret = analogix_dp_send_psr_spd(dp, &psr_vsc, true); if (!ret) analogix_dp_set_analog_power_down(dp, POWER_ALL, true); return ret; } static int analogix_dp_disable_psr(struct analogix_dp_device *dp) { struct dp_sdp psr_vsc; int ret; u8 sink; analogix_dp_set_analog_power_down(dp, POWER_ALL, false); ret = 
drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER, DP_SET_POWER_D0); if (ret != 1) { DRM_DEV_ERROR(dp->dev, "Failed to set DP Power0 %d\n", ret); return ret; } ret = drm_dp_dpcd_readb(&dp->aux, DP_PSR_STATUS, &sink); if (ret != 1) { DRM_DEV_ERROR(dp->dev, "Failed to read psr status %d\n", ret); return ret; } else if (sink == DP_PSR_SINK_INACTIVE) { DRM_DEV_ERROR(dp->dev, "sink inactive, skip disable psr"); return 0; } ret = analogix_dp_train_link(dp); if (ret) { DRM_DEV_ERROR(dp->dev, "Failed to train the link %d\n", ret); return ret; } /* Prepare VSC packet as per EDP 1.4 spec, Table 6.9 */ memset(&psr_vsc, 0, sizeof(psr_vsc)); psr_vsc.sdp_header.HB0 = 0; psr_vsc.sdp_header.HB1 = 0x7; psr_vsc.sdp_header.HB2 = 0x2; psr_vsc.sdp_header.HB3 = 0x8; psr_vsc.db[0] = 0; psr_vsc.db[1] = 0; return analogix_dp_send_psr_spd(dp, &psr_vsc, true); } /* * This function is a bit of a catch-all for panel preparation, hopefully * simplifying the logic of functions that need to prepare/unprepare the panel * below. * * If @prepare is true, this function will prepare the panel. Conversely, if it * is false, the panel will be unprepared. * * If @is_modeset_prepare is true, the function will disregard the current state * of the panel and either prepare/unprepare the panel based on @prepare. Once * it finishes, it will update dp->panel_is_modeset to reflect the current state * of the panel. */ static int analogix_dp_prepare_panel(struct analogix_dp_device *dp, bool prepare, bool is_modeset_prepare) { int ret = 0; if (!dp->plat_data->panel) return 0; mutex_lock(&dp->panel_lock); /* * Exit early if this is a temporary prepare/unprepare and we're already * modeset (since we neither want to prepare twice or unprepare early). 
*/ if (dp->panel_is_modeset && !is_modeset_prepare) goto out; if (prepare) ret = drm_panel_prepare(dp->plat_data->panel); else ret = drm_panel_unprepare(dp->plat_data->panel); if (ret) goto out; if (is_modeset_prepare) dp->panel_is_modeset = prepare; out: mutex_unlock(&dp->panel_lock); return ret; } static int analogix_dp_get_modes(struct drm_connector *connector) { struct analogix_dp_device *dp = to_dp(connector); struct edid *edid; int ret, num_modes = 0; if (dp->plat_data->panel) { num_modes += drm_panel_get_modes(dp->plat_data->panel, connector); } else { ret = analogix_dp_prepare_panel(dp, true, false); if (ret) { DRM_ERROR("Failed to prepare panel (%d)\n", ret); return 0; } edid = drm_get_edid(connector, &dp->aux.ddc); if (edid) { drm_connector_update_edid_property(&dp->connector, edid); num_modes += drm_add_edid_modes(&dp->connector, edid); kfree(edid); } ret = analogix_dp_prepare_panel(dp, false, false); if (ret) DRM_ERROR("Failed to unprepare panel (%d)\n", ret); } if (dp->plat_data->get_modes) num_modes += dp->plat_data->get_modes(dp->plat_data, connector); return num_modes; } static struct drm_encoder * analogix_dp_best_encoder(struct drm_connector *connector) { struct analogix_dp_device *dp = to_dp(connector); return dp->encoder; } static int analogix_dp_atomic_check(struct drm_connector *connector, struct drm_atomic_state *state) { struct analogix_dp_device *dp = to_dp(connector); struct drm_connector_state *conn_state; struct drm_crtc_state *crtc_state; conn_state = drm_atomic_get_new_connector_state(state, connector); if (WARN_ON(!conn_state)) return -ENODEV; conn_state->self_refresh_aware = true; if (!conn_state->crtc) return 0; crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc); if (!crtc_state) return 0; if (crtc_state->self_refresh_active && !dp->psr_supported) return -EINVAL; return 0; } static const struct drm_connector_helper_funcs analogix_dp_connector_helper_funcs = { .get_modes = analogix_dp_get_modes, .best_encoder = 
analogix_dp_best_encoder, .atomic_check = analogix_dp_atomic_check, }; static enum drm_connector_status analogix_dp_detect(struct drm_connector *connector, bool force) { struct analogix_dp_device *dp = to_dp(connector); enum drm_connector_status status = connector_status_disconnected; int ret; if (dp->plat_data->panel) return connector_status_connected; ret = analogix_dp_prepare_panel(dp, true, false); if (ret) { DRM_ERROR("Failed to prepare panel (%d)\n", ret); return connector_status_disconnected; } if (!analogix_dp_detect_hpd(dp)) status = connector_status_connected; ret = analogix_dp_prepare_panel(dp, false, false); if (ret) DRM_ERROR("Failed to unprepare panel (%d)\n", ret); return status; } static const struct drm_connector_funcs analogix_dp_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .detect = analogix_dp_detect, .destroy = drm_connector_cleanup, .reset = drm_atomic_helper_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; static int analogix_dp_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct analogix_dp_device *dp = bridge->driver_private; struct drm_encoder *encoder = dp->encoder; struct drm_connector *connector = NULL; int ret = 0; if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) { DRM_ERROR("Fix bridge driver to make connector optional!"); return -EINVAL; } if (!bridge->encoder) { DRM_ERROR("Parent encoder object not found"); return -ENODEV; } if (!dp->plat_data->skip_connector) { connector = &dp->connector; connector->polled = DRM_CONNECTOR_POLL_HPD; ret = drm_connector_init(dp->drm_dev, connector, &analogix_dp_connector_funcs, DRM_MODE_CONNECTOR_eDP); if (ret) { DRM_ERROR("Failed to initialize connector with drm\n"); return ret; } drm_connector_helper_add(connector, &analogix_dp_connector_helper_funcs); drm_connector_attach_encoder(connector, encoder); } /* * NOTE: the connector 
registration is implemented in analogix * platform driver, that to say connector would be exist after * plat_data->attch return, that's why we record the connector * point after plat attached. */ if (dp->plat_data->attach) { ret = dp->plat_data->attach(dp->plat_data, bridge, connector); if (ret) { DRM_ERROR("Failed at platform attach func\n"); return ret; } } return 0; } static struct drm_crtc *analogix_dp_get_old_crtc(struct analogix_dp_device *dp, struct drm_atomic_state *state) { struct drm_encoder *encoder = dp->encoder; struct drm_connector *connector; struct drm_connector_state *conn_state; connector = drm_atomic_get_old_connector_for_encoder(state, encoder); if (!connector) return NULL; conn_state = drm_atomic_get_old_connector_state(state, connector); if (!conn_state) return NULL; return conn_state->crtc; } static struct drm_crtc *analogix_dp_get_new_crtc(struct analogix_dp_device *dp, struct drm_atomic_state *state) { struct drm_encoder *encoder = dp->encoder; struct drm_connector *connector; struct drm_connector_state *conn_state; connector = drm_atomic_get_new_connector_for_encoder(state, encoder); if (!connector) return NULL; conn_state = drm_atomic_get_new_connector_state(state, connector); if (!conn_state) return NULL; return conn_state->crtc; } static void analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct drm_atomic_state *old_state = old_bridge_state->base.state; struct analogix_dp_device *dp = bridge->driver_private; struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state; int ret; crtc = analogix_dp_get_new_crtc(dp, old_state); if (!crtc) return; old_crtc_state = drm_atomic_get_old_crtc_state(old_state, crtc); /* Don't touch the panel if we're coming back from PSR */ if (old_crtc_state && old_crtc_state->self_refresh_active) return; ret = analogix_dp_prepare_panel(dp, true, true); if (ret) DRM_ERROR("failed to setup the panel ret = %d\n", ret); } static int 
analogix_dp_set_bridge(struct analogix_dp_device *dp) { int ret; pm_runtime_get_sync(dp->dev); ret = clk_prepare_enable(dp->clock); if (ret < 0) { DRM_ERROR("Failed to prepare_enable the clock clk [%d]\n", ret); goto out_dp_clk_pre; } if (dp->plat_data->power_on_start) dp->plat_data->power_on_start(dp->plat_data); phy_power_on(dp->phy); ret = analogix_dp_init_dp(dp); if (ret) goto out_dp_init; /* * According to DP spec v1.3 chap 3.5.1.2 Link Training, * We should first make sure the HPD signal is asserted high by device * when we want to establish a link with it. */ ret = analogix_dp_detect_hpd(dp); if (ret) { DRM_ERROR("failed to get hpd single ret = %d\n", ret); goto out_dp_init; } ret = analogix_dp_commit(dp); if (ret) { DRM_ERROR("dp commit error, ret = %d\n", ret); goto out_dp_init; } if (dp->plat_data->power_on_end) dp->plat_data->power_on_end(dp->plat_data); enable_irq(dp->irq); return 0; out_dp_init: phy_power_off(dp->phy); if (dp->plat_data->power_off) dp->plat_data->power_off(dp->plat_data); clk_disable_unprepare(dp->clock); out_dp_clk_pre: pm_runtime_put_sync(dp->dev); return ret; } static void analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct drm_atomic_state *old_state = old_bridge_state->base.state; struct analogix_dp_device *dp = bridge->driver_private; struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state; int timeout_loop = 0; int ret; crtc = analogix_dp_get_new_crtc(dp, old_state); if (!crtc) return; old_crtc_state = drm_atomic_get_old_crtc_state(old_state, crtc); /* Not a full enable, just disable PSR and continue */ if (old_crtc_state && old_crtc_state->self_refresh_active) { ret = analogix_dp_disable_psr(dp); if (ret) DRM_ERROR("Failed to disable psr %d\n", ret); return; } if (dp->dpms_mode == DRM_MODE_DPMS_ON) return; while (timeout_loop < MAX_PLL_LOCK_LOOP) { if (analogix_dp_set_bridge(dp) == 0) { dp->dpms_mode = DRM_MODE_DPMS_ON; return; } dev_err(dp->dev, "failed to set 
bridge, retry: %d\n", timeout_loop); timeout_loop++; usleep_range(10, 11); } dev_err(dp->dev, "too many times retry set bridge, give it up\n"); } static void analogix_dp_bridge_disable(struct drm_bridge *bridge) { struct analogix_dp_device *dp = bridge->driver_private; int ret; if (dp->dpms_mode != DRM_MODE_DPMS_ON) return; if (dp->plat_data->panel) { if (drm_panel_disable(dp->plat_data->panel)) { DRM_ERROR("failed to disable the panel\n"); return; } } disable_irq(dp->irq); if (dp->plat_data->power_off) dp->plat_data->power_off(dp->plat_data); analogix_dp_set_analog_power_down(dp, POWER_ALL, 1); phy_power_off(dp->phy); clk_disable_unprepare(dp->clock); pm_runtime_put_sync(dp->dev); ret = analogix_dp_prepare_panel(dp, false, true); if (ret) DRM_ERROR("failed to setup the panel ret = %d\n", ret); dp->fast_train_enable = false; dp->psr_supported = false; dp->dpms_mode = DRM_MODE_DPMS_OFF; } static void analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct drm_atomic_state *old_state = old_bridge_state->base.state; struct analogix_dp_device *dp = bridge->driver_private; struct drm_crtc *old_crtc, *new_crtc; struct drm_crtc_state *old_crtc_state = NULL; struct drm_crtc_state *new_crtc_state = NULL; int ret; new_crtc = analogix_dp_get_new_crtc(dp, old_state); if (!new_crtc) goto out; new_crtc_state = drm_atomic_get_new_crtc_state(old_state, new_crtc); if (!new_crtc_state) goto out; /* Don't do a full disable on PSR transitions */ if (new_crtc_state->self_refresh_active) return; out: old_crtc = analogix_dp_get_old_crtc(dp, old_state); if (old_crtc) { old_crtc_state = drm_atomic_get_old_crtc_state(old_state, old_crtc); /* When moving from PSR to fully disabled, exit PSR first. 
*/ if (old_crtc_state && old_crtc_state->self_refresh_active) { ret = analogix_dp_disable_psr(dp); if (ret) DRM_ERROR("Failed to disable psr (%d)\n", ret); } } analogix_dp_bridge_disable(bridge); } static void analogix_dp_bridge_atomic_post_disable(struct drm_bridge *bridge, struct drm_bridge_state *old_bridge_state) { struct drm_atomic_state *old_state = old_bridge_state->base.state; struct analogix_dp_device *dp = bridge->driver_private; struct drm_crtc *crtc; struct drm_crtc_state *new_crtc_state; int ret; crtc = analogix_dp_get_new_crtc(dp, old_state); if (!crtc) return; new_crtc_state = drm_atomic_get_new_crtc_state(old_state, crtc); if (!new_crtc_state || !new_crtc_state->self_refresh_active) return; ret = analogix_dp_enable_psr(dp); if (ret) DRM_ERROR("Failed to enable psr (%d)\n", ret); } static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge, const struct drm_display_mode *orig_mode, const struct drm_display_mode *mode) { struct analogix_dp_device *dp = bridge->driver_private; struct drm_display_info *display_info = &dp->connector.display_info; struct video_info *video = &dp->video_info; struct device_node *dp_node = dp->dev->of_node; int vic; /* Input video interlaces & hsync pol & vsync pol */ video->interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE); video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC); video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC); /* Input video dynamic_range & colorimetry */ vic = drm_match_cea_mode(mode); if ((vic == 6) || (vic == 7) || (vic == 21) || (vic == 22) || (vic == 2) || (vic == 3) || (vic == 17) || (vic == 18)) { video->dynamic_range = CEA; video->ycbcr_coeff = COLOR_YCBCR601; } else if (vic) { video->dynamic_range = CEA; video->ycbcr_coeff = COLOR_YCBCR709; } else { video->dynamic_range = VESA; video->ycbcr_coeff = COLOR_YCBCR709; } /* Input vide bpc and color_formats */ switch (display_info->bpc) { case 12: video->color_depth = COLOR_12; break; case 10: video->color_depth = 
COLOR_10; break; case 8: video->color_depth = COLOR_8; break; case 6: video->color_depth = COLOR_6; break; default: video->color_depth = COLOR_8; break; } if (display_info->color_formats & DRM_COLOR_FORMAT_YCBCR444) video->color_space = COLOR_YCBCR444; else if (display_info->color_formats & DRM_COLOR_FORMAT_YCBCR422) video->color_space = COLOR_YCBCR422; else video->color_space = COLOR_RGB; /* * NOTE: those property parsing code is used for providing backward * compatibility for samsung platform. * Due to we used the "of_property_read_u32" interfaces, when this * property isn't present, the "video_info" can keep the original * values and wouldn't be modified. */ of_property_read_u32(dp_node, "samsung,color-space", &video->color_space); of_property_read_u32(dp_node, "samsung,dynamic-range", &video->dynamic_range); of_property_read_u32(dp_node, "samsung,ycbcr-coeff", &video->ycbcr_coeff); of_property_read_u32(dp_node, "samsung,color-depth", &video->color_depth); if (of_property_read_bool(dp_node, "hsync-active-high")) video->h_sync_polarity = true; if (of_property_read_bool(dp_node, "vsync-active-high")) video->v_sync_polarity = true; if (of_property_read_bool(dp_node, "interlaced")) video->interlaced = true; } static const struct drm_bridge_funcs analogix_dp_bridge_funcs = { .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_reset = drm_atomic_helper_bridge_reset, .atomic_pre_enable = analogix_dp_bridge_atomic_pre_enable, .atomic_enable = analogix_dp_bridge_atomic_enable, .atomic_disable = analogix_dp_bridge_atomic_disable, .atomic_post_disable = analogix_dp_bridge_atomic_post_disable, .mode_set = analogix_dp_bridge_mode_set, .attach = analogix_dp_bridge_attach, }; static int analogix_dp_create_bridge(struct drm_device *drm_dev, struct analogix_dp_device *dp) { struct drm_bridge *bridge; bridge = devm_kzalloc(drm_dev->dev, sizeof(*bridge), GFP_KERNEL); if (!bridge) { 
DRM_ERROR("failed to allocate for drm bridge\n"); return -ENOMEM; } dp->bridge = bridge; bridge->driver_private = dp; bridge->funcs = &analogix_dp_bridge_funcs; return drm_bridge_attach(dp->encoder, bridge, NULL, 0); } static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp) { struct device_node *dp_node = dp->dev->of_node; struct video_info *video_info = &dp->video_info; switch (dp->plat_data->dev_type) { case RK3288_DP: case RK3399_EDP: /* * Like Rk3288 DisplayPort TRM indicate that "Main link * containing 4 physical lanes of 2.7/1.62 Gbps/lane". */ video_info->max_link_rate = 0x0A; video_info->max_lane_count = 0x04; break; case EXYNOS_DP: /* * NOTE: those property parseing code is used for * providing backward compatibility for samsung platform. */ of_property_read_u32(dp_node, "samsung,link-rate", &video_info->max_link_rate); of_property_read_u32(dp_node, "samsung,lane-count", &video_info->max_lane_count); break; } return 0; } static ssize_t analogix_dpaux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { struct analogix_dp_device *dp = to_dp(aux); int ret; pm_runtime_get_sync(dp->dev); ret = analogix_dp_detect_hpd(dp); if (ret) goto out; ret = analogix_dp_transfer(dp, msg); out: pm_runtime_mark_last_busy(dp->dev); pm_runtime_put_autosuspend(dp->dev); return ret; } struct analogix_dp_device * analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data) { struct platform_device *pdev = to_platform_device(dev); struct analogix_dp_device *dp; struct resource *res; unsigned int irq_flags; int ret; if (!plat_data) { dev_err(dev, "Invalided input plat_data\n"); return ERR_PTR(-EINVAL); } dp = devm_kzalloc(dev, sizeof(struct analogix_dp_device), GFP_KERNEL); if (!dp) return ERR_PTR(-ENOMEM); dp->dev = &pdev->dev; dp->dpms_mode = DRM_MODE_DPMS_OFF; mutex_init(&dp->panel_lock); dp->panel_is_modeset = false; /* * platform dp driver need containor_of the plat_data to get * the driver private data, so we need to store the point of 
* plat_data, not the context of plat_data. */ dp->plat_data = plat_data; ret = analogix_dp_dt_parse_pdata(dp); if (ret) return ERR_PTR(ret); dp->phy = devm_phy_get(dp->dev, "dp"); if (IS_ERR(dp->phy)) { dev_err(dp->dev, "no DP phy configured\n"); ret = PTR_ERR(dp->phy); if (ret) { /* * phy itself is not enabled, so we can move forward * assigning NULL to phy pointer. */ if (ret == -ENOSYS || ret == -ENODEV) dp->phy = NULL; else return ERR_PTR(ret); } } dp->clock = devm_clk_get(&pdev->dev, "dp"); if (IS_ERR(dp->clock)) { dev_err(&pdev->dev, "failed to get clock\n"); return ERR_CAST(dp->clock); } clk_prepare_enable(dp->clock); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); dp->reg_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(dp->reg_base)) { ret = PTR_ERR(dp->reg_base); goto err_disable_clk; } dp->force_hpd = of_property_read_bool(dev->of_node, "force-hpd"); /* Try two different names */ dp->hpd_gpiod = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN); if (!dp->hpd_gpiod) dp->hpd_gpiod = devm_gpiod_get_optional(dev, "samsung,hpd", GPIOD_IN); if (IS_ERR(dp->hpd_gpiod)) { dev_err(dev, "error getting HDP GPIO: %ld\n", PTR_ERR(dp->hpd_gpiod)); ret = PTR_ERR(dp->hpd_gpiod); goto err_disable_clk; } if (dp->hpd_gpiod) { /* * Set up the hotplug GPIO from the device tree as an interrupt. * Simply specifying a different interrupt in the device tree * doesn't work since we handle hotplug rather differently when * using a GPIO. We also need the actual GPIO specifier so * that we can get the current state of the GPIO. 
*/ dp->irq = gpiod_to_irq(dp->hpd_gpiod); irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING; } else { dp->irq = platform_get_irq(pdev, 0); irq_flags = 0; } if (dp->irq == -ENXIO) { dev_err(&pdev->dev, "failed to get irq\n"); ret = -ENODEV; goto err_disable_clk; } ret = devm_request_threaded_irq(&pdev->dev, dp->irq, analogix_dp_hardirq, analogix_dp_irq_thread, irq_flags, "analogix-dp", dp); if (ret) { dev_err(&pdev->dev, "failed to request irq\n"); goto err_disable_clk; } disable_irq(dp->irq); return dp; err_disable_clk: clk_disable_unprepare(dp->clock); return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(analogix_dp_probe); int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev) { int ret; dp->drm_dev = drm_dev; dp->encoder = dp->plat_data->encoder; dp->aux.name = "DP-AUX"; dp->aux.transfer = analogix_dpaux_transfer; dp->aux.dev = dp->dev; dp->aux.drm_dev = drm_dev; ret = drm_dp_aux_register(&dp->aux); if (ret) return ret; pm_runtime_use_autosuspend(dp->dev); pm_runtime_set_autosuspend_delay(dp->dev, 100); pm_runtime_enable(dp->dev); ret = analogix_dp_create_bridge(drm_dev, dp); if (ret) { DRM_ERROR("failed to create bridge (%d)\n", ret); goto err_disable_pm_runtime; } return 0; err_disable_pm_runtime: pm_runtime_dont_use_autosuspend(dp->dev); pm_runtime_disable(dp->dev); drm_dp_aux_unregister(&dp->aux); return ret; } EXPORT_SYMBOL_GPL(analogix_dp_bind); void analogix_dp_unbind(struct analogix_dp_device *dp) { analogix_dp_bridge_disable(dp->bridge); dp->connector.funcs->destroy(&dp->connector); if (dp->plat_data->panel) { if (drm_panel_unprepare(dp->plat_data->panel)) DRM_ERROR("failed to turnoff the panel\n"); } drm_dp_aux_unregister(&dp->aux); pm_runtime_dont_use_autosuspend(dp->dev); pm_runtime_disable(dp->dev); } EXPORT_SYMBOL_GPL(analogix_dp_unbind); void analogix_dp_remove(struct analogix_dp_device *dp) { clk_disable_unprepare(dp->clock); } EXPORT_SYMBOL_GPL(analogix_dp_remove); #ifdef CONFIG_PM int analogix_dp_suspend(struct 
analogix_dp_device *dp) { clk_disable_unprepare(dp->clock); return 0; } EXPORT_SYMBOL_GPL(analogix_dp_suspend); int analogix_dp_resume(struct analogix_dp_device *dp) { int ret; ret = clk_prepare_enable(dp->clock); if (ret < 0) { DRM_ERROR("Failed to prepare_enable the clock clk [%d]\n", ret); return ret; } return 0; } EXPORT_SYMBOL_GPL(analogix_dp_resume); #endif int analogix_dp_start_crc(struct drm_connector *connector) { struct analogix_dp_device *dp = to_dp(connector); if (!connector->state->crtc) { DRM_ERROR("Connector %s doesn't currently have a CRTC.\n", connector->name); return -EINVAL; } return drm_dp_start_crc(&dp->aux, connector->state->crtc); } EXPORT_SYMBOL_GPL(analogix_dp_start_crc); int analogix_dp_stop_crc(struct drm_connector *connector) { struct analogix_dp_device *dp = to_dp(connector); return drm_dp_stop_crc(&dp->aux); } EXPORT_SYMBOL_GPL(analogix_dp_stop_crc); MODULE_AUTHOR("Jingoo Han <[email protected]>"); MODULE_DESCRIPTION("Analogix DP Core Driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright(c) 2020, Analogix Semiconductor. All rights reserved. * */ #include <linux/gcd.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/pm_runtime.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/workqueue.h> #include <linux/of_graph.h> #include <linux/of_platform.h> #include <drm/display/drm_dp_aux_bus.h> #include <drm/display/drm_dp_helper.h> #include <drm/display/drm_hdcp_helper.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_edid.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <media/v4l2-fwnode.h> #include <sound/hdmi-codec.h> #include <video/display_timing.h> #include "anx7625.h" /* * There is a sync issue while access I2C register between AP(CPU) and * internal firmware(OCM), to avoid the race condition, AP should access * the reserved slave address before slave address occurs changes. 
*/ static int i2c_access_workaround(struct anx7625_data *ctx, struct i2c_client *client) { u8 offset; struct device *dev = &client->dev; int ret; if (client == ctx->last_client) return 0; ctx->last_client = client; if (client == ctx->i2c.tcpc_client) offset = RSVD_00_ADDR; else if (client == ctx->i2c.tx_p0_client) offset = RSVD_D1_ADDR; else if (client == ctx->i2c.tx_p1_client) offset = RSVD_60_ADDR; else if (client == ctx->i2c.rx_p0_client) offset = RSVD_39_ADDR; else if (client == ctx->i2c.rx_p1_client) offset = RSVD_7F_ADDR; else offset = RSVD_00_ADDR; ret = i2c_smbus_write_byte_data(client, offset, 0x00); if (ret < 0) DRM_DEV_ERROR(dev, "fail to access i2c id=%x\n:%x", client->addr, offset); return ret; } static int anx7625_reg_read(struct anx7625_data *ctx, struct i2c_client *client, u8 reg_addr) { int ret; struct device *dev = &client->dev; i2c_access_workaround(ctx, client); ret = i2c_smbus_read_byte_data(client, reg_addr); if (ret < 0) DRM_DEV_ERROR(dev, "read i2c fail id=%x:%x\n", client->addr, reg_addr); return ret; } static int anx7625_reg_block_read(struct anx7625_data *ctx, struct i2c_client *client, u8 reg_addr, u8 len, u8 *buf) { int ret; struct device *dev = &client->dev; i2c_access_workaround(ctx, client); ret = i2c_smbus_read_i2c_block_data(client, reg_addr, len, buf); if (ret < 0) DRM_DEV_ERROR(dev, "read i2c block fail id=%x:%x\n", client->addr, reg_addr); return ret; } static int anx7625_reg_write(struct anx7625_data *ctx, struct i2c_client *client, u8 reg_addr, u8 reg_val) { int ret; struct device *dev = &client->dev; i2c_access_workaround(ctx, client); ret = i2c_smbus_write_byte_data(client, reg_addr, reg_val); if (ret < 0) DRM_DEV_ERROR(dev, "fail to write i2c id=%x\n:%x", client->addr, reg_addr); return ret; } static int anx7625_reg_block_write(struct anx7625_data *ctx, struct i2c_client *client, u8 reg_addr, u8 len, u8 *buf) { int ret; struct device *dev = &client->dev; i2c_access_workaround(ctx, client); ret = 
i2c_smbus_write_i2c_block_data(client, reg_addr, len, buf); if (ret < 0) dev_err(dev, "write i2c block failed id=%x\n:%x", client->addr, reg_addr); return ret; } static int anx7625_write_or(struct anx7625_data *ctx, struct i2c_client *client, u8 offset, u8 mask) { int val; val = anx7625_reg_read(ctx, client, offset); if (val < 0) return val; return anx7625_reg_write(ctx, client, offset, (val | (mask))); } static int anx7625_write_and(struct anx7625_data *ctx, struct i2c_client *client, u8 offset, u8 mask) { int val; val = anx7625_reg_read(ctx, client, offset); if (val < 0) return val; return anx7625_reg_write(ctx, client, offset, (val & (mask))); } static int anx7625_write_and_or(struct anx7625_data *ctx, struct i2c_client *client, u8 offset, u8 and_mask, u8 or_mask) { int val; val = anx7625_reg_read(ctx, client, offset); if (val < 0) return val; return anx7625_reg_write(ctx, client, offset, (val & and_mask) | (or_mask)); } static int anx7625_config_bit_matrix(struct anx7625_data *ctx) { int i, ret; ret = anx7625_write_or(ctx, ctx->i2c.tx_p2_client, AUDIO_CONTROL_REGISTER, 0x80); for (i = 0; i < 13; i++) ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client, VIDEO_BIT_MATRIX_12 + i, 0x18 + i); return ret; } static int anx7625_read_ctrl_status_p0(struct anx7625_data *ctx) { return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, AP_AUX_CTRL_STATUS); } static int wait_aux_op_finish(struct anx7625_data *ctx) { struct device *dev = ctx->dev; int val; int ret; ret = readx_poll_timeout(anx7625_read_ctrl_status_p0, ctx, val, (!(val & AP_AUX_CTRL_OP_EN) || (val < 0)), 2000, 2000 * 150); if (ret) { DRM_DEV_ERROR(dev, "aux operation fail!\n"); return -EIO; } val = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, AP_AUX_CTRL_STATUS); if (val < 0 || (val & 0x0F)) { DRM_DEV_ERROR(dev, "aux status %02x\n", val); return -EIO; } return 0; } static int anx7625_aux_trans(struct anx7625_data *ctx, u8 op, u32 address, u8 len, u8 *buf) { struct device *dev = ctx->dev; int ret; u8 addrh, addrm, 
addrl; u8 cmd; bool is_write = !(op & DP_AUX_I2C_READ); if (len > DP_AUX_MAX_PAYLOAD_BYTES) { dev_err(dev, "exceed aux buffer len.\n"); return -EINVAL; } if (!len) return len; addrl = address & 0xFF; addrm = (address >> 8) & 0xFF; addrh = (address >> 16) & 0xFF; if (!is_write) op &= ~DP_AUX_I2C_MOT; cmd = DPCD_CMD(len, op); /* Set command and length */ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, AP_AUX_COMMAND, cmd); /* Set aux access address */ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, AP_AUX_ADDR_7_0, addrl); ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, AP_AUX_ADDR_15_8, addrm); ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, AP_AUX_ADDR_19_16, addrh); if (is_write) ret |= anx7625_reg_block_write(ctx, ctx->i2c.rx_p0_client, AP_AUX_BUFF_START, len, buf); /* Enable aux access */ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p0_client, AP_AUX_CTRL_STATUS, AP_AUX_CTRL_OP_EN); if (ret < 0) { dev_err(dev, "cannot access aux related register.\n"); return -EIO; } ret = wait_aux_op_finish(ctx); if (ret < 0) { dev_err(dev, "aux IO error: wait aux op finish.\n"); return ret; } /* Write done */ if (is_write) return len; /* Read done, read out dpcd data */ ret = anx7625_reg_block_read(ctx, ctx->i2c.rx_p0_client, AP_AUX_BUFF_START, len, buf); if (ret < 0) { dev_err(dev, "read dpcd register failed\n"); return -EIO; } return len; } static int anx7625_video_mute_control(struct anx7625_data *ctx, u8 status) { int ret; if (status) { /* Set mute on flag */ ret = anx7625_write_or(ctx, ctx->i2c.rx_p0_client, AP_AV_STATUS, AP_MIPI_MUTE); /* Clear mipi RX en */ ret |= anx7625_write_and(ctx, ctx->i2c.rx_p0_client, AP_AV_STATUS, (u8)~AP_MIPI_RX_EN); } else { /* Mute off flag */ ret = anx7625_write_and(ctx, ctx->i2c.rx_p0_client, AP_AV_STATUS, (u8)~AP_MIPI_MUTE); /* Set MIPI RX EN */ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p0_client, AP_AV_STATUS, AP_MIPI_RX_EN); } return ret; } /* Reduction of fraction a/b */ static void anx7625_reduction_of_a_fraction(unsigned 
long *a, unsigned long *b) { unsigned long gcd_num; unsigned long tmp_a, tmp_b; u32 i = 1; gcd_num = gcd(*a, *b); *a /= gcd_num; *b /= gcd_num; tmp_a = *a; tmp_b = *b; while ((*a > MAX_UNSIGNED_24BIT) || (*b > MAX_UNSIGNED_24BIT)) { i++; *a = tmp_a / i; *b = tmp_b / i; } /* * In the end, make a, b larger to have higher ODFC PLL * output frequency accuracy */ while ((*a < MAX_UNSIGNED_24BIT) && (*b < MAX_UNSIGNED_24BIT)) { *a <<= 1; *b <<= 1; } *a >>= 1; *b >>= 1; } static int anx7625_calculate_m_n(u32 pixelclock, unsigned long *m, unsigned long *n, u8 *post_divider) { if (pixelclock > PLL_OUT_FREQ_ABS_MAX / POST_DIVIDER_MIN) { /* Pixel clock frequency is too high */ DRM_ERROR("pixelclock too high, act(%d), maximum(%lu)\n", pixelclock, PLL_OUT_FREQ_ABS_MAX / POST_DIVIDER_MIN); return -EINVAL; } if (pixelclock < PLL_OUT_FREQ_ABS_MIN / POST_DIVIDER_MAX) { /* Pixel clock frequency is too low */ DRM_ERROR("pixelclock too low, act(%d), maximum(%lu)\n", pixelclock, PLL_OUT_FREQ_ABS_MIN / POST_DIVIDER_MAX); return -EINVAL; } for (*post_divider = 1; pixelclock < (PLL_OUT_FREQ_MIN / (*post_divider));) *post_divider += 1; if (*post_divider > POST_DIVIDER_MAX) { for (*post_divider = 1; (pixelclock < (PLL_OUT_FREQ_ABS_MIN / (*post_divider)));) *post_divider += 1; if (*post_divider > POST_DIVIDER_MAX) { DRM_ERROR("cannot find property post_divider(%d)\n", *post_divider); return -EDOM; } } /* Patch to improve the accuracy */ if (*post_divider == 7) { /* 27,000,000 is not divisible by 7 */ *post_divider = 8; } else if (*post_divider == 11) { /* 27,000,000 is not divisible by 11 */ *post_divider = 12; } else if ((*post_divider == 13) || (*post_divider == 14)) { /* 27,000,000 is not divisible by 13 or 14 */ *post_divider = 15; } if (pixelclock * (*post_divider) > PLL_OUT_FREQ_ABS_MAX) { DRM_ERROR("act clock(%u) large than maximum(%lu)\n", pixelclock * (*post_divider), PLL_OUT_FREQ_ABS_MAX); return -EDOM; } *m = pixelclock; *n = XTAL_FRQ / (*post_divider); 
anx7625_reduction_of_a_fraction(m, n); return 0; } static int anx7625_odfc_config(struct anx7625_data *ctx, u8 post_divider) { int ret; struct device *dev = ctx->dev; /* Config input reference clock frequency 27MHz/19.2MHz */ ret = anx7625_write_and(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_16, ~(REF_CLK_27000KHZ << MIPI_FREF_D_IND)); ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_16, (REF_CLK_27000KHZ << MIPI_FREF_D_IND)); /* Post divider */ ret |= anx7625_write_and(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_8, 0x0f); ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_8, post_divider << 4); /* Add patch for MIS2-125 (5pcs ANX7625 fail ATE MBIST test) */ ret |= anx7625_write_and(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_7, ~MIPI_PLL_VCO_TUNE_REG_VAL); /* Reset ODFC PLL */ ret |= anx7625_write_and(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_7, ~MIPI_PLL_RESET_N); ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_7, MIPI_PLL_RESET_N); if (ret < 0) DRM_DEV_ERROR(dev, "IO error.\n"); return ret; } /* * The MIPI source video data exist large variation (e.g. 59Hz ~ 61Hz), * anx7625 defined K ratio for matching MIPI input video clock and * DP output video clock. Increase K value can match bigger video data * variation. IVO panel has small variation than DP CTS spec, need * decrease the K value. 
*/ static int anx7625_set_k_value(struct anx7625_data *ctx) { struct edid *edid = (struct edid *)ctx->slimport_edid_p.edid_raw_data; if (edid->mfg_id[0] == IVO_MID0 && edid->mfg_id[1] == IVO_MID1) return anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_ADJ_1, 0x3B); return anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_ADJ_1, 0x3D); } static int anx7625_dsi_video_timing_config(struct anx7625_data *ctx) { struct device *dev = ctx->dev; unsigned long m, n; u16 htotal; int ret; u8 post_divider = 0; ret = anx7625_calculate_m_n(ctx->dt.pixelclock.min * 1000, &m, &n, &post_divider); if (ret) { DRM_DEV_ERROR(dev, "cannot get property m n value.\n"); return ret; } DRM_DEV_DEBUG_DRIVER(dev, "compute M(%lu), N(%lu), divider(%d).\n", m, n, post_divider); /* Configure pixel clock */ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, PIXEL_CLOCK_L, (ctx->dt.pixelclock.min / 1000) & 0xFF); ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, PIXEL_CLOCK_H, (ctx->dt.pixelclock.min / 1000) >> 8); /* Lane count */ ret |= anx7625_write_and(ctx, ctx->i2c.rx_p1_client, MIPI_LANE_CTRL_0, 0xfc); ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, MIPI_LANE_CTRL_0, ctx->pdata.mipi_lanes - 1); /* Htotal */ htotal = ctx->dt.hactive.min + ctx->dt.hfront_porch.min + ctx->dt.hback_porch.min + ctx->dt.hsync_len.min; ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, HORIZONTAL_TOTAL_PIXELS_L, htotal & 0xFF); ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, HORIZONTAL_TOTAL_PIXELS_H, htotal >> 8); /* Hactive */ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, HORIZONTAL_ACTIVE_PIXELS_L, ctx->dt.hactive.min & 0xFF); ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, HORIZONTAL_ACTIVE_PIXELS_H, ctx->dt.hactive.min >> 8); /* HFP */ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, HORIZONTAL_FRONT_PORCH_L, ctx->dt.hfront_porch.min); ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, HORIZONTAL_FRONT_PORCH_H, ctx->dt.hfront_porch.min >> 8); /* HWS */ ret |= 
anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, HORIZONTAL_SYNC_WIDTH_L, ctx->dt.hsync_len.min); ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, HORIZONTAL_SYNC_WIDTH_H, ctx->dt.hsync_len.min >> 8); /* HBP */ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, HORIZONTAL_BACK_PORCH_L, ctx->dt.hback_porch.min); ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, HORIZONTAL_BACK_PORCH_H, ctx->dt.hback_porch.min >> 8); /* Vactive */ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, ACTIVE_LINES_L, ctx->dt.vactive.min); ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, ACTIVE_LINES_H, ctx->dt.vactive.min >> 8); /* VFP */ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, VERTICAL_FRONT_PORCH, ctx->dt.vfront_porch.min); /* VWS */ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, VERTICAL_SYNC_WIDTH, ctx->dt.vsync_len.min); /* VBP */ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, VERTICAL_BACK_PORCH, ctx->dt.vback_porch.min); /* M value */ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, MIPI_PLL_M_NUM_23_16, (m >> 16) & 0xff); ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, MIPI_PLL_M_NUM_15_8, (m >> 8) & 0xff); ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, MIPI_PLL_M_NUM_7_0, (m & 0xff)); /* N value */ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, MIPI_PLL_N_NUM_23_16, (n >> 16) & 0xff); ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, MIPI_PLL_N_NUM_15_8, (n >> 8) & 0xff); ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, MIPI_PLL_N_NUM_7_0, (n & 0xff)); anx7625_set_k_value(ctx); ret |= anx7625_odfc_config(ctx, post_divider - 1); if (ret < 0) DRM_DEV_ERROR(dev, "mipi dsi setup IO error.\n"); return ret; } static int anx7625_swap_dsi_lane3(struct anx7625_data *ctx) { int val; struct device *dev = ctx->dev; /* Swap MIPI-DSI data lane 3 P and N */ val = anx7625_reg_read(ctx, ctx->i2c.rx_p1_client, MIPI_SWAP); if (val < 0) { DRM_DEV_ERROR(dev, "IO error : access MIPI_SWAP.\n"); return -EIO; } val |= (1 << MIPI_SWAP_CH3); 
return anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, MIPI_SWAP, val); } static int anx7625_api_dsi_config(struct anx7625_data *ctx) { int val, ret; struct device *dev = ctx->dev; /* Swap MIPI-DSI data lane 3 P and N */ ret = anx7625_swap_dsi_lane3(ctx); if (ret < 0) { DRM_DEV_ERROR(dev, "IO error : swap dsi lane 3 fail.\n"); return ret; } /* DSI clock settings */ val = (0 << MIPI_HS_PWD_CLK) | (0 << MIPI_HS_RT_CLK) | (0 << MIPI_PD_CLK) | (1 << MIPI_CLK_RT_MANUAL_PD_EN) | (1 << MIPI_CLK_HS_MANUAL_PD_EN) | (0 << MIPI_CLK_DET_DET_BYPASS) | (0 << MIPI_CLK_MISS_CTRL) | (0 << MIPI_PD_LPTX_CH_MANUAL_PD_EN); ret = anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, MIPI_PHY_CONTROL_3, val); /* * Decreased HS prepare timing delay from 160ns to 80ns work with * a) Dragon board 810 series (Qualcomm AP) * b) Moving Pixel DSI source (PG3A pattern generator + * P332 D-PHY Probe) default D-PHY timing * 5ns/step */ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, MIPI_TIME_HS_PRPR, 0x10); /* Enable DSI mode*/ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_18, SELECT_DSI << MIPI_DPI_SELECT); ret |= anx7625_dsi_video_timing_config(ctx); if (ret < 0) { DRM_DEV_ERROR(dev, "dsi video timing config fail\n"); return ret; } /* Toggle m, n ready */ ret = anx7625_write_and(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_6, ~(MIPI_M_NUM_READY | MIPI_N_NUM_READY)); usleep_range(1000, 1100); ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_6, MIPI_M_NUM_READY | MIPI_N_NUM_READY); /* Configure integer stable register */ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, MIPI_VIDEO_STABLE_CNT, 0x02); /* Power on MIPI RX */ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, MIPI_LANE_CTRL_10, 0x00); ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, MIPI_LANE_CTRL_10, 0x80); if (ret < 0) DRM_DEV_ERROR(dev, "IO error : mipi dsi enable init fail.\n"); return ret; } static int anx7625_dsi_config(struct anx7625_data *ctx) { struct device *dev = ctx->dev; int 
ret; DRM_DEV_DEBUG_DRIVER(dev, "config dsi.\n"); /* DSC disable */ ret = anx7625_write_and(ctx, ctx->i2c.rx_p0_client, R_DSC_CTRL_0, ~DSC_EN); ret |= anx7625_api_dsi_config(ctx); if (ret < 0) { DRM_DEV_ERROR(dev, "IO error : api dsi config error.\n"); return ret; } /* Set MIPI RX EN */ ret = anx7625_write_or(ctx, ctx->i2c.rx_p0_client, AP_AV_STATUS, AP_MIPI_RX_EN); /* Clear mute flag */ ret |= anx7625_write_and(ctx, ctx->i2c.rx_p0_client, AP_AV_STATUS, (u8)~AP_MIPI_MUTE); if (ret < 0) DRM_DEV_ERROR(dev, "IO error : enable mipi rx fail.\n"); else DRM_DEV_DEBUG_DRIVER(dev, "success to config DSI\n"); return ret; } static int anx7625_api_dpi_config(struct anx7625_data *ctx) { struct device *dev = ctx->dev; u16 freq = ctx->dt.pixelclock.min / 1000; int ret; /* configure pixel clock */ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, PIXEL_CLOCK_L, freq & 0xFF); ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, PIXEL_CLOCK_H, (freq >> 8)); /* set DPI mode */ /* set to DPI PLL module sel */ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_9, 0x20); /* power down MIPI */ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, MIPI_LANE_CTRL_10, 0x08); /* enable DPI mode */ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_18, 0x1C); /* set first edge */ ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client, VIDEO_CONTROL_0, 0x06); if (ret < 0) DRM_DEV_ERROR(dev, "IO error : dpi phy set failed.\n"); return ret; } static int anx7625_dpi_config(struct anx7625_data *ctx) { struct device *dev = ctx->dev; int ret; DRM_DEV_DEBUG_DRIVER(dev, "config dpi\n"); /* DSC disable */ ret = anx7625_write_and(ctx, ctx->i2c.rx_p0_client, R_DSC_CTRL_0, ~DSC_EN); if (ret < 0) { DRM_DEV_ERROR(dev, "IO error : disable dsc failed.\n"); return ret; } ret = anx7625_config_bit_matrix(ctx); if (ret < 0) { DRM_DEV_ERROR(dev, "config bit matrix failed.\n"); return ret; } ret = anx7625_api_dpi_config(ctx); if (ret < 0) { DRM_DEV_ERROR(dev, "mipi phy(dpi) setup 
failed.\n"); return ret; } /* set MIPI RX EN */ ret = anx7625_write_or(ctx, ctx->i2c.rx_p0_client, AP_AV_STATUS, AP_MIPI_RX_EN); /* clear mute flag */ ret |= anx7625_write_and(ctx, ctx->i2c.rx_p0_client, AP_AV_STATUS, (u8)~AP_MIPI_MUTE); if (ret < 0) DRM_DEV_ERROR(dev, "IO error : enable mipi rx failed.\n"); return ret; } static int anx7625_read_flash_status(struct anx7625_data *ctx) { return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, R_RAM_CTRL); } static int anx7625_hdcp_key_probe(struct anx7625_data *ctx) { int ret, val; struct device *dev = ctx->dev; u8 ident[FLASH_BUF_LEN]; ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, FLASH_ADDR_HIGH, 0x91); ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, FLASH_ADDR_LOW, 0xA0); if (ret < 0) { dev_err(dev, "IO error : set key flash address.\n"); return ret; } ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, FLASH_LEN_HIGH, (FLASH_BUF_LEN - 1) >> 8); ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, FLASH_LEN_LOW, (FLASH_BUF_LEN - 1) & 0xFF); if (ret < 0) { dev_err(dev, "IO error : set key flash len.\n"); return ret; } ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, R_FLASH_RW_CTRL, FLASH_READ); ret |= readx_poll_timeout(anx7625_read_flash_status, ctx, val, ((val & FLASH_DONE) || (val < 0)), 2000, 2000 * 150); if (ret) { dev_err(dev, "flash read access fail!\n"); return -EIO; } ret = anx7625_reg_block_read(ctx, ctx->i2c.rx_p0_client, FLASH_BUF_BASE_ADDR, FLASH_BUF_LEN, ident); if (ret < 0) { dev_err(dev, "read flash data fail!\n"); return -EIO; } if (ident[29] == 0xFF && ident[30] == 0xFF && ident[31] == 0xFF) return -EINVAL; return 0; } static int anx7625_hdcp_key_load(struct anx7625_data *ctx) { int ret; struct device *dev = ctx->dev; /* Select HDCP 1.4 KEY */ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, R_BOOT_RETRY, 0x12); ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, FLASH_ADDR_HIGH, HDCP14KEY_START_ADDR >> 8); ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, FLASH_ADDR_LOW, 
HDCP14KEY_START_ADDR & 0xFF);
	ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
				 R_RAM_LEN_H, HDCP14KEY_SIZE >> 12);
	ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
				 R_RAM_LEN_L, HDCP14KEY_SIZE >> 4);
	ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
				 R_RAM_ADDR_H, 0);
	ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
				 R_RAM_ADDR_L, 0);
	/* Enable HDCP 1.4 KEY load */
	ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
				 R_RAM_CTRL, DECRYPT_EN | LOAD_START);
	dev_dbg(dev, "load HDCP 1.4 key done\n");
	return ret;
}

/*
 * Disable HDCP 1.4: clear the secure-OCM HDCP bits, raise the try-auth
 * and DRM-interrupt flags, then clear HARD_AUTH_EN on the TX side.
 * Returns the status of the final TX_HDCP_CTRL0 write.
 */
static int anx7625_hdcp_disable(struct anx7625_data *ctx)
{
	int ret;
	struct device *dev = ctx->dev;

	dev_dbg(dev, "disable HDCP 1.4\n");

	/* Disable HDCP */
	ret = anx7625_write_and(ctx, ctx->i2c.rx_p1_client, 0xee, 0x9f);
	/* Try auth flag */
	ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, 0xec, 0x10);
	/* Interrupt for DRM */
	ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, 0xff, 0x01);
	if (ret < 0)
		dev_err(dev, "fail to disable HDCP\n");

	return anx7625_write_and(ctx, ctx->i2c.tx_p0_client,
				 TX_HDCP_CTRL0, ~HARD_AUTH_EN & 0xFF);
}

/*
 * Enable HDCP 1.4 authentication: verify a key exists in flash, check
 * the sink's BCAPS over AUX, reset the TX HDCP state machine, program
 * the KSVR/R0 wait times, load the key and start hardware auth.
 * Returns 0/positive on success or a negative errno.
 */
static int anx7625_hdcp_enable(struct anx7625_data *ctx)
{
	u8 bcap;
	int ret;
	struct device *dev = ctx->dev;

	ret = anx7625_hdcp_key_probe(ctx);
	if (ret) {
		dev_dbg(dev, "no key found, not to do hdcp\n");
		return ret;
	}

	/* Read downstream capability */
	ret = anx7625_aux_trans(ctx, DP_AUX_NATIVE_READ, DP_AUX_HDCP_BCAPS, 1, &bcap);
	if (ret < 0)
		return ret;

	if (!(bcap & DP_BCAPS_HDCP_CAPABLE)) {
		pr_warn("downstream not support HDCP 1.4, cap(%x).\n", bcap);
		return 0;
	}

	dev_dbg(dev, "enable HDCP 1.4\n");

	/* First clear HDCP state */
	ret = anx7625_reg_write(ctx, ctx->i2c.tx_p0_client,
				TX_HDCP_CTRL0,
				KSVLIST_VLD | BKSV_SRM_PASS | RE_AUTHEN);
	usleep_range(1000, 1100);
	/* Second clear HDCP state */
	ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p0_client,
				 TX_HDCP_CTRL0,
				 KSVLIST_VLD | BKSV_SRM_PASS | RE_AUTHEN);

	/* Set time for waiting KSVR */
	ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p0_client,
				 SP_TX_WAIT_KSVR_TIME, 0xc8);
	/* Set time for waiting R0 */
	ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p0_client,
				 SP_TX_WAIT_R0_TIME, 0xb0);
	ret |= anx7625_hdcp_key_load(ctx);
	if (ret) {
		pr_warn("prepare HDCP key failed.\n");
		return ret;
	}

	ret = anx7625_write_or(ctx, ctx->i2c.rx_p1_client, 0xee, 0x20);

	/* Try auth flag */
	ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, 0xec, 0x10);
	/* Interrupt for DRM */
	ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, 0xff, 0x01);
	if (ret < 0)
		dev_err(dev, "fail to enable HDCP\n");

	return anx7625_write_or(ctx, ctx->i2c.tx_p0_client,
				TX_HDCP_CTRL0, HARD_AUTH_EN);
}

/*
 * Start the DP output: wake the sink (D0), disable HDCP, and configure
 * the input path (DPI or DSI) from the cached display timing.  Marks
 * content protection UNDESIRED and records dp_en = 1.
 */
static void anx7625_dp_start(struct anx7625_data *ctx)
{
	int ret;
	struct device *dev = ctx->dev;
	u8 data;

	if (!ctx->display_timing_valid) {
		DRM_DEV_ERROR(dev, "mipi not set display timing yet.\n");
		return;
	}

	dev_dbg(dev, "set downstream sink into normal\n");
	/* Downstream sink enter into normal mode */
	data = DP_SET_POWER_D0;
	ret = anx7625_aux_trans(ctx, DP_AUX_NATIVE_WRITE, DP_SET_POWER, 1, &data);
	if (ret < 0)
		dev_err(dev, "IO error : set sink into normal mode fail\n");

	/* Disable HDCP */
	anx7625_write_and(ctx, ctx->i2c.rx_p1_client, 0xee, 0x9f);

	if (ctx->pdata.is_dpi)
		ret = anx7625_dpi_config(ctx);
	else
		ret = anx7625_dsi_config(ctx);

	if (ret < 0)
		DRM_DEV_ERROR(dev, "MIPI phy setup error.\n");

	ctx->hdcp_cp = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;

	ctx->dp_en = 1;
}

/*
 * Stop the DP output: disable audio and video, mute, and put the sink
 * into standby (D3).  Marks content protection UNDESIRED and records
 * dp_en = 0.
 */
static void anx7625_dp_stop(struct anx7625_data *ctx)
{
	struct device *dev = ctx->dev;
	int ret;
	u8 data;

	DRM_DEV_DEBUG_DRIVER(dev, "stop dp output\n");

	/*
	 * Video disable: 0x72:08 bit 7 = 0;
	 * Audio disable: 0x70:87 bit 0 = 0;
	 */
	ret = anx7625_write_and(ctx, ctx->i2c.tx_p0_client, 0x87, 0xfe);
	ret |= anx7625_write_and(ctx, ctx->i2c.tx_p2_client, 0x08, 0x7f);

	ret |= anx7625_video_mute_control(ctx, 1);

	dev_dbg(dev, "notify downstream enter into standby\n");
	/* Downstream monitor enter into standby mode */
	data = DP_SET_POWER_D3;
	ret |= anx7625_aux_trans(ctx, DP_AUX_NATIVE_WRITE, DP_SET_POWER, 1, &data);
	if (ret < 0)
		DRM_DEV_ERROR(dev, "IO error : mute video fail\n");

	ctx->hdcp_cp = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;

	ctx->dp_en = 0;
}

/* Pulse the AUX reset bit to recover the AUX channel. */
static int sp_tx_rst_aux(struct anx7625_data *ctx)
{
	int ret;

	ret = anx7625_write_or(ctx, ctx->i2c.tx_p2_client, RST_CTRL2, AUX_RST);
	ret |= anx7625_write_and(ctx, ctx->i2c.tx_p2_client, RST_CTRL2, ~AUX_RST);
	return ret;
}

/*
 * Issue an AUX "write" of a single offset byte (command 0x04) and wait
 * for the operation to finish.  Used to set the EDID read offset.
 */
static int sp_tx_aux_wr(struct anx7625_data *ctx, u8 offset)
{
	int ret;

	ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
				AP_AUX_BUFF_START, offset);
	ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
				 AP_AUX_COMMAND, 0x04);
	ret |= anx7625_write_or(ctx, ctx->i2c.rx_p0_client,
				AP_AUX_CTRL_STATUS, AP_AUX_CTRL_OP_EN);
	return (ret | wait_aux_op_finish(ctx));
}

/*
 * Issue an AUX read with the given length/command byte and wait for the
 * operation to finish.
 */
static int sp_tx_aux_rd(struct anx7625_data *ctx, u8 len_cmd)
{
	int ret;

	ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
				AP_AUX_COMMAND, len_cmd);
	ret |= anx7625_write_or(ctx, ctx->i2c.rx_p0_client,
				AP_AUX_CTRL_STATUS, AP_AUX_CTRL_OP_EN);
	return (ret | wait_aux_op_finish(ctx));
}

/*
 * Read the EDID extension-block count (byte 0x7e of the base block).
 * Returns the number of extension blocks (clamped to 1 if it exceeds
 * MAX_EDID_BLOCK) or -EIO on AUX failure.
 */
static int sp_tx_get_edid_block(struct anx7625_data *ctx)
{
	int c = 0;
	struct device *dev = ctx->dev;

	sp_tx_aux_wr(ctx, 0x7e);
	sp_tx_aux_rd(ctx, 0x01);
	c = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, AP_AUX_BUFF_START);
	if (c < 0) {
		DRM_DEV_ERROR(dev, "IO error : access AUX BUFF.\n");
		return -EIO;
	}

	DRM_DEV_DEBUG_DRIVER(dev, " EDID Block = %d\n", c + 1);

	if (c > MAX_EDID_BLOCK)
		c = 1;

	return c;
}

/*
 * Read one 16-byte chunk of EDID at @offset into @pblock_buf, retrying
 * up to EDID_TRY_CNT times with an AUX reset between attempts.
 * Returns the (positive) byte count from the block read on success,
 * -EIO when all retries are exhausted.
 */
static int edid_read(struct anx7625_data *ctx,
		     u8 offset, u8 *pblock_buf)
{
	int ret, cnt;
	struct device *dev = ctx->dev;

	for (cnt = 0; cnt <= EDID_TRY_CNT; cnt++) {
		sp_tx_aux_wr(ctx, offset);
		/* Set I2C read com 0x01 mot = 0 and read 16 bytes */
		ret = sp_tx_aux_rd(ctx, 0xf1);
		if (ret) {
			ret = sp_tx_rst_aux(ctx);
			DRM_DEV_DEBUG_DRIVER(dev, "edid read fail, reset!\n");
		} else {
			ret = anx7625_reg_block_read(ctx, ctx->i2c.rx_p0_client,
						     AP_AUX_BUFF_START,
						     MAX_DPCD_BUFFER_SIZE,
						     pblock_buf);
			if (ret > 0)
				break;
		}
	}

	if (cnt > EDID_TRY_CNT)
		return -EIO;

	return ret;
}

/*
 * Read one 16-byte chunk of an EDID extension segment (DDC segment
 * pointer at i2c 0x30, data at 0x50), with the same retry policy as
 * edid_read().
 */
static int segments_edid_read(struct anx7625_data *ctx, u8 segment, 
u8 *buf, u8 offset)
{
	u8 cnt;
	int ret;
	struct device *dev = ctx->dev;

	/* Write address only */
	ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
				AP_AUX_ADDR_7_0, 0x30);
	ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
				 AP_AUX_COMMAND, 0x04);
	ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
				 AP_AUX_CTRL_STATUS,
				 AP_AUX_CTRL_ADDRONLY | AP_AUX_CTRL_OP_EN);

	ret |= wait_aux_op_finish(ctx);
	/* Write segment address */
	ret |= sp_tx_aux_wr(ctx, segment);
	/* Data read */
	ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
				 AP_AUX_ADDR_7_0, 0x50);
	if (ret) {
		DRM_DEV_ERROR(dev, "IO error : aux initial fail.\n");
		return ret;
	}

	for (cnt = 0; cnt <= EDID_TRY_CNT; cnt++) {
		sp_tx_aux_wr(ctx, offset);
		/* Set I2C read com 0x01 mot = 0 and read 16 bytes */
		ret = sp_tx_aux_rd(ctx, 0xf1);
		if (ret) {
			ret = sp_tx_rst_aux(ctx);
			DRM_DEV_ERROR(dev, "segment read fail, reset!\n");
		} else {
			ret = anx7625_reg_block_read(ctx, ctx->i2c.rx_p0_client,
						     AP_AUX_BUFF_START,
						     MAX_DPCD_BUFFER_SIZE, buf);
			if (ret > 0)
				break;
		}
	}

	if (cnt > EDID_TRY_CNT)
		return -EIO;

	return ret;
}

/*
 * Read the full EDID (up to 4 blocks) into @pedid_blocks_buf.
 * Blocks 0 and 1 are read 16 bytes at a time via edid_read(); blocks
 * 2 and 3 go through segment 1 via segments_edid_read().  Returns the
 * total number of blocks read on success, or a negative errno.
 */
static int sp_tx_edid_read(struct anx7625_data *ctx,
			   u8 *pedid_blocks_buf)
{
	u8 offset;
	int edid_pos;
	int count, blocks_num;
	u8 pblock_buf[MAX_DPCD_BUFFER_SIZE];
	u8 i, j;
	int g_edid_break = 0;
	int ret;
	struct device *dev = ctx->dev;

	/* Address initial */
	ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
				AP_AUX_ADDR_7_0, 0x50);
	ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
				 AP_AUX_ADDR_15_8, 0);
	ret |= anx7625_write_and(ctx, ctx->i2c.rx_p0_client,
				 AP_AUX_ADDR_19_16, 0xf0);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "access aux channel IO error.\n");
		return -EIO;
	}

	blocks_num = sp_tx_get_edid_block(ctx);
	if (blocks_num < 0)
		return blocks_num;

	count = 0;

	do {
		switch (count) {
		case 0:
		case 1:
			for (i = 0; i < 8; i++) {
				offset = (i + count * 8) * MAX_DPCD_BUFFER_SIZE;
				g_edid_break = edid_read(ctx, offset,
							 pblock_buf);

				if (g_edid_break < 0)
					break;

				memcpy(&pedid_blocks_buf[offset],
				       pblock_buf, MAX_DPCD_BUFFER_SIZE);
			}

			break;
		case 2:
			offset = 0x00;

			for (j = 0; j < 8; j++) {
				edid_pos = (j + count * 8) *
					MAX_DPCD_BUFFER_SIZE;

				/*
				 * NOTE(review): edid_read() returns a byte
				 * count or a negative errno, never 1, so this
				 * early-break looks unreachable — confirm the
				 * intended abort condition (likely < 0).
				 */
				if (g_edid_break == 1)
					break;

				ret = segments_edid_read(ctx, count / 2,
							 pblock_buf, offset);
				if (ret < 0)
					return ret;

				memcpy(&pedid_blocks_buf[edid_pos],
				       pblock_buf, MAX_DPCD_BUFFER_SIZE);

				offset = offset + 0x10;
			}

			break;
		case 3:
			offset = 0x80;

			for (j = 0; j < 8; j++) {
				edid_pos = (j + count * 8) *
					MAX_DPCD_BUFFER_SIZE;

				/* NOTE(review): same unreachable check as case 2. */
				if (g_edid_break == 1)
					break;

				ret = segments_edid_read(ctx, count / 2,
							 pblock_buf, offset);
				if (ret < 0)
					return ret;

				memcpy(&pedid_blocks_buf[edid_pos],
				       pblock_buf, MAX_DPCD_BUFFER_SIZE);

				offset = offset + 0x10;
			}

			break;
		default:
			break;
		}

		count++;

	} while (blocks_num >= count);

	/* Check edid data */
	if (!drm_edid_is_valid((struct edid *)pedid_blocks_buf)) {
		DRM_DEV_ERROR(dev, "WARNING! edid check fail!\n");
		return -EINVAL;
	}

	/* Reset aux channel */
	ret = sp_tx_rst_aux(ctx);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to reset aux channel!\n");
		return ret;
	}

	return (blocks_num + 1);
}

/*
 * Power the chip up (low-power-mode designs only): enable all supplies
 * with the datasheet delays, then raise the power-on and reset GPIOs.
 * On regulator failure, already-enabled supplies are rolled back.
 */
static void anx7625_power_on(struct anx7625_data *ctx)
{
	struct device *dev = ctx->dev;
	int ret, i;

	if (!ctx->pdata.low_power_mode) {
		DRM_DEV_DEBUG_DRIVER(dev, "not low power mode!\n");
		return;
	}

	for (i = 0; i < ARRAY_SIZE(ctx->pdata.supplies); i++) {
		ret = regulator_enable(ctx->pdata.supplies[i].consumer);
		if (ret < 0) {
			DRM_DEV_DEBUG_DRIVER(dev, "cannot enable supply %d: %d\n",
					     i, ret);
			goto reg_err;
		}
		usleep_range(2000, 2100);
	}

	usleep_range(11000, 12000);

	/* Power on pin enable */
	gpiod_set_value(ctx->pdata.gpio_p_on, 1);
	usleep_range(10000, 11000);
	/* Power reset pin enable */
	gpiod_set_value(ctx->pdata.gpio_reset, 1);
	usleep_range(10000, 11000);

	DRM_DEV_DEBUG_DRIVER(dev, "power on !\n");
	return;
reg_err:
	for (--i; i >= 0; i--)
		regulator_disable(ctx->pdata.supplies[i].consumer);
}

/*
 * Put the chip into standby (low-power-mode designs only): drop the
 * reset and power-on GPIOs, then disable all supplies.
 */
static void anx7625_power_standby(struct anx7625_data *ctx)
{
	struct device *dev = ctx->dev;
	int ret;

	if (!ctx->pdata.low_power_mode) {
		DRM_DEV_DEBUG_DRIVER(dev, "not low power mode!\n");
		return;
	}
gpiod_set_value(ctx->pdata.gpio_reset, 0);
	usleep_range(1000, 1100);
	gpiod_set_value(ctx->pdata.gpio_p_on, 0);
	usleep_range(1000, 1100);

	ret = regulator_bulk_disable(ARRAY_SIZE(ctx->pdata.supplies),
				     ctx->pdata.supplies);
	if (ret < 0)
		DRM_DEV_DEBUG_DRIVER(dev, "cannot disable supplies %d\n", ret);

	DRM_DEV_DEBUG_DRIVER(dev, "power down\n");
}

/* Basic configurations of ANX7625 */
static void anx7625_config(struct anx7625_data *ctx)
{
	/* Select the 27 MHz crystal frequency. */
	anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
			  XTAL_FRQ_SEL, XTAL_FRQ_27M);
}

/*
 * Disable the USB-PD protocol in the OCM firmware: hold the main OCM
 * in reset, set the disable-PD flag, then release the OCM.
 */
static void anx7625_disable_pd_protocol(struct anx7625_data *ctx)
{
	struct device *dev = ctx->dev;
	int ret;

	/* Reset main ocm */
	ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, 0x88, 0x40);
	/* Disable PD */
	ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
				 AP_AV_STATUS, AP_DISABLE_PD);
	/* Release main ocm */
	ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, 0x88, 0x00);

	if (ret < 0)
		DRM_DEV_DEBUG_DRIVER(dev, "disable PD feature fail.\n");
	else
		DRM_DEV_DEBUG_DRIVER(dev, "disable PD feature succeeded.\n");
}

/*
 * Check whether the OCM firmware has finished loading from flash.
 * Returns 0 when loaded (and disables PD + logs the firmware version),
 * -ENODEV when not yet loaded, or a negative errno on i2c failure.
 */
static int anx7625_ocm_loading_check(struct anx7625_data *ctx)
{
	int ret;
	struct device *dev = ctx->dev;

	/* Check interface workable */
	ret = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, FLASH_LOAD_STA);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "IO error : access flash load.\n");
		return ret;
	}
	if ((ret & FLASH_LOAD_STA_CHK) != FLASH_LOAD_STA_CHK)
		return -ENODEV;

	anx7625_disable_pd_protocol(ctx);

	DRM_DEV_DEBUG_DRIVER(dev, "Firmware ver %02x%02x,",
			     anx7625_reg_read(ctx, ctx->i2c.rx_p0_client,
					      OCM_FW_VERSION),
			     anx7625_reg_read(ctx, ctx->i2c.rx_p0_client,
					      OCM_FW_REVERSION));
	DRM_DEV_DEBUG_DRIVER(dev, "Driver version %s\n", ANX7625_DRV_VERSION);

	return 0;
}

/*
 * Power-on with retries: up to 3 power cycles, each polling for OCM
 * firmware load completion for OCM_LOADING_TIME ms before giving up
 * and powering back down.
 */
static void anx7625_power_on_init(struct anx7625_data *ctx)
{
	int retry_count, i;

	for (retry_count = 0; retry_count < 3; retry_count++) {
		anx7625_power_on(ctx);
		anx7625_config(ctx);

		for (i = 0; i < OCM_LOADING_TIME; i++) {
			if (!anx7625_ocm_loading_check(ctx))
				return;
			usleep_range(1000, 1100);
		}
		anx7625_power_standby(ctx);
	}
}

/*
 * Acquire the optional "enable" and "reset" GPIOs.  Low-power mode
 * (full power sequencing) is only used when both GPIOs are present.
 */
static void anx7625_init_gpio(struct anx7625_data *platform)
{
	struct device *dev = platform->dev;

	DRM_DEV_DEBUG_DRIVER(dev, "init gpio\n");

	/* Gpio for chip power enable */
	platform->pdata.gpio_p_on =
		devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR_OR_NULL(platform->pdata.gpio_p_on)) {
		DRM_DEV_DEBUG_DRIVER(dev, "no enable gpio found\n");
		platform->pdata.gpio_p_on = NULL;
	}

	/* Gpio for chip reset */
	platform->pdata.gpio_reset =
		devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR_OR_NULL(platform->pdata.gpio_reset)) {
		DRM_DEV_DEBUG_DRIVER(dev, "no reset gpio found\n");
		platform->pdata.gpio_reset = NULL;
	}

	if (platform->pdata.gpio_p_on && platform->pdata.gpio_reset) {
		platform->pdata.low_power_mode = 1;
		DRM_DEV_DEBUG_DRIVER(dev, "low power mode, pon %d, reset %d.\n",
				     desc_to_gpio(platform->pdata.gpio_p_on),
				     desc_to_gpio(platform->pdata.gpio_reset));
	} else {
		platform->pdata.low_power_mode = 0;
		DRM_DEV_DEBUG_DRIVER(dev, "not low power mode.\n");
	}
}

/* Clear the cached HPD state when the DP link goes down. */
static void anx7625_stop_dp_work(struct anx7625_data *ctx)
{
	ctx->hpd_status = 0;
	ctx->hpd_high_cnt = 0;
}

/*
 * Handle HPD assertion: debounce repeated events, record HPD high,
 * disable HDCP in the secure OCM and log its version.
 */
static void anx7625_start_dp_work(struct anx7625_data *ctx)
{
	int ret;
	struct device *dev = ctx->dev;

	if (ctx->hpd_high_cnt >= 2) {
		DRM_DEV_DEBUG_DRIVER(dev, "filter useless HPD\n");
		return;
	}

	ctx->hpd_status = 1;
	ctx->hpd_high_cnt++;

	/* Not support HDCP */
	ret = anx7625_write_and(ctx, ctx->i2c.rx_p1_client, 0xee, 0x9f);

	/* Try auth flag */
	ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, 0xec, 0x10);
	/* Interrupt for DRM */
	ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, 0xff, 0x01);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "fail to setting HDCP/auth\n");
		return;
	}

	ret = anx7625_reg_read(ctx, ctx->i2c.rx_p1_client, 0x86);
	if (ret < 0)
		return;

	DRM_DEV_DEBUG_DRIVER(dev, "Secure OCM version=%02x\n", ret);
}

/*
 * Program the HPD detect window (HPD_TIME) and read the system status
 * register.  Used as the readx_poll_timeout() accessor for HPD polling.
 */
static int anx7625_read_hpd_status_p0(struct anx7625_data *ctx)
{
	int ret;

	/* Set irq detect window to 2ms */
	ret = anx7625_reg_write(ctx, 
ctx->i2c.tx_p2_client,
				HPD_DET_TIMER_BIT0_7, HPD_TIME & 0xFF);
	ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
				 HPD_DET_TIMER_BIT8_15,
				 (HPD_TIME >> 8) & 0xFF);
	ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
				 HPD_DET_TIMER_BIT16_23,
				 (HPD_TIME >> 16) & 0xFF);
	if (ret < 0)
		return ret;

	return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, SYSTEM_STSTUS);
}

/*
 * Poll for HPD assertion for up to @wait_us microseconds.  In interrupt
 * mode this is a no-op (the ISR handles HPD).  On HPD, clears the alert
 * and interrupt-change registers, runs the DP start work and notifies
 * the DRM core when no fixed panel bridge is attached.
 */
static int _anx7625_hpd_polling(struct anx7625_data *ctx,
				unsigned long wait_us)
{
	int ret, val;
	struct device *dev = ctx->dev;

	/* Interrupt mode, no need poll HPD status, just return */
	if (ctx->pdata.intp_irq)
		return 0;

	ret = readx_poll_timeout(anx7625_read_hpd_status_p0,
				 ctx, val,
				 ((val & HPD_STATUS) || (val < 0)),
				 wait_us / 100,
				 wait_us);
	if (ret) {
		DRM_DEV_ERROR(dev, "no hpd.\n");
		return ret;
	}

	DRM_DEV_DEBUG_DRIVER(dev, "system status: 0x%x. HPD raise up.\n", val);
	anx7625_reg_write(ctx, ctx->i2c.tcpc_client,
			  INTR_ALERT_1, 0xFF);
	anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
			  INTERFACE_CHANGE_INT, 0);

	anx7625_start_dp_work(ctx);

	if (!ctx->pdata.panel_bridge && ctx->bridge_attached)
		drm_helper_hpd_irq_event(ctx->bridge.dev);

	return 0;
}

/*
 * drm_dp_aux .wait_hpd_asserted hook: resume the device, poll HPD,
 * then drop the runtime PM reference (autosuspend).
 */
static int anx7625_wait_hpd_asserted(struct drm_dp_aux *aux,
				     unsigned long wait_us)
{
	struct anx7625_data *ctx = container_of(aux, struct anx7625_data, aux);
	struct device *dev = ctx->dev;
	int ret;

	pm_runtime_get_sync(dev);
	ret = _anx7625_hpd_polling(ctx, wait_us);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return ret;
}

/* Invalidate the cached EDID (forces a re-read on next request). */
static void anx7625_remove_edid(struct anx7625_data *ctx)
{
	ctx->slimport_edid_p.edid_block_num = -1;
}

/* Apply DT-provided lane 0/1 swing register overrides, if any. */
static void anx7625_dp_adjust_swing(struct anx7625_data *ctx)
{
	int i;

	for (i = 0; i < ctx->pdata.dp_lane0_swing_reg_cnt; i++)
		anx7625_reg_write(ctx, ctx->i2c.tx_p1_client,
				  DP_TX_LANE0_SWING_REG0 + i,
				  ctx->pdata.lane0_reg_data[i]);

	for (i = 0; i < ctx->pdata.dp_lane1_swing_reg_cnt; i++)
		anx7625_reg_write(ctx, ctx->i2c.tx_p1_client,
				  DP_TX_LANE1_SWING_REG0 + i,
				  ctx->pdata.lane1_reg_data[i]);
}

/*
 * React to an HPD level change: on low, drop the cached EDID and DP
 * state; on high, start the DP work and apply the swing overrides.
 */
static void dp_hpd_change_handler(struct anx7625_data *ctx, bool on)
{
	struct device *dev = ctx->dev;

	/* HPD changed */
	DRM_DEV_DEBUG_DRIVER(dev, "dp_hpd_change_default_func: %d\n",
			     (u32)on);

	if (on == 0) {
		DRM_DEV_DEBUG_DRIVER(dev, " HPD low\n");
		anx7625_remove_edid(ctx);
		anx7625_stop_dp_work(ctx);
	} else {
		DRM_DEV_DEBUG_DRIVER(dev, " HPD high\n");
		anx7625_start_dp_work(ctx);
		anx7625_dp_adjust_swing(ctx);
	}
}

/*
 * Interrupt bottom half: clear the TCPC alert, read and acknowledge the
 * interface-change vector, and dispatch HPD changes.  Returns 0 when an
 * HPD change was handled, -ENOENT when the interrupt was not HPD, or a
 * negative errno on i2c failure.
 */
static int anx7625_hpd_change_detect(struct anx7625_data *ctx)
{
	int intr_vector, status;
	struct device *dev = ctx->dev;

	status = anx7625_reg_write(ctx, ctx->i2c.tcpc_client,
				   INTR_ALERT_1, 0xFF);
	if (status < 0) {
		DRM_DEV_ERROR(dev, "cannot clear alert reg.\n");
		return status;
	}

	intr_vector = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client,
				       INTERFACE_CHANGE_INT);
	if (intr_vector < 0) {
		DRM_DEV_ERROR(dev, "cannot access interrupt change reg.\n");
		return intr_vector;
	}
	DRM_DEV_DEBUG_DRIVER(dev, "0x7e:0x44=%x\n", intr_vector);
	/*
	 * NOTE(review): x & ~x is always 0, so this writes 0 (clearing
	 * every pending bit).  If only the handled bits were meant to be
	 * acknowledged, the mask is wrong — confirm intent.
	 */
	status = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
				   INTERFACE_CHANGE_INT,
				   intr_vector & (~intr_vector));
	if (status < 0) {
		DRM_DEV_ERROR(dev, "cannot clear interrupt change reg.\n");
		return status;
	}

	if (!(intr_vector & HPD_STATUS_CHANGE))
		return -ENOENT;

	status = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client,
				  SYSTEM_STSTUS);
	if (status < 0) {
		DRM_DEV_ERROR(dev, "cannot clear interrupt status.\n");
		return status;
	}

	DRM_DEV_DEBUG_DRIVER(dev, "0x7e:0x45=%x\n", status);
	dp_hpd_change_handler(ctx, status & HPD_STATUS);

	return 0;
}

/*
 * Workqueue handler for the HPD interrupt: skip when runtime-suspended,
 * otherwise run change detection under the lock and notify DRM.
 */
static void anx7625_work_func(struct work_struct *work)
{
	int event;
	struct anx7625_data *ctx =
		container_of(work, struct anx7625_data, work);

	mutex_lock(&ctx->lock);

	if (pm_runtime_suspended(ctx->dev)) {
		mutex_unlock(&ctx->lock);
		return;
	}

	event = anx7625_hpd_change_detect(ctx);

	mutex_unlock(&ctx->lock);

	if (event < 0)
		return;

	if (ctx->bridge_attached)
		drm_helper_hpd_irq_event(ctx->bridge.dev);
}

/* Hard IRQ handler: defer all i2c work to the workqueue. */
static irqreturn_t anx7625_intr_hpd_isr(int irq, void *data)
{
	struct anx7625_data *ctx = (struct anx7625_data *)data;

	queue_work(ctx->workqueue, &ctx->work);

	return 
IRQ_HANDLED;
}

/*
 * Read the optional "analogix,lane0-swing"/"analogix,lane1-swing" DT
 * arrays (clamped to DP_TX_SWING_REG_CNT entries) into pdata.
 */
static int anx7625_get_swing_setting(struct device *dev,
				     struct anx7625_platform_data *pdata)
{
	int num_regs;

	if (of_get_property(dev->of_node,
			    "analogix,lane0-swing", &num_regs)) {
		if (num_regs > DP_TX_SWING_REG_CNT)
			num_regs = DP_TX_SWING_REG_CNT;

		pdata->dp_lane0_swing_reg_cnt = num_regs;
		of_property_read_u8_array(dev->of_node, "analogix,lane0-swing",
					  pdata->lane0_reg_data, num_regs);
	}

	if (of_get_property(dev->of_node,
			    "analogix,lane1-swing", &num_regs)) {
		if (num_regs > DP_TX_SWING_REG_CNT)
			num_regs = DP_TX_SWING_REG_CNT;

		pdata->dp_lane1_swing_reg_cnt = num_regs;
		of_property_read_u8_array(dev->of_node, "analogix,lane1-swing",
					  pdata->lane1_reg_data, num_regs);
	}

	return 0;
}

/*
 * Parse the input-side device tree: swing overrides, the MIPI host
 * node on port 0, the endpoint bus-type (DPI vs. default DSI), the
 * data-lane count and the audio-enable flag.
 */
static int anx7625_parse_dt(struct device *dev,
			    struct anx7625_platform_data *pdata)
{
	struct device_node *np = dev->of_node, *ep0;
	int bus_type, mipi_lanes;

	anx7625_get_swing_setting(dev, pdata);

	pdata->is_dpi = 0; /* default dsi mode */
	of_node_put(pdata->mipi_host_node);
	pdata->mipi_host_node = of_graph_get_remote_node(np, 0, 0);
	if (!pdata->mipi_host_node) {
		DRM_DEV_ERROR(dev, "fail to get internal panel.\n");
		return -ENODEV;
	}

	bus_type = 0;
	mipi_lanes = MAX_LANES_SUPPORT;
	ep0 = of_graph_get_endpoint_by_regs(np, 0, 0);
	if (ep0) {
		if (of_property_read_u32(ep0, "bus-type", &bus_type))
			bus_type = 0;

		mipi_lanes = drm_of_get_data_lanes_count(ep0, 1,
							 MAX_LANES_SUPPORT);
		of_node_put(ep0);
	}

	if (bus_type == V4L2_FWNODE_BUS_TYPE_DPI) /* bus type is DPI */
		pdata->is_dpi = 1;

	pdata->mipi_lanes = MAX_LANES_SUPPORT;
	if (mipi_lanes > 0)
		pdata->mipi_lanes = mipi_lanes;

	if (pdata->is_dpi)
		DRM_DEV_DEBUG_DRIVER(dev, "found MIPI DPI host node.\n");
	else
		DRM_DEV_DEBUG_DRIVER(dev, "found MIPI DSI host node.\n");

	if (of_property_read_bool(np, "analogix,audio-enable"))
		pdata->audio_en = 1;

	return 0;
}

/*
 * Find the optional downstream panel bridge on port 1.  A missing
 * panel (-ENODEV) is not an error; any other failure is propagated.
 */
static int anx7625_parse_dt_panel(struct device *dev,
				  struct anx7625_platform_data *pdata)
{
	struct device_node *np = dev->of_node;

	pdata->panel_bridge = devm_drm_of_get_bridge(dev, np, 1, 0);
	if (IS_ERR(pdata->panel_bridge)) {
		if (PTR_ERR(pdata->panel_bridge) == -ENODEV) {
			pdata->panel_bridge = NULL;
			return 0;
		}

		return PTR_ERR(pdata->panel_bridge);
	}

	DRM_DEV_DEBUG_DRIVER(dev, "get panel node.\n");

	return 0;
}

/* True when the DT describes a panel child under an "aux-bus" node. */
static bool anx7625_of_panel_on_aux_bus(struct device *dev)
{
	struct device_node *bus, *panel;

	bus = of_get_child_by_name(dev->of_node, "aux-bus");
	if (!bus)
		return false;

	panel = of_get_child_by_name(bus, "panel");
	of_node_put(bus);
	if (!panel)
		return false;
	of_node_put(panel);

	return true;
}

static inline struct anx7625_data *bridge_to_anx7625(struct drm_bridge *bridge)
{
	return container_of(bridge, struct anx7625_data, bridge);
}

/*
 * drm_dp_aux .transfer hook: validate the request type, then forward
 * the AUX transaction to the chip under a runtime-PM reference.
 */
static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux,
				    struct drm_dp_aux_msg *msg)
{
	struct anx7625_data *ctx = container_of(aux, struct anx7625_data, aux);
	struct device *dev = ctx->dev;
	u8 request = msg->request & ~DP_AUX_I2C_MOT;
	int ret = 0;

	pm_runtime_get_sync(dev);
	msg->reply = 0;
	switch (request) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		break;
	default:
		ret = -EINVAL;
	}
	if (!ret)
		ret = anx7625_aux_trans(ctx, msg->request, msg->address,
					msg->size, msg->buffer);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return ret;
}

/*
 * Return a kmalloc'd copy of the sink EDID (caller frees).  Serves the
 * cached copy when available; otherwise powers the chip, waits for HPD
 * and reads the EDID over AUX, caching the result.  Returns NULL on
 * allocation or read failure.
 */
static struct edid *anx7625_get_edid(struct anx7625_data *ctx)
{
	struct device *dev = ctx->dev;
	struct s_edid_data *p_edid = &ctx->slimport_edid_p;
	int edid_num;
	u8 *edid;

	edid = kmalloc(FOUR_BLOCK_SIZE, GFP_KERNEL);
	if (!edid) {
		DRM_DEV_ERROR(dev, "Fail to allocate buffer\n");
		return NULL;
	}

	if (ctx->slimport_edid_p.edid_block_num > 0) {
		memcpy(edid, ctx->slimport_edid_p.edid_raw_data,
		       FOUR_BLOCK_SIZE);
		return (struct edid *)edid;
	}

	pm_runtime_get_sync(dev);
	_anx7625_hpd_polling(ctx, 5000 * 100);
	edid_num = sp_tx_edid_read(ctx, p_edid->edid_raw_data);
	pm_runtime_put_sync(dev);

	if (edid_num < 1) {
		DRM_DEV_ERROR(dev, "Fail to read EDID: %d\n", edid_num);
		kfree(edid);
		return NULL;
	}

	p_edid->edid_block_num = edid_num;
memcpy(edid, ctx->slimport_edid_p.edid_raw_data, FOUR_BLOCK_SIZE);

	return (struct edid *)edid;
}

/*
 * Connection status: a fixed panel bridge is always "connected";
 * otherwise report the cached HPD state.
 */
static enum drm_connector_status anx7625_sink_detect(struct anx7625_data *ctx)
{
	struct device *dev = ctx->dev;

	DRM_DEV_DEBUG_DRIVER(dev, "sink detect\n");

	if (ctx->pdata.panel_bridge)
		return connector_status_connected;

	return ctx->hpd_status ? connector_status_connected
				     : connector_status_disconnected;
}

/*
 * hdmi-codec .hw_params hook: program slave mode (DSP_A/I2S), word
 * length, channel count/layout and sample rate into the audio channel
 * status registers, then signal the audio change to the OCM.
 * Returns 0, -EINVAL for unsupported parameters, or -EIO on i2c error.
 */
static int anx7625_audio_hw_params(struct device *dev, void *data,
				   struct hdmi_codec_daifmt *fmt,
				   struct hdmi_codec_params *params)
{
	struct anx7625_data *ctx = dev_get_drvdata(dev);
	int wl, ch, rate;
	int ret = 0;

	if (anx7625_sink_detect(ctx) == connector_status_disconnected) {
		DRM_DEV_DEBUG_DRIVER(dev, "DP not connected\n");
		return 0;
	}

	if (fmt->fmt != HDMI_DSP_A && fmt->fmt != HDMI_I2S) {
		DRM_DEV_ERROR(dev, "only supports DSP_A & I2S\n");
		return -EINVAL;
	}

	DRM_DEV_DEBUG_DRIVER(dev, "setting %d Hz, %d bit, %d channels\n",
			     params->sample_rate, params->sample_width,
			     params->cea.channels);

	if (fmt->fmt == HDMI_DSP_A)
		ret = anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client,
					   AUDIO_CHANNEL_STATUS_6,
					   ~I2S_SLAVE_MODE,
					   TDM_SLAVE_MODE);
	else
		ret = anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client,
					   AUDIO_CHANNEL_STATUS_6,
					   ~TDM_SLAVE_MODE,
					   I2S_SLAVE_MODE);

	/* Word length */
	switch (params->sample_width) {
	case 16:
		wl = AUDIO_W_LEN_16_20MAX;
		break;
	case 18:
		wl = AUDIO_W_LEN_18_20MAX;
		break;
	case 20:
		wl = AUDIO_W_LEN_20_20MAX;
		break;
	case 24:
		wl = AUDIO_W_LEN_24_24MAX;
		break;
	default:
		DRM_DEV_DEBUG_DRIVER(dev, "wordlength: %d bit not support",
				     params->sample_width);
		return -EINVAL;
	}
	ret |= anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client,
				    AUDIO_CHANNEL_STATUS_5,
				    0xf0, wl);

	/* Channel num */
	switch (params->cea.channels) {
	case 2:
		ch = I2S_CH_2;
		break;
	case 4:
		ch = TDM_CH_4;
		break;
	case 6:
		ch = TDM_CH_6;
		break;
	case 8:
		ch = TDM_CH_8;
		break;
	default:
		DRM_DEV_DEBUG_DRIVER(dev, "channel number: %d not support",
				     params->cea.channels);
		return -EINVAL;
	}
	ret |= anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client,
				    AUDIO_CHANNEL_STATUS_6, 0x1f, ch << 5);
	if (ch > I2S_CH_2)
		ret |= anx7625_write_or(ctx, ctx->i2c.tx_p2_client,
					AUDIO_CHANNEL_STATUS_6, AUDIO_LAYOUT);
	else
		ret |= anx7625_write_and(ctx, ctx->i2c.tx_p2_client,
					 AUDIO_CHANNEL_STATUS_6, ~AUDIO_LAYOUT);

	/* FS */
	switch (params->sample_rate) {
	case 32000:
		rate = AUDIO_FS_32K;
		break;
	case 44100:
		rate = AUDIO_FS_441K;
		break;
	case 48000:
		rate = AUDIO_FS_48K;
		break;
	case 88200:
		rate = AUDIO_FS_882K;
		break;
	case 96000:
		rate = AUDIO_FS_96K;
		break;
	case 176400:
		rate = AUDIO_FS_1764K;
		break;
	case 192000:
		rate = AUDIO_FS_192K;
		break;
	default:
		DRM_DEV_DEBUG_DRIVER(dev, "sample rate: %d not support",
				     params->sample_rate);
		return -EINVAL;
	}
	ret |= anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client,
				    AUDIO_CHANNEL_STATUS_4,
				    0xf0, rate);
	/* Signal the OCM that the audio configuration changed. */
	ret |= anx7625_write_or(ctx, ctx->i2c.rx_p0_client,
				AP_AV_STATUS, AP_AUDIO_CHG);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "IO error : config audio.\n");
		return -EIO;
	}

	return 0;
}

/* hdmi-codec .audio_shutdown hook (no hardware action needed). */
static void anx7625_audio_shutdown(struct device *dev, void *data)
{
	DRM_DEV_DEBUG_DRIVER(dev, "stop audio\n");
}

/* hdmi-codec .get_dai_id hook: single DAI, always id 0. */
static int anx7625_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
				       struct device_node *endpoint)
{
	struct of_endpoint of_ep;
	int ret;

	ret = of_graph_parse_endpoint(endpoint, &of_ep);
	if (ret < 0)
		return ret;

	/*
	 * HDMI sound should be located at external DPI port
	 * Didn't have good way to check where is internal(DSI)
	 * or external(DPI) bridge
	 */
	return 0;
}

/* Forward the current connector status to the registered codec callback. */
static void
anx7625_audio_update_connector_status(struct anx7625_data *ctx,
				      enum drm_connector_status status)
{
	if (ctx->plugged_cb && ctx->codec_dev) {
		ctx->plugged_cb(ctx->codec_dev,
				status == connector_status_connected);
	}
}

/*
 * hdmi-codec .hook_plugged_cb hook: remember the callback and report
 * the current status immediately.
 */
static int anx7625_audio_hook_plugged_cb(struct device *dev, void *data,
					 hdmi_codec_plugged_cb fn,
					 struct device *codec_dev)
{
	struct anx7625_data *ctx = data;

	ctx->plugged_cb = fn;
	ctx->codec_dev = codec_dev;
	anx7625_audio_update_connector_status(ctx, anx7625_sink_detect(ctx));

	return 0;
}

static int 
anx7625_audio_get_eld(struct device *dev, void *data,
				 u8 *buf, size_t len)
{
	struct anx7625_data *ctx = dev_get_drvdata(dev);

	if (!ctx->connector) {
		/* Pass an empty ELD if connector not available */
		memset(buf, 0, len);
	} else {
		dev_dbg(dev, "audio copy eld\n");
		memcpy(buf, ctx->connector->eld,
		       min(sizeof(ctx->connector->eld), len));
	}

	return 0;
}

static const struct hdmi_codec_ops anx7625_codec_ops = {
	.hw_params	= anx7625_audio_hw_params,
	.audio_shutdown = anx7625_audio_shutdown,
	.get_eld	= anx7625_audio_get_eld,
	.get_dai_id	= anx7625_hdmi_i2s_get_dai_id,
	.hook_plugged_cb = anx7625_audio_hook_plugged_cb,
};

/* Tear down the hdmi-codec platform device, if one was registered. */
static void anx7625_unregister_audio(struct anx7625_data *ctx)
{
	struct device *dev = ctx->dev;

	if (ctx->audio_pdev) {
		platform_device_unregister(ctx->audio_pdev);
		ctx->audio_pdev = NULL;
	}

	DRM_DEV_DEBUG_DRIVER(dev, "unbound to %s", HDMI_CODEC_DRV_NAME);
}

/* Register an hdmi-codec platform device exposing this bridge's audio. */
static int anx7625_register_audio(struct device *dev,
				  struct anx7625_data *ctx)
{
	struct hdmi_codec_pdata codec_data = {
		.ops = &anx7625_codec_ops,
		.max_i2s_channels = 8,
		.i2s = 1,
		.data = ctx,
	};

	ctx->audio_pdev = platform_device_register_data(dev,
							HDMI_CODEC_DRV_NAME,
							PLATFORM_DEVID_AUTO,
							&codec_data,
							sizeof(codec_data));

	if (IS_ERR(ctx->audio_pdev))
		return PTR_ERR(ctx->audio_pdev);

	DRM_DEV_DEBUG_DRIVER(dev, "bound to %s", HDMI_CODEC_DRV_NAME);

	return 0;
}

/*
 * Create the MIPI DSI device on the host found during DT parse.
 * Returns -EPROBE_DEFER while the DSI host is not yet available.
 */
static int anx7625_setup_dsi_device(struct anx7625_data *ctx)
{
	struct mipi_dsi_device *dsi;
	struct device *dev = ctx->dev;
	struct mipi_dsi_host *host;
	const struct mipi_dsi_device_info info = {
		.type = "anx7625",
		.channel = 0,
		.node = NULL,
	};

	host = of_find_mipi_dsi_host_by_node(ctx->pdata.mipi_host_node);
	if (!host) {
		DRM_DEV_ERROR(dev, "fail to find dsi host.\n");
		return -EPROBE_DEFER;
	}

	dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
	if (IS_ERR(dsi)) {
		DRM_DEV_ERROR(dev, "fail to create dsi device.\n");
		return -EINVAL;
	}

	dsi->lanes = ctx->pdata.mipi_lanes;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO	|
		MIPI_DSI_MODE_VIDEO_SYNC_PULSE	|
		MIPI_DSI_MODE_VIDEO_HSE	|
		MIPI_DSI_HS_PKT_END_ALIGNED;

	ctx->dsi = dsi;

	return 0;
}

/* Attach the previously created DSI device to its host. */
static int anx7625_attach_dsi(struct anx7625_data *ctx)
{
	struct device *dev = ctx->dev;
	int ret;

	DRM_DEV_DEBUG_DRIVER(dev, "attach dsi\n");

	ret = devm_mipi_dsi_attach(dev, ctx->dsi);
	if (ret) {
		DRM_DEV_ERROR(dev, "fail to attach dsi to host.\n");
		return ret;
	}

	DRM_DEV_DEBUG_DRIVER(dev, "attach dsi succeeded.\n");

	return 0;
}

/*
 * Delayed work: poll the sink's HDCP auth status and, once bit 1 is
 * set, report content protection as ENABLED to DRM.
 */
static void hdcp_check_work_func(struct work_struct *work)
{
	/*
	 * NOTE(review): anx7625_reg_read() returns int; a negative errno
	 * gets truncated into this u8 and could read as "authenticated"
	 * — confirm whether error checking is needed here.
	 */
	u8 status;
	struct delayed_work *dwork;
	struct anx7625_data *ctx;
	struct device *dev;
	struct drm_device *drm_dev;

	dwork = to_delayed_work(work);
	ctx = container_of(dwork, struct anx7625_data, hdcp_work);
	dev = ctx->dev;

	if (!ctx->connector) {
		dev_err(dev, "HDCP connector is null!");
		return;
	}

	drm_dev = ctx->connector->dev;
	drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);
	mutex_lock(&ctx->hdcp_wq_lock);

	status = anx7625_reg_read(ctx, ctx->i2c.tx_p0_client, 0);
	dev_dbg(dev, "sink HDCP status check: %.02x\n", status);
	if (status & BIT(1)) {
		ctx->hdcp_cp = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		drm_hdcp_update_content_protection(ctx->connector,
						   ctx->hdcp_cp);
		dev_dbg(dev, "update CP to ENABLE\n");
	}

	mutex_unlock(&ctx->hdcp_wq_lock);
	drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
}

/*
 * Apply a content-protection request from the connector state:
 * DESIRED starts HDCP (with a 2 s status re-check), UNDESIRED tears it
 * down, and a userspace request for ENABLED is rejected.
 */
static int anx7625_connector_atomic_check(struct anx7625_data *ctx,
					  struct drm_connector_state *state)
{
	struct device *dev = ctx->dev;
	int cp;

	dev_dbg(dev, "hdcp state check\n");
	cp = state->content_protection;

	if (cp == ctx->hdcp_cp)
		return 0;

	if (cp == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		if (ctx->dp_en) {
			dev_dbg(dev, "enable HDCP\n");
			anx7625_hdcp_enable(ctx);

			queue_delayed_work(ctx->hdcp_workqueue,
					   &ctx->hdcp_work,
					   msecs_to_jiffies(2000));
		}
	}

	if (cp == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		if (ctx->hdcp_cp != DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			dev_err(dev, "current CP is not ENABLED\n");
			return -EINVAL;
		}
		anx7625_hdcp_disable(ctx);
		ctx->hdcp_cp = 
DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
		drm_hdcp_update_content_protection(ctx->connector,
						   ctx->hdcp_cp);
		dev_dbg(dev, "update CP to UNDESIRE\n");
	}

	if (cp == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		dev_err(dev, "Userspace illegal set to PROTECTION ENABLE\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * drm_bridge .attach hook.  Requires DRM_BRIDGE_ATTACH_NO_CONNECTOR:
 * registers the AUX channel and chains the optional panel bridge.
 */
static int anx7625_bridge_attach(struct drm_bridge *bridge,
				 enum drm_bridge_attach_flags flags)
{
	struct anx7625_data *ctx = bridge_to_anx7625(bridge);
	int err;
	struct device *dev = ctx->dev;

	DRM_DEV_DEBUG_DRIVER(dev, "drm attach\n");
	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
		return -EINVAL;

	if (!bridge->encoder) {
		DRM_DEV_ERROR(dev, "Parent encoder object not found");
		return -ENODEV;
	}

	ctx->aux.drm_dev = bridge->dev;
	err = drm_dp_aux_register(&ctx->aux);
	if (err) {
		dev_err(dev, "failed to register aux channel: %d\n", err);
		return err;
	}

	if (ctx->pdata.panel_bridge) {
		err = drm_bridge_attach(bridge->encoder,
					ctx->pdata.panel_bridge,
					&ctx->bridge, flags);
		if (err)
			return err;
	}

	ctx->bridge_attached = 1;

	return 0;
}

/* drm_bridge .detach hook: unregister the AUX channel. */
static void anx7625_bridge_detach(struct drm_bridge *bridge)
{
	struct anx7625_data *ctx = bridge_to_anx7625(bridge);

	drm_dp_aux_unregister(&ctx->aux);
}

/*
 * drm_bridge .mode_valid hook: reject modes whose pixel clock exceeds
 * the chip's maximum.
 */
static enum drm_mode_status
anx7625_bridge_mode_valid(struct drm_bridge *bridge,
			  const struct drm_display_info *info,
			  const struct drm_display_mode *mode)
{
	struct anx7625_data *ctx = bridge_to_anx7625(bridge);
	struct device *dev = ctx->dev;

	DRM_DEV_DEBUG_DRIVER(dev, "drm mode checking\n");

	/* Max 1200p at 5.4 Ghz, one lane, pixel clock 300M */
	if (mode->clock > SUPPORT_PIXEL_CLOCK) {
		DRM_DEV_DEBUG_DRIVER(dev,
				     "drm mode invalid, pixelclock too high.\n");
		return MODE_CLOCK_HIGH;
	}

	DRM_DEV_DEBUG_DRIVER(dev, "drm mode valid.\n");

	return MODE_OK;
}

/*
 * drm_bridge .mode_set hook: cache the mode's timing (as min values of
 * the display_timing ranges) for later DSI/DPI configuration.
 */
static void anx7625_bridge_mode_set(struct drm_bridge *bridge,
				    const struct drm_display_mode *old_mode,
				    const struct drm_display_mode *mode)
{
	struct anx7625_data *ctx = bridge_to_anx7625(bridge);
	struct device *dev = ctx->dev;

	DRM_DEV_DEBUG_DRIVER(dev, "drm mode set\n");

	ctx->dt.pixelclock.min = mode->clock;
	ctx->dt.hactive.min = mode->hdisplay;
	ctx->dt.hsync_len.min = mode->hsync_end - mode->hsync_start;
	ctx->dt.hfront_porch.min = mode->hsync_start - mode->hdisplay;
	ctx->dt.hback_porch.min = mode->htotal - mode->hsync_end;
	ctx->dt.vactive.min = mode->vdisplay;
	ctx->dt.vsync_len.min = mode->vsync_end - mode->vsync_start;
	ctx->dt.vfront_porch.min = mode->vsync_start - mode->vdisplay;
	ctx->dt.vback_porch.min = mode->vtotal - mode->vsync_end;

	ctx->display_timing_valid = 1;

	DRM_DEV_DEBUG_DRIVER(dev, "pixelclock(%d).\n", ctx->dt.pixelclock.min);
	DRM_DEV_DEBUG_DRIVER(dev, "hactive(%d), hsync(%d), hfp(%d), hbp(%d)\n",
			     ctx->dt.hactive.min,
			     ctx->dt.hsync_len.min,
			     ctx->dt.hfront_porch.min,
			     ctx->dt.hback_porch.min);
	DRM_DEV_DEBUG_DRIVER(dev, "vactive(%d), vsync(%d), vfp(%d), vbp(%d)\n",
			     ctx->dt.vactive.min,
			     ctx->dt.vsync_len.min,
			     ctx->dt.vfront_porch.min,
			     ctx->dt.vback_porch.min);
	DRM_DEV_DEBUG_DRIVER(dev, "hdisplay(%d),hsync_start(%d).\n",
			     mode->hdisplay,
			     mode->hsync_start);
	DRM_DEV_DEBUG_DRIVER(dev, "hsync_end(%d),htotal(%d).\n",
			     mode->hsync_end,
			     mode->htotal);
	DRM_DEV_DEBUG_DRIVER(dev, "vdisplay(%d),vsync_start(%d).\n",
			     mode->vdisplay,
			     mode->vsync_start);
	DRM_DEV_DEBUG_DRIVER(dev, "vsync_end(%d),vtotal(%d).\n",
			     mode->vsync_end,
			     mode->vtotal);
}

/*
 * drm_bridge .mode_fixup hook (built-in eDP panels only): force HFP,
 * HBP and HSYNC to even values and enforce minimum porch/blanking
 * widths, compensating the pixel clock when blanking must grow.
 */
static bool anx7625_bridge_mode_fixup(struct drm_bridge *bridge,
				      const struct drm_display_mode *mode,
				      struct drm_display_mode *adj)
{
	struct anx7625_data *ctx = bridge_to_anx7625(bridge);
	struct device *dev = ctx->dev;
	u32 hsync, hfp, hbp, hblanking;
	u32 adj_hsync, adj_hfp, adj_hbp, adj_hblanking, delta_adj;
	u32 vref, adj_clock;

	DRM_DEV_DEBUG_DRIVER(dev, "drm mode fixup set\n");

	/* No need fixup for external monitor */
	if (!ctx->pdata.panel_bridge)
		return true;

	hsync = mode->hsync_end - mode->hsync_start;
	hfp = mode->hsync_start - mode->hdisplay;
	hbp = mode->htotal - mode->hsync_end;
	hblanking = mode->htotal - mode->hdisplay;

	DRM_DEV_DEBUG_DRIVER(dev, "before mode fixup\n");
DRM_DEV_DEBUG_DRIVER(dev, "hsync(%d), hfp(%d), hbp(%d), clock(%d)\n", hsync, hfp, hbp, adj->clock); DRM_DEV_DEBUG_DRIVER(dev, "hsync_start(%d), hsync_end(%d), htot(%d)\n", adj->hsync_start, adj->hsync_end, adj->htotal); adj_hfp = hfp; adj_hsync = hsync; adj_hbp = hbp; adj_hblanking = hblanking; /* HFP needs to be even */ if (hfp & 0x1) { adj_hfp += 1; adj_hblanking += 1; } /* HBP needs to be even */ if (hbp & 0x1) { adj_hbp -= 1; adj_hblanking -= 1; } /* HSYNC needs to be even */ if (hsync & 0x1) { if (adj_hblanking < hblanking) adj_hsync += 1; else adj_hsync -= 1; } /* * Once illegal timing detected, use default HFP, HSYNC, HBP * This adjusting made for built-in eDP panel, for the externel * DP monitor, may need return false. */ if (hblanking < HBLANKING_MIN || (hfp < HP_MIN && hbp < HP_MIN)) { adj_hsync = SYNC_LEN_DEF; adj_hfp = HFP_HBP_DEF; adj_hbp = HFP_HBP_DEF; vref = adj->clock * 1000 / (adj->htotal * adj->vtotal); if (hblanking < HBLANKING_MIN) { delta_adj = HBLANKING_MIN - hblanking; adj_clock = vref * delta_adj * adj->vtotal; adj->clock += DIV_ROUND_UP(adj_clock, 1000); } else { delta_adj = hblanking - HBLANKING_MIN; adj_clock = vref * delta_adj * adj->vtotal; adj->clock -= DIV_ROUND_UP(adj_clock, 1000); } DRM_WARN("illegal hblanking timing, use default.\n"); DRM_WARN("hfp(%d), hbp(%d), hsync(%d).\n", hfp, hbp, hsync); } else if (adj_hfp < HP_MIN) { /* Adjust hfp if hfp less than HP_MIN */ delta_adj = HP_MIN - adj_hfp; adj_hfp = HP_MIN; /* * Balance total HBlanking pixel, if HBP does not have enough * space, adjust HSYNC length, otherwise adjust HBP */ if ((adj_hbp - delta_adj) < HP_MIN) /* HBP not enough space */ adj_hsync -= delta_adj; else adj_hbp -= delta_adj; } else if (adj_hbp < HP_MIN) { delta_adj = HP_MIN - adj_hbp; adj_hbp = HP_MIN; /* * Balance total HBlanking pixel, if HBP hasn't enough space, * adjust HSYNC length, otherwize adjust HBP */ if ((adj_hfp - delta_adj) < HP_MIN) /* HFP not enough space */ adj_hsync -= delta_adj; else adj_hfp -= 
delta_adj; } DRM_DEV_DEBUG_DRIVER(dev, "after mode fixup\n"); DRM_DEV_DEBUG_DRIVER(dev, "hsync(%d), hfp(%d), hbp(%d), clock(%d)\n", adj_hsync, adj_hfp, adj_hbp, adj->clock); /* Reconstruct timing */ adj->hsync_start = adj->hdisplay + adj_hfp; adj->hsync_end = adj->hsync_start + adj_hsync; adj->htotal = adj->hsync_end + adj_hbp; DRM_DEV_DEBUG_DRIVER(dev, "hsync_start(%d), hsync_end(%d), htot(%d)\n", adj->hsync_start, adj->hsync_end, adj->htotal); return true; } static int anx7625_bridge_atomic_check(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct anx7625_data *ctx = bridge_to_anx7625(bridge); struct device *dev = ctx->dev; dev_dbg(dev, "drm bridge atomic check\n"); anx7625_bridge_mode_fixup(bridge, &crtc_state->mode, &crtc_state->adjusted_mode); return anx7625_connector_atomic_check(ctx, conn_state); } static void anx7625_bridge_atomic_enable(struct drm_bridge *bridge, struct drm_bridge_state *state) { struct anx7625_data *ctx = bridge_to_anx7625(bridge); struct device *dev = ctx->dev; struct drm_connector *connector; dev_dbg(dev, "drm atomic enable\n"); if (!bridge->encoder) { dev_err(dev, "Parent encoder object not found"); return; } connector = drm_atomic_get_new_connector_for_encoder(state->base.state, bridge->encoder); if (!connector) return; ctx->connector = connector; pm_runtime_get_sync(dev); _anx7625_hpd_polling(ctx, 5000 * 100); anx7625_dp_start(ctx); } static void anx7625_bridge_atomic_disable(struct drm_bridge *bridge, struct drm_bridge_state *old) { struct anx7625_data *ctx = bridge_to_anx7625(bridge); struct device *dev = ctx->dev; dev_dbg(dev, "drm atomic disable\n"); ctx->connector = NULL; anx7625_dp_stop(ctx); pm_runtime_put_sync(dev); } static enum drm_connector_status anx7625_bridge_detect(struct drm_bridge *bridge) { struct anx7625_data *ctx = bridge_to_anx7625(bridge); struct device *dev = ctx->dev; DRM_DEV_DEBUG_DRIVER(dev, "drm bridge 
detect\n"); return anx7625_sink_detect(ctx); } static struct edid *anx7625_bridge_get_edid(struct drm_bridge *bridge, struct drm_connector *connector) { struct anx7625_data *ctx = bridge_to_anx7625(bridge); struct device *dev = ctx->dev; DRM_DEV_DEBUG_DRIVER(dev, "drm bridge get edid\n"); return anx7625_get_edid(ctx); } static const struct drm_bridge_funcs anx7625_bridge_funcs = { .attach = anx7625_bridge_attach, .detach = anx7625_bridge_detach, .mode_valid = anx7625_bridge_mode_valid, .mode_set = anx7625_bridge_mode_set, .atomic_check = anx7625_bridge_atomic_check, .atomic_enable = anx7625_bridge_atomic_enable, .atomic_disable = anx7625_bridge_atomic_disable, .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_reset = drm_atomic_helper_bridge_reset, .detect = anx7625_bridge_detect, .get_edid = anx7625_bridge_get_edid, }; static int anx7625_register_i2c_dummy_clients(struct anx7625_data *ctx, struct i2c_client *client) { struct device *dev = ctx->dev; ctx->i2c.tx_p0_client = devm_i2c_new_dummy_device(dev, client->adapter, TX_P0_ADDR >> 1); if (IS_ERR(ctx->i2c.tx_p0_client)) return PTR_ERR(ctx->i2c.tx_p0_client); ctx->i2c.tx_p1_client = devm_i2c_new_dummy_device(dev, client->adapter, TX_P1_ADDR >> 1); if (IS_ERR(ctx->i2c.tx_p1_client)) return PTR_ERR(ctx->i2c.tx_p1_client); ctx->i2c.tx_p2_client = devm_i2c_new_dummy_device(dev, client->adapter, TX_P2_ADDR >> 1); if (IS_ERR(ctx->i2c.tx_p2_client)) return PTR_ERR(ctx->i2c.tx_p2_client); ctx->i2c.rx_p0_client = devm_i2c_new_dummy_device(dev, client->adapter, RX_P0_ADDR >> 1); if (IS_ERR(ctx->i2c.rx_p0_client)) return PTR_ERR(ctx->i2c.rx_p0_client); ctx->i2c.rx_p1_client = devm_i2c_new_dummy_device(dev, client->adapter, RX_P1_ADDR >> 1); if (IS_ERR(ctx->i2c.rx_p1_client)) return PTR_ERR(ctx->i2c.rx_p1_client); ctx->i2c.rx_p2_client = devm_i2c_new_dummy_device(dev, client->adapter, RX_P2_ADDR >> 1); if (IS_ERR(ctx->i2c.rx_p2_client)) 
return PTR_ERR(ctx->i2c.rx_p2_client); ctx->i2c.tcpc_client = devm_i2c_new_dummy_device(dev, client->adapter, TCPC_INTERFACE_ADDR >> 1); if (IS_ERR(ctx->i2c.tcpc_client)) return PTR_ERR(ctx->i2c.tcpc_client); return 0; } static int __maybe_unused anx7625_runtime_pm_suspend(struct device *dev) { struct anx7625_data *ctx = dev_get_drvdata(dev); mutex_lock(&ctx->lock); anx7625_stop_dp_work(ctx); anx7625_power_standby(ctx); mutex_unlock(&ctx->lock); return 0; } static int __maybe_unused anx7625_runtime_pm_resume(struct device *dev) { struct anx7625_data *ctx = dev_get_drvdata(dev); mutex_lock(&ctx->lock); anx7625_power_on_init(ctx); mutex_unlock(&ctx->lock); return 0; } static const struct dev_pm_ops anx7625_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) SET_RUNTIME_PM_OPS(anx7625_runtime_pm_suspend, anx7625_runtime_pm_resume, NULL) }; static void anx7625_runtime_disable(void *data) { pm_runtime_dont_use_autosuspend(data); pm_runtime_disable(data); } static int anx7625_link_bridge(struct drm_dp_aux *aux) { struct anx7625_data *platform = container_of(aux, struct anx7625_data, aux); struct device *dev = aux->dev; int ret; ret = anx7625_parse_dt_panel(dev, &platform->pdata); if (ret) { DRM_DEV_ERROR(dev, "fail to parse DT for panel : %d\n", ret); return ret; } platform->bridge.funcs = &anx7625_bridge_funcs; platform->bridge.of_node = dev->of_node; if (!anx7625_of_panel_on_aux_bus(dev)) platform->bridge.ops |= DRM_BRIDGE_OP_EDID; if (!platform->pdata.panel_bridge) platform->bridge.ops |= DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_DETECT; platform->bridge.type = platform->pdata.panel_bridge ? 
DRM_MODE_CONNECTOR_eDP : DRM_MODE_CONNECTOR_DisplayPort; drm_bridge_add(&platform->bridge); if (!platform->pdata.is_dpi) { ret = anx7625_attach_dsi(platform); if (ret) drm_bridge_remove(&platform->bridge); } return ret; } static int anx7625_i2c_probe(struct i2c_client *client) { struct anx7625_data *platform; struct anx7625_platform_data *pdata; int ret = 0; struct device *dev = &client->dev; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { DRM_DEV_ERROR(dev, "anx7625's i2c bus doesn't support\n"); return -ENODEV; } platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL); if (!platform) { DRM_DEV_ERROR(dev, "fail to allocate driver data\n"); return -ENOMEM; } pdata = &platform->pdata; platform->dev = &client->dev; i2c_set_clientdata(client, platform); pdata->supplies[0].supply = "vdd10"; pdata->supplies[1].supply = "vdd18"; pdata->supplies[2].supply = "vdd33"; ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(pdata->supplies), pdata->supplies); if (ret) { DRM_DEV_ERROR(dev, "fail to get power supplies: %d\n", ret); return ret; } anx7625_init_gpio(platform); mutex_init(&platform->lock); mutex_init(&platform->hdcp_wq_lock); INIT_DELAYED_WORK(&platform->hdcp_work, hdcp_check_work_func); platform->hdcp_workqueue = create_workqueue("hdcp workqueue"); if (!platform->hdcp_workqueue) { dev_err(dev, "fail to create work queue\n"); ret = -ENOMEM; return ret; } platform->pdata.intp_irq = client->irq; if (platform->pdata.intp_irq) { INIT_WORK(&platform->work, anx7625_work_func); platform->workqueue = alloc_workqueue("anx7625_work", WQ_FREEZABLE | WQ_MEM_RECLAIM, 1); if (!platform->workqueue) { DRM_DEV_ERROR(dev, "fail to create work queue\n"); ret = -ENOMEM; goto free_hdcp_wq; } ret = devm_request_threaded_irq(dev, platform->pdata.intp_irq, NULL, anx7625_intr_hpd_isr, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "anx7625-intp", platform); if (ret) { DRM_DEV_ERROR(dev, "fail to request irq\n"); goto free_wq; } } platform->aux.name = "anx7625-aux"; 
platform->aux.dev = dev; platform->aux.transfer = anx7625_aux_transfer; platform->aux.wait_hpd_asserted = anx7625_wait_hpd_asserted; drm_dp_aux_init(&platform->aux); ret = anx7625_parse_dt(dev, pdata); if (ret) { if (ret != -EPROBE_DEFER) DRM_DEV_ERROR(dev, "fail to parse DT : %d\n", ret); goto free_wq; } if (!platform->pdata.is_dpi) { ret = anx7625_setup_dsi_device(platform); if (ret < 0) goto free_wq; } /* * Registering the i2c devices will retrigger deferred probe, so it * needs to be done after calls that might return EPROBE_DEFER, * otherwise we can get an infinite loop. */ if (anx7625_register_i2c_dummy_clients(platform, client) != 0) { ret = -ENOMEM; DRM_DEV_ERROR(dev, "fail to reserve I2C bus.\n"); goto free_wq; } pm_runtime_enable(dev); pm_runtime_set_autosuspend_delay(dev, 1000); pm_runtime_use_autosuspend(dev); pm_suspend_ignore_children(dev, true); ret = devm_add_action_or_reset(dev, anx7625_runtime_disable, dev); if (ret) goto free_wq; /* * Populating the aux bus will retrigger deferred probe, so it needs to * be done after calls that might return EPROBE_DEFER, otherwise we can * get an infinite loop. 
*/ ret = devm_of_dp_aux_populate_bus(&platform->aux, anx7625_link_bridge); if (ret) { if (ret != -ENODEV) { DRM_DEV_ERROR(dev, "failed to populate aux bus : %d\n", ret); goto free_wq; } ret = anx7625_link_bridge(&platform->aux); if (ret) goto free_wq; } if (!platform->pdata.low_power_mode) { anx7625_disable_pd_protocol(platform); pm_runtime_get_sync(dev); _anx7625_hpd_polling(platform, 5000 * 100); } /* Add work function */ if (platform->pdata.intp_irq) queue_work(platform->workqueue, &platform->work); if (platform->pdata.audio_en) anx7625_register_audio(dev, platform); DRM_DEV_DEBUG_DRIVER(dev, "probe done\n"); return 0; free_wq: if (platform->workqueue) destroy_workqueue(platform->workqueue); free_hdcp_wq: if (platform->hdcp_workqueue) destroy_workqueue(platform->hdcp_workqueue); return ret; } static void anx7625_i2c_remove(struct i2c_client *client) { struct anx7625_data *platform = i2c_get_clientdata(client); drm_bridge_remove(&platform->bridge); if (platform->pdata.intp_irq) destroy_workqueue(platform->workqueue); if (platform->hdcp_workqueue) { cancel_delayed_work(&platform->hdcp_work); flush_workqueue(platform->hdcp_workqueue); destroy_workqueue(platform->hdcp_workqueue); } if (!platform->pdata.low_power_mode) pm_runtime_put_sync_suspend(&client->dev); if (platform->pdata.audio_en) anx7625_unregister_audio(platform); } static const struct i2c_device_id anx7625_id[] = { {"anx7625", 0}, {} }; MODULE_DEVICE_TABLE(i2c, anx7625_id); static const struct of_device_id anx_match_table[] = { {.compatible = "analogix,anx7625",}, {}, }; MODULE_DEVICE_TABLE(of, anx_match_table); static struct i2c_driver anx7625_driver = { .driver = { .name = "anx7625", .of_match_table = anx_match_table, .pm = &anx7625_pm_ops, }, .probe = anx7625_i2c_probe, .remove = anx7625_i2c_remove, .id_table = anx7625_id, }; module_i2c_driver(anx7625_driver); MODULE_DESCRIPTION("MIPI2DP anx7625 driver"); MODULE_AUTHOR("Xin Ji <[email protected]>"); MODULE_LICENSE("GPL v2"); 
MODULE_VERSION(ANX7625_DRV_VERSION);
linux-master
drivers/gpu/drm/bridge/analogix/anx7625.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright: 2017 Cadence Design Systems, Inc. * * Author: Boris Brezillon <[email protected]> */ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> #include <drm/drm_probe_helper.h> #include <video/mipi_display.h> #include <linux/clk.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_graph.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/reset.h> #include <linux/phy/phy-mipi-dphy.h> #include "cdns-dsi-core.h" #ifdef CONFIG_DRM_CDNS_DSI_J721E #include "cdns-dsi-j721e.h" #endif #define IP_CONF 0x0 #define SP_HS_FIFO_DEPTH(x) (((x) & GENMASK(30, 26)) >> 26) #define SP_LP_FIFO_DEPTH(x) (((x) & GENMASK(25, 21)) >> 21) #define VRS_FIFO_DEPTH(x) (((x) & GENMASK(20, 16)) >> 16) #define DIRCMD_FIFO_DEPTH(x) (((x) & GENMASK(15, 13)) >> 13) #define SDI_IFACE_32 BIT(12) #define INTERNAL_DATAPATH_32 (0 << 10) #define INTERNAL_DATAPATH_16 (1 << 10) #define INTERNAL_DATAPATH_8 (3 << 10) #define INTERNAL_DATAPATH_SIZE ((x) & GENMASK(11, 10)) #define NUM_IFACE(x) ((((x) & GENMASK(9, 8)) >> 8) + 1) #define MAX_LANE_NB(x) (((x) & GENMASK(7, 6)) >> 6) #define RX_FIFO_DEPTH(x) ((x) & GENMASK(5, 0)) #define MCTL_MAIN_DATA_CTL 0x4 #define TE_MIPI_POLLING_EN BIT(25) #define TE_HW_POLLING_EN BIT(24) #define DISP_EOT_GEN BIT(18) #define HOST_EOT_GEN BIT(17) #define DISP_GEN_CHECKSUM BIT(16) #define DISP_GEN_ECC BIT(15) #define BTA_EN BIT(14) #define READ_EN BIT(13) #define REG_TE_EN BIT(12) #define IF_TE_EN(x) BIT(8 + (x)) #define TVG_SEL BIT(6) #define VID_EN BIT(5) #define IF_VID_SELECT(x) ((x) << 2) #define IF_VID_SELECT_MASK GENMASK(3, 2) #define IF_VID_MODE BIT(1) #define LINK_EN BIT(0) #define MCTL_MAIN_PHY_CTL 0x8 #define HS_INVERT_DAT(x) BIT(19 + ((x) * 2)) #define SWAP_PINS_DAT(x) BIT(18 + ((x) * 2)) #define HS_INVERT_CLK BIT(17) #define SWAP_PINS_CLK BIT(16) #define HS_SKEWCAL_EN BIT(15) #define WAIT_BURST_TIME(x) ((x) << 10) 
#define DATA_ULPM_EN(x) BIT(6 + (x)) #define CLK_ULPM_EN BIT(5) #define CLK_CONTINUOUS BIT(4) #define DATA_LANE_EN(x) BIT((x) - 1) #define MCTL_MAIN_EN 0xc #define DATA_FORCE_STOP BIT(17) #define CLK_FORCE_STOP BIT(16) #define IF_EN(x) BIT(13 + (x)) #define DATA_LANE_ULPM_REQ(l) BIT(9 + (l)) #define CLK_LANE_ULPM_REQ BIT(8) #define DATA_LANE_START(x) BIT(4 + (x)) #define CLK_LANE_EN BIT(3) #define PLL_START BIT(0) #define MCTL_DPHY_CFG0 0x10 #define DPHY_C_RSTB BIT(20) #define DPHY_D_RSTB(x) GENMASK(15 + (x), 16) #define DPHY_PLL_PDN BIT(10) #define DPHY_CMN_PDN BIT(9) #define DPHY_C_PDN BIT(8) #define DPHY_D_PDN(x) GENMASK(3 + (x), 4) #define DPHY_ALL_D_PDN GENMASK(7, 4) #define DPHY_PLL_PSO BIT(1) #define DPHY_CMN_PSO BIT(0) #define MCTL_DPHY_TIMEOUT1 0x14 #define HSTX_TIMEOUT(x) ((x) << 4) #define HSTX_TIMEOUT_MAX GENMASK(17, 0) #define CLK_DIV(x) (x) #define CLK_DIV_MAX GENMASK(3, 0) #define MCTL_DPHY_TIMEOUT2 0x18 #define LPRX_TIMEOUT(x) (x) #define MCTL_ULPOUT_TIME 0x1c #define DATA_LANE_ULPOUT_TIME(x) ((x) << 9) #define CLK_LANE_ULPOUT_TIME(x) (x) #define MCTL_3DVIDEO_CTL 0x20 #define VID_VSYNC_3D_EN BIT(7) #define VID_VSYNC_3D_LR BIT(5) #define VID_VSYNC_3D_SECOND_EN BIT(4) #define VID_VSYNC_3DFORMAT_LINE (0 << 2) #define VID_VSYNC_3DFORMAT_FRAME (1 << 2) #define VID_VSYNC_3DFORMAT_PIXEL (2 << 2) #define VID_VSYNC_3DMODE_OFF 0 #define VID_VSYNC_3DMODE_PORTRAIT 1 #define VID_VSYNC_3DMODE_LANDSCAPE 2 #define MCTL_MAIN_STS 0x24 #define MCTL_MAIN_STS_CTL 0x130 #define MCTL_MAIN_STS_CLR 0x150 #define MCTL_MAIN_STS_FLAG 0x170 #define HS_SKEWCAL_DONE BIT(11) #define IF_UNTERM_PKT_ERR(x) BIT(8 + (x)) #define LPRX_TIMEOUT_ERR BIT(7) #define HSTX_TIMEOUT_ERR BIT(6) #define DATA_LANE_RDY(l) BIT(2 + (l)) #define CLK_LANE_RDY BIT(1) #define PLL_LOCKED BIT(0) #define MCTL_DPHY_ERR 0x28 #define MCTL_DPHY_ERR_CTL1 0x148 #define MCTL_DPHY_ERR_CLR 0x168 #define MCTL_DPHY_ERR_FLAG 0x188 #define ERR_CONT_LP(x, l) BIT(18 + ((x) * 4) + (l)) #define ERR_CONTROL(l) BIT(14 + (l)) 
#define ERR_SYNESC(l) BIT(10 + (l)) #define ERR_ESC(l) BIT(6 + (l)) #define MCTL_DPHY_ERR_CTL2 0x14c #define ERR_CONT_LP_EDGE(x, l) BIT(12 + ((x) * 4) + (l)) #define ERR_CONTROL_EDGE(l) BIT(8 + (l)) #define ERR_SYN_ESC_EDGE(l) BIT(4 + (l)) #define ERR_ESC_EDGE(l) BIT(0 + (l)) #define MCTL_LANE_STS 0x2c #define PPI_C_TX_READY_HS BIT(18) #define DPHY_PLL_LOCK BIT(17) #define PPI_D_RX_ULPS_ESC(x) (((x) & GENMASK(15, 12)) >> 12) #define LANE_STATE_START 0 #define LANE_STATE_IDLE 1 #define LANE_STATE_WRITE 2 #define LANE_STATE_ULPM 3 #define LANE_STATE_READ 4 #define DATA_LANE_STATE(l, val) \ (((val) >> (2 + 2 * (l) + ((l) ? 1 : 0))) & GENMASK((l) ? 1 : 2, 0)) #define CLK_LANE_STATE_HS 2 #define CLK_LANE_STATE(val) ((val) & GENMASK(1, 0)) #define DSC_MODE_CTL 0x30 #define DSC_MODE_EN BIT(0) #define DSC_CMD_SEND 0x34 #define DSC_SEND_PPS BIT(0) #define DSC_EXECUTE_QUEUE BIT(1) #define DSC_PPS_WRDAT 0x38 #define DSC_MODE_STS 0x3c #define DSC_PPS_DONE BIT(1) #define DSC_EXEC_DONE BIT(2) #define CMD_MODE_CTL 0x70 #define IF_LP_EN(x) BIT(9 + (x)) #define IF_VCHAN_ID(x, c) ((c) << ((x) * 2)) #define CMD_MODE_CTL2 0x74 #define TE_TIMEOUT(x) ((x) << 11) #define FILL_VALUE(x) ((x) << 3) #define ARB_IF_WITH_HIGHEST_PRIORITY(x) ((x) << 1) #define ARB_ROUND_ROBIN_MODE BIT(0) #define CMD_MODE_STS 0x78 #define CMD_MODE_STS_CTL 0x134 #define CMD_MODE_STS_CLR 0x154 #define CMD_MODE_STS_FLAG 0x174 #define ERR_IF_UNDERRUN(x) BIT(4 + (x)) #define ERR_UNWANTED_READ BIT(3) #define ERR_TE_MISS BIT(2) #define ERR_NO_TE BIT(1) #define CSM_RUNNING BIT(0) #define DIRECT_CMD_SEND 0x80 #define DIRECT_CMD_MAIN_SETTINGS 0x84 #define TRIGGER_VAL(x) ((x) << 25) #define CMD_LP_EN BIT(24) #define CMD_SIZE(x) ((x) << 16) #define CMD_VCHAN_ID(x) ((x) << 14) #define CMD_DATATYPE(x) ((x) << 8) #define CMD_LONG BIT(3) #define WRITE_CMD 0 #define READ_CMD 1 #define TE_REQ 4 #define TRIGGER_REQ 5 #define BTA_REQ 6 #define DIRECT_CMD_STS 0x88 #define DIRECT_CMD_STS_CTL 0x138 #define DIRECT_CMD_STS_CLR 0x158 
#define DIRECT_CMD_STS_FLAG 0x178 #define RCVD_ACK_VAL(val) ((val) >> 16) #define RCVD_TRIGGER_VAL(val) (((val) & GENMASK(14, 11)) >> 11) #define READ_COMPLETED_WITH_ERR BIT(10) #define BTA_FINISHED BIT(9) #define BTA_COMPLETED BIT(8) #define TE_RCVD BIT(7) #define TRIGGER_RCVD BIT(6) #define ACK_WITH_ERR_RCVD BIT(5) #define ACK_RCVD BIT(4) #define READ_COMPLETED BIT(3) #define TRIGGER_COMPLETED BIT(2) #define WRITE_COMPLETED BIT(1) #define SENDING_CMD BIT(0) #define DIRECT_CMD_STOP_READ 0x8c #define DIRECT_CMD_WRDATA 0x90 #define DIRECT_CMD_FIFO_RST 0x94 #define DIRECT_CMD_RDDATA 0xa0 #define DIRECT_CMD_RD_PROPS 0xa4 #define RD_DCS BIT(18) #define RD_VCHAN_ID(val) (((val) >> 16) & GENMASK(1, 0)) #define RD_SIZE(val) ((val) & GENMASK(15, 0)) #define DIRECT_CMD_RD_STS 0xa8 #define DIRECT_CMD_RD_STS_CTL 0x13c #define DIRECT_CMD_RD_STS_CLR 0x15c #define DIRECT_CMD_RD_STS_FLAG 0x17c #define ERR_EOT_WITH_ERR BIT(8) #define ERR_MISSING_EOT BIT(7) #define ERR_WRONG_LENGTH BIT(6) #define ERR_OVERSIZE BIT(5) #define ERR_RECEIVE BIT(4) #define ERR_UNDECODABLE BIT(3) #define ERR_CHECKSUM BIT(2) #define ERR_UNCORRECTABLE BIT(1) #define ERR_FIXED BIT(0) #define VID_MAIN_CTL 0xb0 #define VID_IGNORE_MISS_VSYNC BIT(31) #define VID_FIELD_SW BIT(28) #define VID_INTERLACED_EN BIT(27) #define RECOVERY_MODE(x) ((x) << 25) #define RECOVERY_MODE_NEXT_HSYNC 0 #define RECOVERY_MODE_NEXT_STOP_POINT 2 #define RECOVERY_MODE_NEXT_VSYNC 3 #define REG_BLKEOL_MODE(x) ((x) << 23) #define REG_BLKLINE_MODE(x) ((x) << 21) #define REG_BLK_MODE_NULL_PKT 0 #define REG_BLK_MODE_BLANKING_PKT 1 #define REG_BLK_MODE_LP 2 #define SYNC_PULSE_HORIZONTAL BIT(20) #define SYNC_PULSE_ACTIVE BIT(19) #define BURST_MODE BIT(18) #define VID_PIXEL_MODE_MASK GENMASK(17, 14) #define VID_PIXEL_MODE_RGB565 (0 << 14) #define VID_PIXEL_MODE_RGB666_PACKED (1 << 14) #define VID_PIXEL_MODE_RGB666 (2 << 14) #define VID_PIXEL_MODE_RGB888 (3 << 14) #define VID_PIXEL_MODE_RGB101010 (4 << 14) #define VID_PIXEL_MODE_RGB121212 (5 << 
14) #define VID_PIXEL_MODE_YUV420 (8 << 14) #define VID_PIXEL_MODE_YUV422_PACKED (9 << 14) #define VID_PIXEL_MODE_YUV422 (10 << 14) #define VID_PIXEL_MODE_YUV422_24B (11 << 14) #define VID_PIXEL_MODE_DSC_COMP (12 << 14) #define VID_DATATYPE(x) ((x) << 8) #define VID_VIRTCHAN_ID(iface, x) ((x) << (4 + (iface) * 2)) #define STOP_MODE(x) ((x) << 2) #define START_MODE(x) (x) #define VID_VSIZE1 0xb4 #define VFP_LEN(x) ((x) << 12) #define VBP_LEN(x) ((x) << 6) #define VSA_LEN(x) (x) #define VID_VSIZE2 0xb8 #define VACT_LEN(x) (x) #define VID_HSIZE1 0xc0 #define HBP_LEN(x) ((x) << 16) #define HSA_LEN(x) (x) #define VID_HSIZE2 0xc4 #define HFP_LEN(x) ((x) << 16) #define HACT_LEN(x) (x) #define VID_BLKSIZE1 0xcc #define BLK_EOL_PKT_LEN(x) ((x) << 15) #define BLK_LINE_EVENT_PKT_LEN(x) (x) #define VID_BLKSIZE2 0xd0 #define BLK_LINE_PULSE_PKT_LEN(x) (x) #define VID_PKT_TIME 0xd8 #define BLK_EOL_DURATION(x) (x) #define VID_DPHY_TIME 0xdc #define REG_WAKEUP_TIME(x) ((x) << 17) #define REG_LINE_DURATION(x) (x) #define VID_ERR_COLOR1 0xe0 #define COL_GREEN(x) ((x) << 12) #define COL_RED(x) (x) #define VID_ERR_COLOR2 0xe4 #define PAD_VAL(x) ((x) << 12) #define COL_BLUE(x) (x) #define VID_VPOS 0xe8 #define LINE_VAL(val) (((val) & GENMASK(14, 2)) >> 2) #define LINE_POS(val) ((val) & GENMASK(1, 0)) #define VID_HPOS 0xec #define HORIZ_VAL(val) (((val) & GENMASK(17, 3)) >> 3) #define HORIZ_POS(val) ((val) & GENMASK(2, 0)) #define VID_MODE_STS 0xf0 #define VID_MODE_STS_CTL 0x140 #define VID_MODE_STS_CLR 0x160 #define VID_MODE_STS_FLAG 0x180 #define VSG_RECOVERY BIT(10) #define ERR_VRS_WRONG_LEN BIT(9) #define ERR_LONG_READ BIT(8) #define ERR_LINE_WRITE BIT(7) #define ERR_BURST_WRITE BIT(6) #define ERR_SMALL_HEIGHT BIT(5) #define ERR_SMALL_LEN BIT(4) #define ERR_MISSING_VSYNC BIT(3) #define ERR_MISSING_HSYNC BIT(2) #define ERR_MISSING_DATA BIT(1) #define VSG_RUNNING BIT(0) #define VID_VCA_SETTING1 0xf4 #define BURST_LP BIT(16) #define MAX_BURST_LIMIT(x) (x) #define VID_VCA_SETTING2 0xf8 
#define MAX_LINE_LIMIT(x) ((x) << 16) #define EXACT_BURST_LIMIT(x) (x) #define TVG_CTL 0xfc #define TVG_STRIPE_SIZE(x) ((x) << 5) #define TVG_MODE_MASK GENMASK(4, 3) #define TVG_MODE_SINGLE_COLOR (0 << 3) #define TVG_MODE_VSTRIPES (2 << 3) #define TVG_MODE_HSTRIPES (3 << 3) #define TVG_STOPMODE_MASK GENMASK(2, 1) #define TVG_STOPMODE_EOF (0 << 1) #define TVG_STOPMODE_EOL (1 << 1) #define TVG_STOPMODE_NOW (2 << 1) #define TVG_RUN BIT(0) #define TVG_IMG_SIZE 0x100 #define TVG_NBLINES(x) ((x) << 16) #define TVG_LINE_SIZE(x) (x) #define TVG_COLOR1 0x104 #define TVG_COL1_GREEN(x) ((x) << 12) #define TVG_COL1_RED(x) (x) #define TVG_COLOR1_BIS 0x108 #define TVG_COL1_BLUE(x) (x) #define TVG_COLOR2 0x10c #define TVG_COL2_GREEN(x) ((x) << 12) #define TVG_COL2_RED(x) (x) #define TVG_COLOR2_BIS 0x110 #define TVG_COL2_BLUE(x) (x) #define TVG_STS 0x114 #define TVG_STS_CTL 0x144 #define TVG_STS_CLR 0x164 #define TVG_STS_FLAG 0x184 #define TVG_STS_RUNNING BIT(0) #define STS_CTL_EDGE(e) ((e) << 16) #define DPHY_LANES_MAP 0x198 #define DAT_REMAP_CFG(b, l) ((l) << ((b) * 8)) #define DPI_IRQ_EN 0x1a0 #define DPI_IRQ_CLR 0x1a4 #define DPI_IRQ_STS 0x1a8 #define PIXEL_BUF_OVERFLOW BIT(0) #define DPI_CFG 0x1ac #define DPI_CFG_FIFO_DEPTH(x) ((x) >> 16) #define DPI_CFG_FIFO_LEVEL(x) ((x) & GENMASK(15, 0)) #define TEST_GENERIC 0x1f0 #define TEST_STATUS(x) ((x) >> 16) #define TEST_CTRL(x) (x) #define ID_REG 0x1fc #define REV_VENDOR_ID(x) (((x) & GENMASK(31, 20)) >> 20) #define REV_PRODUCT_ID(x) (((x) & GENMASK(19, 12)) >> 12) #define REV_HW(x) (((x) & GENMASK(11, 8)) >> 8) #define REV_MAJOR(x) (((x) & GENMASK(7, 4)) >> 4) #define REV_MINOR(x) ((x) & GENMASK(3, 0)) #define DSI_OUTPUT_PORT 0 #define DSI_INPUT_PORT(inputid) (1 + (inputid)) #define DSI_HBP_FRAME_OVERHEAD 12 #define DSI_HSA_FRAME_OVERHEAD 14 #define DSI_HFP_FRAME_OVERHEAD 6 #define DSI_HSS_VSS_VSE_FRAME_OVERHEAD 4 #define DSI_BLANKING_FRAME_OVERHEAD 6 #define DSI_NULL_FRAME_OVERHEAD 6 #define DSI_EOT_PKT_SIZE 4 static inline 
struct cdns_dsi *input_to_dsi(struct cdns_dsi_input *input) { return container_of(input, struct cdns_dsi, input); } static inline struct cdns_dsi *to_cdns_dsi(struct mipi_dsi_host *host) { return container_of(host, struct cdns_dsi, base); } static inline struct cdns_dsi_input * bridge_to_cdns_dsi_input(struct drm_bridge *bridge) { return container_of(bridge, struct cdns_dsi_input, bridge); } static unsigned int mode_to_dpi_hfp(const struct drm_display_mode *mode, bool mode_valid_check) { if (mode_valid_check) return mode->hsync_start - mode->hdisplay; return mode->crtc_hsync_start - mode->crtc_hdisplay; } static unsigned int dpi_to_dsi_timing(unsigned int dpi_timing, unsigned int dpi_bpp, unsigned int dsi_pkt_overhead) { unsigned int dsi_timing = DIV_ROUND_UP(dpi_timing * dpi_bpp, 8); if (dsi_timing < dsi_pkt_overhead) dsi_timing = 0; else dsi_timing -= dsi_pkt_overhead; return dsi_timing; } static int cdns_dsi_mode2cfg(struct cdns_dsi *dsi, const struct drm_display_mode *mode, struct cdns_dsi_cfg *dsi_cfg, bool mode_valid_check) { struct cdns_dsi_output *output = &dsi->output; unsigned int tmp; bool sync_pulse = false; int bpp; memset(dsi_cfg, 0, sizeof(*dsi_cfg)); if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) sync_pulse = true; bpp = mipi_dsi_pixel_format_to_bpp(output->dev->format); if (mode_valid_check) tmp = mode->htotal - (sync_pulse ? mode->hsync_end : mode->hsync_start); else tmp = mode->crtc_htotal - (sync_pulse ? mode->crtc_hsync_end : mode->crtc_hsync_start); dsi_cfg->hbp = dpi_to_dsi_timing(tmp, bpp, DSI_HBP_FRAME_OVERHEAD); if (sync_pulse) { if (mode_valid_check) tmp = mode->hsync_end - mode->hsync_start; else tmp = mode->crtc_hsync_end - mode->crtc_hsync_start; dsi_cfg->hsa = dpi_to_dsi_timing(tmp, bpp, DSI_HSA_FRAME_OVERHEAD); } dsi_cfg->hact = dpi_to_dsi_timing(mode_valid_check ? 
mode->hdisplay : mode->crtc_hdisplay, bpp, 0); dsi_cfg->hfp = dpi_to_dsi_timing(mode_to_dpi_hfp(mode, mode_valid_check), bpp, DSI_HFP_FRAME_OVERHEAD); return 0; } static int cdns_dsi_adjust_phy_config(struct cdns_dsi *dsi, struct cdns_dsi_cfg *dsi_cfg, struct phy_configure_opts_mipi_dphy *phy_cfg, const struct drm_display_mode *mode, bool mode_valid_check) { struct cdns_dsi_output *output = &dsi->output; unsigned long long dlane_bps; unsigned long adj_dsi_htotal; unsigned long dsi_htotal; unsigned long dpi_htotal; unsigned long dpi_hz; unsigned int dsi_hfp_ext; unsigned int lanes = output->dev->lanes; dsi_htotal = dsi_cfg->hbp + DSI_HBP_FRAME_OVERHEAD; if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) dsi_htotal += dsi_cfg->hsa + DSI_HSA_FRAME_OVERHEAD; dsi_htotal += dsi_cfg->hact; dsi_htotal += dsi_cfg->hfp + DSI_HFP_FRAME_OVERHEAD; /* * Make sure DSI htotal is aligned on a lane boundary when calculating * the expected data rate. This is done by extending HFP in case of * misalignment. */ adj_dsi_htotal = dsi_htotal; if (dsi_htotal % lanes) adj_dsi_htotal += lanes - (dsi_htotal % lanes); dpi_hz = (mode_valid_check ? mode->clock : mode->crtc_clock) * 1000; dlane_bps = (unsigned long long)dpi_hz * adj_dsi_htotal; /* data rate in bytes/sec is not an integer, refuse the mode. */ dpi_htotal = mode_valid_check ? mode->htotal : mode->crtc_htotal; if (do_div(dlane_bps, lanes * dpi_htotal)) return -EINVAL; /* data rate was in bytes/sec, convert to bits/sec. 
*/ phy_cfg->hs_clk_rate = dlane_bps * 8; dsi_hfp_ext = adj_dsi_htotal - dsi_htotal; dsi_cfg->hfp += dsi_hfp_ext; dsi_cfg->htotal = dsi_htotal + dsi_hfp_ext; return 0; } static int cdns_dsi_check_conf(struct cdns_dsi *dsi, const struct drm_display_mode *mode, struct cdns_dsi_cfg *dsi_cfg, bool mode_valid_check) { struct cdns_dsi_output *output = &dsi->output; struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy; unsigned long dsi_hss_hsa_hse_hbp; unsigned int nlanes = output->dev->lanes; int ret; ret = cdns_dsi_mode2cfg(dsi, mode, dsi_cfg, mode_valid_check); if (ret) return ret; phy_mipi_dphy_get_default_config(mode->crtc_clock * 1000, mipi_dsi_pixel_format_to_bpp(output->dev->format), nlanes, phy_cfg); ret = cdns_dsi_adjust_phy_config(dsi, dsi_cfg, phy_cfg, mode, mode_valid_check); if (ret) return ret; ret = phy_validate(dsi->dphy, PHY_MODE_MIPI_DPHY, 0, &output->phy_opts); if (ret) return ret; dsi_hss_hsa_hse_hbp = dsi_cfg->hbp + DSI_HBP_FRAME_OVERHEAD; if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) dsi_hss_hsa_hse_hbp += dsi_cfg->hsa + DSI_HSA_FRAME_OVERHEAD; /* * Make sure DPI(HFP) > DSI(HSS+HSA+HSE+HBP) to guarantee that the FIFO * is empty before we start a receiving a new line on the DPI * interface. */ if ((u64)phy_cfg->hs_clk_rate * mode_to_dpi_hfp(mode, mode_valid_check) * nlanes < (u64)dsi_hss_hsa_hse_hbp * (mode_valid_check ? 
/*
 * NOTE(review): this fragment is the tail of a function whose opening lies
 * before this chunk; left byte-identical.  It appears to reject a mode whose
 * required rate exceeds some limit — confirm against the full file.
 */
mode->clock : mode->crtc_clock) * 1000)
		return -EINVAL;

	return 0;
}

/*
 * Attach the DSI input bridge to the display pipeline.
 *
 * Refuses non-atomic DRM devices, then chains to the output bridge that was
 * discovered in cdns_dsi_attach().
 */
static int cdns_dsi_bridge_attach(struct drm_bridge *bridge,
				  enum drm_bridge_attach_flags flags)
{
	struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
	struct cdns_dsi *dsi = input_to_dsi(input);
	struct cdns_dsi_output *output = &dsi->output;

	if (!drm_core_check_feature(bridge->dev, DRIVER_ATOMIC)) {
		dev_err(dsi->base.dev,
			"cdns-dsi driver is only compatible with DRM devices supporting atomic updates");
		return -ENOTSUPP;
	}

	return drm_bridge_attach(bridge->encoder, output->bridge, bridge,
				 flags);
}

/*
 * Validate a display mode against the controller's vertical/horizontal
 * timing constraints and the derived DSI configuration.
 */
static enum drm_mode_status
cdns_dsi_bridge_mode_valid(struct drm_bridge *bridge,
			   const struct drm_display_info *info,
			   const struct drm_display_mode *mode)
{
	struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
	struct cdns_dsi *dsi = input_to_dsi(input);
	struct cdns_dsi_output *output = &dsi->output;
	struct cdns_dsi_cfg dsi_cfg;
	int bpp, ret;

	/*
	 * VFP_DSI should be less than VFP_DPI and VFP_DSI should be at
	 * least 1.
	 */
	if (mode->vtotal - mode->vsync_end < 2)
		return MODE_V_ILLEGAL;

	/* VSA_DSI = VSA_DPI and must be at least 2. */
	if (mode->vsync_end - mode->vsync_start < 2)
		return MODE_V_ILLEGAL;

	/* HACT must be 32-bits aligned. */
	bpp = mipi_dsi_pixel_format_to_bpp(output->dev->format);
	if ((mode->hdisplay * bpp) % 32)
		return MODE_H_ILLEGAL;

	/* Dry-run the timing computation to confirm the mode is achievable. */
	ret = cdns_dsi_check_conf(dsi, mode, &dsi_cfg, true);
	if (ret)
		return MODE_BAD;

	return MODE_OK;
}

/*
 * Stop video output: clear the video-mode/EOT bits, disable this input's
 * interface, give the platform hook a chance to act, then drop one
 * pm_runtime reference (pairs with the get in cdns_dsi_bridge_enable()).
 */
static void cdns_dsi_bridge_disable(struct drm_bridge *bridge)
{
	struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
	struct cdns_dsi *dsi = input_to_dsi(input);
	u32 val;

	val = readl(dsi->regs + MCTL_MAIN_DATA_CTL);
	val &= ~(IF_VID_SELECT_MASK | IF_VID_MODE | VID_EN | HOST_EOT_GEN |
		 DISP_EOT_GEN);
	writel(val, dsi->regs + MCTL_MAIN_DATA_CTL);

	val = readl(dsi->regs + MCTL_MAIN_EN) & ~IF_EN(input->id);
	writel(val, dsi->regs + MCTL_MAIN_EN);

	if (dsi->platform_ops && dsi->platform_ops->disable)
		dsi->platform_ops->disable(dsi);

	pm_runtime_put(dsi->base.dev);
}

/*
 * Drop the pm_runtime reference taken in cdns_dsi_bridge_pre_enable().
 */
static void cdns_dsi_bridge_post_disable(struct drm_bridge *bridge)
{
	struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
	struct cdns_dsi *dsi = input_to_dsi(input);

	pm_runtime_put(dsi->base.dev);
}

/*
 * Bring up the D-PHY: power down all blocks, (re)configure the PHY, then
 * power the PLL up and wait for lock before releasing the lane resets.
 * Idempotent via dsi->phy_initialized.
 */
static void cdns_dsi_hs_init(struct cdns_dsi *dsi)
{
	struct cdns_dsi_output *output = &dsi->output;
	u32 status;

	if (dsi->phy_initialized)
		return;

	/*
	 * Power all internal DPHY blocks down and maintain their reset line
	 * asserted before changing the DPHY config.
	 */
	writel(DPHY_CMN_PSO | DPHY_PLL_PSO | DPHY_ALL_D_PDN | DPHY_C_PDN |
	       DPHY_CMN_PDN | DPHY_PLL_PDN,
	       dsi->regs + MCTL_DPHY_CFG0);

	phy_init(dsi->dphy);
	phy_set_mode(dsi->dphy, PHY_MODE_MIPI_DPHY);
	phy_configure(dsi->dphy, &output->phy_opts);
	phy_power_on(dsi->dphy);

	/* Activate the PLL and wait until it's locked. */
	writel(PLL_LOCKED, dsi->regs + MCTL_MAIN_STS_CLR);
	writel(DPHY_CMN_PSO | DPHY_ALL_D_PDN | DPHY_C_PDN | DPHY_CMN_PDN,
	       dsi->regs + MCTL_DPHY_CFG0);
	WARN_ON_ONCE(readl_poll_timeout(dsi->regs + MCTL_MAIN_STS, status,
					status & PLL_LOCKED, 100, 100));
	/* De-assert data and clock reset lines. */
	writel(DPHY_CMN_PSO | DPHY_ALL_D_PDN | DPHY_C_PDN | DPHY_CMN_PDN |
	       DPHY_D_RSTB(output->dev->lanes) | DPHY_C_RSTB,
	       dsi->regs + MCTL_DPHY_CFG0);
	dsi->phy_initialized = true;
}

/*
 * One-time link bring-up: enable the data lanes, program the ULP exit time,
 * enable the link and start the clock lane + PLL.  Idempotent via
 * dsi->link_initialized (cleared again on suspend).
 */
static void cdns_dsi_init_link(struct cdns_dsi *dsi)
{
	struct cdns_dsi_output *output = &dsi->output;
	unsigned long sysclk_period, ulpout;
	u32 val;
	int i;

	if (dsi->link_initialized)
		return;

	val = 0;
	/*
	 * NOTE(review): the loop starts at 1 — lane 0 apparently needs no
	 * explicit DATA_LANE_EN bit; confirm against the register layout.
	 */
	for (i = 1; i < output->dev->lanes; i++)
		val |= DATA_LANE_EN(i);

	if (!(output->dev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
		val |= CLK_CONTINUOUS;

	writel(val, dsi->regs + MCTL_MAIN_PHY_CTL);

	/* ULPOUT should be set to 1ms and is expressed in sysclk cycles. */
	sysclk_period = NSEC_PER_SEC / clk_get_rate(dsi->dsi_sys_clk);
	ulpout = DIV_ROUND_UP(NSEC_PER_MSEC, sysclk_period);
	writel(CLK_LANE_ULPOUT_TIME(ulpout) | DATA_LANE_ULPOUT_TIME(ulpout),
	       dsi->regs + MCTL_ULPOUT_TIME);

	writel(LINK_EN, dsi->regs + MCTL_MAIN_DATA_CTL);

	val = CLK_LANE_EN | PLL_START;
	for (i = 0; i < output->dev->lanes; i++)
		val |= DATA_LANE_START(i);

	writel(val, dsi->regs + MCTL_MAIN_EN);

	dsi->link_initialized = true;
}

/*
 * Program the full video-mode configuration (horizontal/vertical sizes,
 * blanking packet lengths, D-PHY timings, pixel format) and enable the
 * interface.  Takes a pm_runtime reference released by
 * cdns_dsi_bridge_disable().
 */
static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
{
	struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
	struct cdns_dsi *dsi = input_to_dsi(input);
	struct cdns_dsi_output *output = &dsi->output;
	struct drm_display_mode *mode;
	struct phy_configure_opts_mipi_dphy *phy_cfg =
		&output->phy_opts.mipi_dphy;
	unsigned long tx_byte_period;
	struct cdns_dsi_cfg dsi_cfg;
	u32 tmp, reg_wakeup, div;
	int nlanes;

	if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
		return;

	if (dsi->platform_ops && dsi->platform_ops->enable)
		dsi->platform_ops->enable(dsi);

	mode = &bridge->encoder->crtc->state->adjusted_mode;
	nlanes = output->dev->lanes;

	/* Recompute the timings for real this time (mode_valid dry-ran it). */
	WARN_ON_ONCE(cdns_dsi_check_conf(dsi, mode, &dsi_cfg, false));

	cdns_dsi_hs_init(dsi);
	cdns_dsi_init_link(dsi);

	writel(HBP_LEN(dsi_cfg.hbp) | HSA_LEN(dsi_cfg.hsa),
	       dsi->regs + VID_HSIZE1);
	writel(HFP_LEN(dsi_cfg.hfp) | HACT_LEN(dsi_cfg.hact),
	       dsi->regs + VID_HSIZE2);

	writel(VBP_LEN(mode->crtc_vtotal - mode->crtc_vsync_end - 1) |
	       VFP_LEN(mode->crtc_vsync_start - mode->crtc_vdisplay) |
	       VSA_LEN(mode->crtc_vsync_end - mode->crtc_vsync_start + 1),
	       dsi->regs + VID_VSIZE1);
	writel(mode->crtc_vdisplay, dsi->regs + VID_VSIZE2);

	/* Blanking packet length for sync-pulse mode. */
	tmp = dsi_cfg.htotal -
	      (dsi_cfg.hsa + DSI_BLANKING_FRAME_OVERHEAD +
	       DSI_HSA_FRAME_OVERHEAD);
	writel(BLK_LINE_PULSE_PKT_LEN(tmp), dsi->regs + VID_BLKSIZE2);
	if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
		writel(MAX_LINE_LIMIT(tmp - DSI_NULL_FRAME_OVERHEAD),
		       dsi->regs + VID_VCA_SETTING2);

	/* Blanking packet length for sync-event mode. */
	tmp = dsi_cfg.htotal -
	      (DSI_HSS_VSS_VSE_FRAME_OVERHEAD + DSI_BLANKING_FRAME_OVERHEAD);
	writel(BLK_LINE_EVENT_PKT_LEN(tmp), dsi->regs + VID_BLKSIZE1);
	if (!(output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE))
		writel(MAX_LINE_LIMIT(tmp - DSI_NULL_FRAME_OVERHEAD),
		       dsi->regs + VID_VCA_SETTING2);

	/* Per-lane line duration in byte-clock cycles. */
	tmp = DIV_ROUND_UP(dsi_cfg.htotal, nlanes) -
	      DIV_ROUND_UP(dsi_cfg.hsa, nlanes);

	if (!(output->dev->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET))
		tmp -= DIV_ROUND_UP(DSI_EOT_PKT_SIZE, nlanes);

	tx_byte_period = DIV_ROUND_DOWN_ULL((u64)NSEC_PER_SEC * 8,
					    phy_cfg->hs_clk_rate);
	reg_wakeup = (phy_cfg->hs_prepare + phy_cfg->hs_zero) / tx_byte_period;
	writel(REG_WAKEUP_TIME(reg_wakeup) | REG_LINE_DURATION(tmp),
	       dsi->regs + VID_DPHY_TIME);

	/*
	 * HSTX and LPRX timeouts are both expressed in TX byte clk cycles and
	 * both should be set to at least the time it takes to transmit a
	 * frame.
	 */
	tmp = NSEC_PER_SEC / drm_mode_vrefresh(mode);
	tmp /= tx_byte_period;

	/* Halve the timeout (raising the divider) until it fits the field. */
	for (div = 0; div <= CLK_DIV_MAX; div++) {
		if (tmp <= HSTX_TIMEOUT_MAX)
			break;

		tmp >>= 1;
	}

	if (tmp > HSTX_TIMEOUT_MAX)
		tmp = HSTX_TIMEOUT_MAX;

	writel(CLK_DIV(div) | HSTX_TIMEOUT(tmp),
	       dsi->regs + MCTL_DPHY_TIMEOUT1);
	writel(LPRX_TIMEOUT(tmp), dsi->regs + MCTL_DPHY_TIMEOUT2);

	if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO) {
		switch (output->dev->format) {
		case MIPI_DSI_FMT_RGB888:
			tmp = VID_PIXEL_MODE_RGB888 |
			      VID_DATATYPE(MIPI_DSI_PACKED_PIXEL_STREAM_24);
			break;

		case MIPI_DSI_FMT_RGB666:
			tmp = VID_PIXEL_MODE_RGB666 |
			      VID_DATATYPE(MIPI_DSI_PIXEL_STREAM_3BYTE_18);
			break;

		case MIPI_DSI_FMT_RGB666_PACKED:
			tmp = VID_PIXEL_MODE_RGB666_PACKED |
			      VID_DATATYPE(MIPI_DSI_PACKED_PIXEL_STREAM_18);
			break;

		case MIPI_DSI_FMT_RGB565:
			tmp = VID_PIXEL_MODE_RGB565 |
			      VID_DATATYPE(MIPI_DSI_PACKED_PIXEL_STREAM_16);
			break;

		default:
			/*
			 * NOTE(review): returning here keeps the pm_runtime
			 * reference taken above — verify this is intentional.
			 */
			dev_err(dsi->base.dev, "Unsupported DSI format\n");
			return;
		}

		if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
			tmp |= SYNC_PULSE_ACTIVE | SYNC_PULSE_HORIZONTAL;

		tmp |= REG_BLKLINE_MODE(REG_BLK_MODE_BLANKING_PKT) |
		       REG_BLKEOL_MODE(REG_BLK_MODE_BLANKING_PKT) |
		       RECOVERY_MODE(RECOVERY_MODE_NEXT_HSYNC) |
		       VID_IGNORE_MISS_VSYNC;

		writel(tmp, dsi->regs + VID_MAIN_CTL);
	}

	tmp = readl(dsi->regs + MCTL_MAIN_DATA_CTL);
	tmp &= ~(IF_VID_SELECT_MASK | HOST_EOT_GEN | IF_VID_MODE);

	if (!(output->dev->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET))
		tmp |= HOST_EOT_GEN;

	if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO)
		tmp |= IF_VID_MODE | IF_VID_SELECT(input->id) | VID_EN;

	writel(tmp, dsi->regs + MCTL_MAIN_DATA_CTL);

	tmp = readl(dsi->regs + MCTL_MAIN_EN) | IF_EN(input->id);
	writel(tmp, dsi->regs + MCTL_MAIN_EN);
}

/*
 * Early bring-up: take a pm_runtime reference (released in post_disable)
 * and initialize the link and PHY before the encoder is enabled.
 */
static void cdns_dsi_bridge_pre_enable(struct drm_bridge *bridge)
{
	struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
	struct cdns_dsi *dsi = input_to_dsi(input);

	if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
		return;

	cdns_dsi_init_link(dsi);
	cdns_dsi_hs_init(dsi);
}
/* DRM bridge operations for the DSI input side of the controller. */
static const struct drm_bridge_funcs cdns_dsi_bridge_funcs = {
	.attach = cdns_dsi_bridge_attach,
	.mode_valid = cdns_dsi_bridge_mode_valid,
	.disable = cdns_dsi_bridge_disable,
	.pre_enable = cdns_dsi_bridge_pre_enable,
	.enable = cdns_dsi_bridge_enable,
	.post_disable = cdns_dsi_bridge_post_disable,
};

/*
 * mipi_dsi_host attach: resolve the downstream panel or bridge from DT,
 * record it as this host's single output, and register our input bridge.
 */
static int cdns_dsi_attach(struct mipi_dsi_host *host,
			   struct mipi_dsi_device *dev)
{
	struct cdns_dsi *dsi = to_cdns_dsi(host);
	struct cdns_dsi_output *output = &dsi->output;
	struct cdns_dsi_input *input = &dsi->input;
	struct drm_bridge *bridge;
	struct drm_panel *panel;
	struct device_node *np;
	int ret;

	/*
	 * We currently do not support connecting several DSI devices to the
	 * same host. In order to support that we'd need the DRM bridge
	 * framework to allow dynamic reconfiguration of the bridge chain.
	 */
	if (output->dev)
		return -EBUSY;

	/* We do not support burst mode yet. */
	if (dev->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
		return -ENOTSUPP;

	/*
	 * The host <-> device link might be described using an OF-graph
	 * representation, in this case we extract the device of_node from
	 * this representation, otherwise we use dsidev->dev.of_node which
	 * should have been filled by the core.
	 */
	np = of_graph_get_remote_node(dsi->base.dev->of_node, DSI_OUTPUT_PORT,
				      dev->channel);
	if (!np)
		np = of_node_get(dev->dev.of_node);

	/* Prefer a panel; fall back to a chained bridge. */
	panel = of_drm_find_panel(np);
	if (!IS_ERR(panel)) {
		bridge = drm_panel_bridge_add_typed(panel,
						    DRM_MODE_CONNECTOR_DSI);
	} else {
		bridge = of_drm_find_bridge(dev->dev.of_node);
		if (!bridge)
			bridge = ERR_PTR(-EINVAL);
	}

	of_node_put(np);

	if (IS_ERR(bridge)) {
		ret = PTR_ERR(bridge);
		dev_err(host->dev, "failed to add DSI device %s (err = %d)",
			dev->name, ret);
		return ret;
	}

	output->dev = dev;
	output->bridge = bridge;
	output->panel = panel;

	/*
	 * The DSI output has been properly configured, we can now safely
	 * register the input to the bridge framework so that it can take place
	 * in a display pipeline.
	 */
	drm_bridge_add(&input->bridge);

	return 0;
}

/*
 * mipi_dsi_host detach: unregister the input bridge and, when the output
 * was a panel, drop the panel-bridge wrapper created in attach.
 */
static int cdns_dsi_detach(struct mipi_dsi_host *host,
			   struct mipi_dsi_device *dev)
{
	struct cdns_dsi *dsi = to_cdns_dsi(host);
	struct cdns_dsi_output *output = &dsi->output;
	struct cdns_dsi_input *input = &dsi->input;

	drm_bridge_remove(&input->bridge);
	if (output->panel)
		drm_panel_bridge_remove(output->bridge);

	return 0;
}

/*
 * IRQ handler: acknowledge direct-command status events (masking the bits
 * that fired) and wake the waiter in cdns_dsi_transfer().
 */
static irqreturn_t cdns_dsi_interrupt(int irq, void *data)
{
	struct cdns_dsi *dsi = data;
	irqreturn_t ret = IRQ_NONE;
	u32 flag, ctl;

	flag = readl(dsi->regs + DIRECT_CMD_STS_FLAG);
	if (flag) {
		ctl = readl(dsi->regs + DIRECT_CMD_STS_CTL);
		ctl &= ~flag;
		writel(ctl, dsi->regs + DIRECT_CMD_STS_CTL);
		complete(&dsi->direct_cmd_comp);
		ret = IRQ_HANDLED;
	}

	return ret;
}

/*
 * Send a DSI packet through the direct-command interface.
 *
 * TX data is packed little-endian into 32-bit FIFO words; the completion is
 * signalled from cdns_dsi_interrupt().  Returns a negative errno on
 * failure.
 *
 * NOTE(review): on a successful read, ret is still 0 here rather than the
 * number of bytes received — confirm callers don't rely on a byte count.
 */
static ssize_t cdns_dsi_transfer(struct mipi_dsi_host *host,
				 const struct mipi_dsi_msg *msg)
{
	struct cdns_dsi *dsi = to_cdns_dsi(host);
	u32 cmd, sts, val, wait = WRITE_COMPLETED, ctl = 0;
	struct mipi_dsi_packet packet;
	int ret, i, tx_len, rx_len;

	ret = pm_runtime_resume_and_get(host->dev);
	if (ret < 0)
		return ret;

	cdns_dsi_init_link(dsi);

	ret = mipi_dsi_create_packet(&packet, msg);
	if (ret)
		goto out;

	tx_len = msg->tx_buf ? msg->tx_len : 0;
	rx_len = msg->rx_buf ? msg->rx_len : 0;

	/* For read operations, the maximum TX len is 2. */
	if (rx_len && tx_len > 2) {
		ret = -ENOTSUPP;
		goto out;
	}

	/* TX len is limited by the CMD FIFO depth. */
	if (tx_len > dsi->direct_cmd_fifo_depth) {
		ret = -ENOTSUPP;
		goto out;
	}

	/* RX len is limited by the RX FIFO depth. */
	if (rx_len > dsi->rx_fifo_depth) {
		ret = -ENOTSUPP;
		goto out;
	}

	cmd = CMD_SIZE(tx_len) | CMD_VCHAN_ID(msg->channel) |
	      CMD_DATATYPE(msg->type);

	if (msg->flags & MIPI_DSI_MSG_USE_LPM)
		cmd |= CMD_LP_EN;

	if (mipi_dsi_packet_format_is_long(msg->type))
		cmd |= CMD_LONG;

	/* Pick the events to wait for and the extra control bits needed. */
	if (rx_len) {
		cmd |= READ_CMD;
		wait = READ_COMPLETED_WITH_ERR | READ_COMPLETED;
		ctl = READ_EN | BTA_EN;
	} else if (msg->flags & MIPI_DSI_MSG_REQ_ACK) {
		cmd |= BTA_REQ;
		wait = ACK_WITH_ERR_RCVD | ACK_RCVD;
		ctl = BTA_EN;
	}

	writel(readl(dsi->regs + MCTL_MAIN_DATA_CTL) | ctl,
	       dsi->regs + MCTL_MAIN_DATA_CTL);

	writel(cmd, dsi->regs + DIRECT_CMD_MAIN_SETTINGS);

	/* Pack the payload, 4 bytes per FIFO word, LSB first. */
	for (i = 0; i < tx_len; i += 4) {
		const u8 *buf = msg->tx_buf;
		int j;

		val = 0;
		for (j = 0; j < 4 && j + i < tx_len; j++)
			val |= (u32)buf[i + j] << (8 * j);

		writel(val, dsi->regs + DIRECT_CMD_WRDATA);
	}

	/* Clear status flags before sending the command. */
	writel(wait, dsi->regs + DIRECT_CMD_STS_CLR);
	writel(wait, dsi->regs + DIRECT_CMD_STS_CTL);
	reinit_completion(&dsi->direct_cmd_comp);
	/* Any write to DIRECT_CMD_SEND triggers transmission. */
	writel(0, dsi->regs + DIRECT_CMD_SEND);

	wait_for_completion_timeout(&dsi->direct_cmd_comp,
				    msecs_to_jiffies(1000));

	sts = readl(dsi->regs + DIRECT_CMD_STS);
	writel(wait, dsi->regs + DIRECT_CMD_STS_CLR);
	writel(0, dsi->regs + DIRECT_CMD_STS_CTL);

	writel(readl(dsi->regs + MCTL_MAIN_DATA_CTL) & ~ctl,
	       dsi->regs + MCTL_MAIN_DATA_CTL);

	/* We did not receive the events we were waiting for. */
	if (!(sts & wait)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	/* 'READ' or 'WRITE with ACK' failed. */
	if (sts & (READ_COMPLETED_WITH_ERR | ACK_WITH_ERR_RCVD)) {
		ret = -EIO;
		goto out;
	}

	/* Unpack the RX FIFO into the caller's buffer, LSB first. */
	for (i = 0; i < rx_len; i += 4) {
		u8 *buf = msg->rx_buf;
		int j;

		val = readl(dsi->regs + DIRECT_CMD_RDDATA);
		for (j = 0; j < 4 && j + i < rx_len; j++)
			buf[i + j] = val >> (8 * j);
	}

out:
	pm_runtime_put(host->dev);

	return ret;
}

static const struct mipi_dsi_host_ops cdns_dsi_ops = {
	.attach = cdns_dsi_attach,
	.detach = cdns_dsi_detach,
	.transfer = cdns_dsi_transfer,
};

/* Runtime resume: release reset, then enable both clocks. */
static int __maybe_unused cdns_dsi_resume(struct device *dev)
{
	struct cdns_dsi *dsi = dev_get_drvdata(dev);

	reset_control_deassert(dsi->dsi_p_rst);
	clk_prepare_enable(dsi->dsi_p_clk);
	clk_prepare_enable(dsi->dsi_sys_clk);

	return 0;
}

/*
 * Runtime suspend: reverse of resume.  The link must be re-initialized on
 * the next resume, hence link_initialized is cleared.
 */
static int __maybe_unused cdns_dsi_suspend(struct device *dev)
{
	struct cdns_dsi *dsi = dev_get_drvdata(dev);

	clk_disable_unprepare(dsi->dsi_sys_clk);
	clk_disable_unprepare(dsi->dsi_p_clk);
	reset_control_assert(dsi->dsi_p_rst);
	dsi->link_initialized = false;

	return 0;
}

static UNIVERSAL_DEV_PM_OPS(cdns_dsi_pm_ops, cdns_dsi_suspend, cdns_dsi_resume,
			    NULL);

/*
 * Probe: map resources, sanity-check the vendor ID, read FIFO depths from
 * IP_CONF, mask all interrupts, then register the MIPI DSI host.
 * The pclk is only kept enabled for the duration of probe.
 */
static int cdns_dsi_drm_probe(struct platform_device *pdev)
{
	struct cdns_dsi *dsi;
	struct cdns_dsi_input *input;
	int ret, irq;
	u32 val;

	dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
	if (!dsi)
		return -ENOMEM;

	platform_set_drvdata(pdev, dsi);

	input = &dsi->input;

	dsi->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dsi->regs))
		return PTR_ERR(dsi->regs);

	dsi->dsi_p_clk = devm_clk_get(&pdev->dev, "dsi_p_clk");
	if (IS_ERR(dsi->dsi_p_clk))
		return PTR_ERR(dsi->dsi_p_clk);

	dsi->dsi_p_rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
								   "dsi_p_rst");
	if (IS_ERR(dsi->dsi_p_rst))
		return PTR_ERR(dsi->dsi_p_rst);

	dsi->dsi_sys_clk = devm_clk_get(&pdev->dev, "dsi_sys_clk");
	if (IS_ERR(dsi->dsi_sys_clk))
		return PTR_ERR(dsi->dsi_sys_clk);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	dsi->dphy = devm_phy_get(&pdev->dev, "dphy");
	if (IS_ERR(dsi->dphy))
		return PTR_ERR(dsi->dphy);

	/* Registers are only reachable with the peripheral clock running. */
	ret = clk_prepare_enable(dsi->dsi_p_clk);
	if (ret)
		return ret;

	val = readl(dsi->regs + ID_REG);
	if (REV_VENDOR_ID(val) != 0xcad) {
		dev_err(&pdev->dev, "invalid vendor id\n");
		ret = -EINVAL;
		goto err_disable_pclk;
	}

	dsi->platform_ops = of_device_get_match_data(&pdev->dev);

	val = readl(dsi->regs + IP_CONF);
	dsi->direct_cmd_fifo_depth = 1 << (DIRCMD_FIFO_DEPTH(val) + 2);
	dsi->rx_fifo_depth = RX_FIFO_DEPTH(val);
	init_completion(&dsi->direct_cmd_comp);

	writel(0, dsi->regs + MCTL_MAIN_DATA_CTL);
	writel(0, dsi->regs + MCTL_MAIN_EN);
	writel(0, dsi->regs + MCTL_MAIN_PHY_CTL);

	/*
	 * We only support the DPI input, so force input->id to
	 * CDNS_DPI_INPUT.
	 */
	input->id = CDNS_DPI_INPUT;
	input->bridge.funcs = &cdns_dsi_bridge_funcs;
	input->bridge.of_node = pdev->dev.of_node;

	/* Mask all interrupts before registering the IRQ handler. */
	writel(0, dsi->regs + MCTL_MAIN_STS_CTL);
	writel(0, dsi->regs + MCTL_DPHY_ERR_CTL1);
	writel(0, dsi->regs + CMD_MODE_STS_CTL);
	writel(0, dsi->regs + DIRECT_CMD_STS_CTL);
	writel(0, dsi->regs + DIRECT_CMD_RD_STS_CTL);
	writel(0, dsi->regs + VID_MODE_STS_CTL);
	writel(0, dsi->regs + TVG_STS_CTL);
	writel(0, dsi->regs + DPI_IRQ_EN);

	ret = devm_request_irq(&pdev->dev, irq, cdns_dsi_interrupt, 0,
			       dev_name(&pdev->dev), dsi);
	if (ret)
		goto err_disable_pclk;

	pm_runtime_enable(&pdev->dev);
	dsi->base.dev = &pdev->dev;
	dsi->base.ops = &cdns_dsi_ops;

	if (dsi->platform_ops && dsi->platform_ops->init) {
		ret = dsi->platform_ops->init(dsi);
		if (ret != 0) {
			dev_err(&pdev->dev, "platform initialization failed: %d\n",
				ret);
			goto err_disable_runtime_pm;
		}
	}

	ret = mipi_dsi_host_register(&dsi->base);
	if (ret)
		goto err_deinit_platform;

	clk_disable_unprepare(dsi->dsi_p_clk);

	return 0;

err_deinit_platform:
	if (dsi->platform_ops && dsi->platform_ops->deinit)
		dsi->platform_ops->deinit(dsi);

err_disable_runtime_pm:
	pm_runtime_disable(&pdev->dev);

err_disable_pclk:
	clk_disable_unprepare(dsi->dsi_p_clk);

	return ret;
}

/* Remove: tear down in reverse order of probe. */
static void cdns_dsi_drm_remove(struct platform_device *pdev)
{
	struct cdns_dsi *dsi = platform_get_drvdata(pdev);

	mipi_dsi_host_unregister(&dsi->base);

	if (dsi->platform_ops && dsi->platform_ops->deinit)
		dsi->platform_ops->deinit(dsi);

	pm_runtime_disable(&pdev->dev);
}

static const struct of_device_id cdns_dsi_of_match[] = {
	{ .compatible = "cdns,dsi" },
#ifdef CONFIG_DRM_CDNS_DSI_J721E
	{ .compatible = "ti,j721e-dsi", .data = &dsi_ti_j721e_ops, },
#endif
	{ },
};
MODULE_DEVICE_TABLE(of, cdns_dsi_of_match);

static struct platform_driver cdns_dsi_platform_driver = {
	.probe  = cdns_dsi_drm_probe,
	.remove_new = cdns_dsi_drm_remove,
	.driver = {
		.name   = "cdns-dsi",
		.of_match_table = cdns_dsi_of_match,
		.pm = &cdns_dsi_pm_ops,
	},
};
module_platform_driver(cdns_dsi_platform_driver);

MODULE_AUTHOR("Boris Brezillon <[email protected]>");
MODULE_DESCRIPTION("Cadence DSI driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:cdns-dsi");
linux-master
drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Cadence MHDP8546 DP bridge driver.
 *
 * Copyright (C) 2020 Cadence Design Systems, Inc.
 *
 */

#include <linux/io.h>
#include <linux/iopoll.h>

#include <asm/unaligned.h>

#include <drm/display/drm_hdcp_helper.h>

#include "cdns-mhdp8546-hdcp.h"

/*
 * Pop one byte from the secure (SAPB) firmware mailbox; returns the byte or
 * a negative errno on poll timeout.  Caller must hold mbox_mutex.
 */
static int cdns_mhdp_secure_mailbox_read(struct cdns_mhdp_device *mhdp)
{
	int ret, empty;

	WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));

	ret = readx_poll_timeout(readl, mhdp->sapb_regs + CDNS_MAILBOX_EMPTY,
				 empty, !empty, MAILBOX_RETRY_US,
				 MAILBOX_TIMEOUT_US);
	if (ret < 0)
		return ret;

	return readl(mhdp->sapb_regs + CDNS_MAILBOX_RX_DATA) & 0xff;
}

/*
 * Push one byte into the secure firmware mailbox; waits for FIFO space.
 * Caller must hold mbox_mutex.
 */
static int cdns_mhdp_secure_mailbox_write(struct cdns_mhdp_device *mhdp,
					  u8 val)
{
	int ret, full;

	WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));

	ret = readx_poll_timeout(readl, mhdp->sapb_regs + CDNS_MAILBOX_FULL,
				 full, !full, MAILBOX_RETRY_US,
				 MAILBOX_TIMEOUT_US);
	if (ret < 0)
		return ret;

	writel(val, mhdp->sapb_regs + CDNS_MAILBOX_TX_DATA);

	return 0;
}

/*
 * Read and validate a 4-byte response header (opcode, module id, 16-bit BE
 * payload size).  On a mismatch the payload is drained so the mailbox stays
 * in sync.  The size check is waived for HDCP_TRAN_IS_REC_ID_VALID, whose
 * payload length is variable.
 */
static int cdns_mhdp_secure_mailbox_recv_header(struct cdns_mhdp_device *mhdp,
						u8 module_id,
						u8 opcode,
						u16 req_size)
{
	u32 mbox_size, i;
	u8 header[4];
	int ret;

	/* read the header of the message */
	for (i = 0; i < sizeof(header); i++) {
		ret = cdns_mhdp_secure_mailbox_read(mhdp);
		if (ret < 0)
			return ret;

		header[i] = ret;
	}

	mbox_size = get_unaligned_be16(header + 2);

	if (opcode != header[0] || module_id != header[1] ||
	    (opcode != HDCP_TRAN_IS_REC_ID_VALID && req_size != mbox_size)) {
		for (i = 0; i < mbox_size; i++)
			if (cdns_mhdp_secure_mailbox_read(mhdp) < 0)
				break;
		return -EINVAL;
	}

	return 0;
}

/* Read buff_size payload bytes from the secure mailbox into buff. */
static int cdns_mhdp_secure_mailbox_recv_data(struct cdns_mhdp_device *mhdp,
					      u8 *buff, u16 buff_size)
{
	int ret;
	u32 i;

	for (i = 0; i < buff_size; i++) {
		ret = cdns_mhdp_secure_mailbox_read(mhdp);
		if (ret < 0)
			return ret;

		buff[i] = ret;
	}

	return 0;
}

/* Send a request: 4-byte header (opcode, module id, BE16 size) + payload. */
static int cdns_mhdp_secure_mailbox_send(struct cdns_mhdp_device *mhdp,
					 u8 module_id,
					 u8 opcode,
					 u16 size,
					 u8 *message)
{
	u8 header[4];
	int ret;
	u32 i;

	header[0] = opcode;
	header[1] = module_id;
	put_unaligned_be16(size, header + 2);

	for (i = 0; i < sizeof(header); i++) {
		ret = cdns_mhdp_secure_mailbox_write(mhdp, header[i]);
		if (ret)
			return ret;
	}

	for (i = 0; i < size; i++) {
		ret = cdns_mhdp_secure_mailbox_write(mhdp, message[i]);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Query the firmware for the 16-bit HDCP port status word
 * (HDCP_TRAN_STATUS_CHANGE round-trip, big-endian in the response).
 */
static int cdns_mhdp_hdcp_get_status(struct cdns_mhdp_device *mhdp,
				     u16 *hdcp_port_status)
{
	u8 hdcp_status[HDCP_STATUS_SIZE];
	int ret;

	mutex_lock(&mhdp->mbox_mutex);
	ret = cdns_mhdp_secure_mailbox_send(mhdp, MB_MODULE_ID_HDCP_TX,
					    HDCP_TRAN_STATUS_CHANGE, 0, NULL);
	if (ret)
		goto err_get_hdcp_status;

	ret = cdns_mhdp_secure_mailbox_recv_header(mhdp, MB_MODULE_ID_HDCP_TX,
						   HDCP_TRAN_STATUS_CHANGE,
						   sizeof(hdcp_status));
	if (ret)
		goto err_get_hdcp_status;

	ret = cdns_mhdp_secure_mailbox_recv_data(mhdp, hdcp_status,
						 sizeof(hdcp_status));
	if (ret)
		goto err_get_hdcp_status;

	*hdcp_port_status = ((u16)(hdcp_status[0] << 8) | hdcp_status[1]);

err_get_hdcp_status:
	mutex_unlock(&mhdp->mbox_mutex);

	return ret;
}

/* Extract and log the last-error field from a port status word. */
static u8 cdns_mhdp_hdcp_handle_status(struct cdns_mhdp_device *mhdp,
				       u16 status)
{
	u8 err = GET_HDCP_PORT_STS_LAST_ERR(status);

	if (err)
		dev_dbg(mhdp->dev, "HDCP Error = %d", err);

	return err;
}

/* Tell the firmware whether the receiver IDs passed revocation checking. */
static int cdns_mhdp_hdcp_rx_id_valid_response(struct cdns_mhdp_device *mhdp,
					       u8 valid)
{
	int ret;

	mutex_lock(&mhdp->mbox_mutex);
	ret = cdns_mhdp_secure_mailbox_send(mhdp, MB_MODULE_ID_HDCP_TX,
					    HDCP_TRAN_RESPOND_RECEIVER_ID_VALID,
					    1, &valid);
	mutex_unlock(&mhdp->mbox_mutex);

	return ret;
}

/*
 * Fetch the receiver ID list: a 2-byte header (first byte = receiver count)
 * followed by 5 bytes per receiver.
 *
 * NOTE(review): the header is validated with req_size = sizeof(status) but
 * 2 bytes are then read — the size check is waived for this opcode in
 * recv_header, so this appears intentional; confirm against firmware docs.
 */
static int cdns_mhdp_hdcp_rx_id_valid(struct cdns_mhdp_device *mhdp,
				      u8 *recv_num, u8 *hdcp_rx_id)
{
	u8 rec_id_hdr[2];
	u8 status;
	int ret;

	mutex_lock(&mhdp->mbox_mutex);
	ret = cdns_mhdp_secure_mailbox_send(mhdp, MB_MODULE_ID_HDCP_TX,
					    HDCP_TRAN_IS_REC_ID_VALID, 0, NULL);
	if (ret)
		goto err_rx_id_valid;

	ret = cdns_mhdp_secure_mailbox_recv_header(mhdp, MB_MODULE_ID_HDCP_TX,
						   HDCP_TRAN_IS_REC_ID_VALID,
						   sizeof(status));
	if (ret)
		goto err_rx_id_valid;

	ret = cdns_mhdp_secure_mailbox_recv_data(mhdp, rec_id_hdr, 2);
	if (ret)
		goto err_rx_id_valid;

	*recv_num = rec_id_hdr[0];

	ret = cdns_mhdp_secure_mailbox_recv_data(mhdp, hdcp_rx_id, 5 * *recv_num);

err_rx_id_valid:
	mutex_unlock(&mhdp->mbox_mutex);

	return ret;
}

/* Reply to the firmware's km-stored query with a (possibly empty) km. */
static int cdns_mhdp_hdcp_km_stored_resp(struct cdns_mhdp_device *mhdp,
					 u32 size, u8 *km)
{
	int ret;

	mutex_lock(&mhdp->mbox_mutex);
	ret = cdns_mhdp_secure_mailbox_send(mhdp, MB_MODULE_ID_HDCP_TX,
					    HDCP2X_TX_RESPOND_KM, size, km);
	mutex_unlock(&mhdp->mbox_mutex);

	return ret;
}

/* Ask the firmware whether a pairing key (km) is already stored. */
static int cdns_mhdp_hdcp_tx_is_km_stored(struct cdns_mhdp_device *mhdp,
					  u8 *resp, u32 size)
{
	int ret;

	mutex_lock(&mhdp->mbox_mutex);
	ret = cdns_mhdp_secure_mailbox_send(mhdp, MB_MODULE_ID_HDCP_TX,
					    HDCP2X_TX_IS_KM_STORED, 0, NULL);
	if (ret)
		goto err_is_km_stored;

	ret = cdns_mhdp_secure_mailbox_recv_header(mhdp, MB_MODULE_ID_HDCP_TX,
						   HDCP2X_TX_IS_KM_STORED,
						   size);
	if (ret)
		goto err_is_km_stored;

	ret = cdns_mhdp_secure_mailbox_recv_data(mhdp, resp, size);
err_is_km_stored:
	mutex_unlock(&mhdp->mbox_mutex);

	return ret;
}

/* Push the one-byte HDCP transmitter configuration to the firmware. */
static int cdns_mhdp_hdcp_tx_config(struct cdns_mhdp_device *mhdp,
				    u8 hdcp_cfg)
{
	int ret;

	mutex_lock(&mhdp->mbox_mutex);
	ret = cdns_mhdp_secure_mailbox_send(mhdp, MB_MODULE_ID_HDCP_TX,
					    HDCP_TRAN_CONFIGURATION, 1, &hdcp_cfg);
	mutex_unlock(&mhdp->mbox_mutex);

	return ret;
}

/*
 * Apply an HDCP configuration (version bits + enable flag + content type 0),
 * wait for the firmware's status event and verify no error was reported.
 * Returns 0 on success, -1 on any failure.
 */
static int cdns_mhdp_hdcp_set_config(struct cdns_mhdp_device *mhdp,
				     u8 hdcp_config, bool enable)
{
	u16 hdcp_port_status;
	u32 ret_event;
	u8 hdcp_cfg;
	int ret;

	hdcp_cfg = hdcp_config | (enable ? 0x04 : 0) |
		   (HDCP_CONTENT_TYPE_0 << 3);
	cdns_mhdp_hdcp_tx_config(mhdp, hdcp_cfg);
	ret_event = cdns_mhdp_wait_for_sw_event(mhdp, CDNS_HDCP_TX_STATUS);
	if (!ret_event)
		return -1;

	ret = cdns_mhdp_hdcp_get_status(mhdp, &hdcp_port_status);
	if (ret || cdns_mhdp_hdcp_handle_status(mhdp, hdcp_port_status))
		return -1;

	return 0;
}

/*
 * Wait for a status event and check the AUTH bit (bit 0) of the port
 * status.  Returns 0 when authenticated, -1 otherwise.
 */
static int cdns_mhdp_hdcp_auth_check(struct cdns_mhdp_device *mhdp)
{
	u16 hdcp_port_status;
	u32 ret_event;
	int ret;

	ret_event = cdns_mhdp_wait_for_sw_event(mhdp, CDNS_HDCP_TX_STATUS);
	if (!ret_event)
		return -1;

	ret = cdns_mhdp_hdcp_get_status(mhdp, &hdcp_port_status);
	if (ret || cdns_mhdp_hdcp_handle_status(mhdp, hdcp_port_status))
		return -1;

	if (hdcp_port_status & 1) {
		dev_dbg(mhdp->dev, "Authentication completed successfully!\n");
		return 0;
	}

	dev_dbg(mhdp->dev, "Authentication failed\n");

	return -1;
}

/*
 * Receiver-ID round: fetch the ID list from firmware and unconditionally
 * respond "valid".
 *
 * NOTE(review): no revocation check is performed on the fetched IDs before
 * answering 1 — verify this is acceptable for the product's HDCP policy.
 */
static int cdns_mhdp_hdcp_check_receviers(struct cdns_mhdp_device *mhdp)
{
	u8 hdcp_rec_id[HDCP_MAX_RECEIVERS][HDCP_RECEIVER_ID_SIZE_BYTES];
	u8 hdcp_num_rec;
	u32 ret_event;

	ret_event = cdns_mhdp_wait_for_sw_event(mhdp,
						CDNS_HDCP_TX_IS_RCVR_ID_VALID);
	if (!ret_event)
		return -1;

	hdcp_num_rec = 0;
	memset(&hdcp_rec_id, 0, sizeof(hdcp_rec_id));
	cdns_mhdp_hdcp_rx_id_valid(mhdp, &hdcp_num_rec, (u8 *)hdcp_rec_id);
	cdns_mhdp_hdcp_rx_id_valid_response(mhdp, 1);

	return 0;
}

/*
 * HDCP 2.2 authentication: handle a possible concurrent status event,
 * answer the km-stored query (no stored km), then run the receiver-ID
 * round.
 */
static int cdns_mhdp_hdcp_auth_22(struct cdns_mhdp_device *mhdp)
{
	u8 resp[HDCP_STATUS_SIZE];
	u16 hdcp_port_status;
	u32 ret_event;
	int ret;

	dev_dbg(mhdp->dev, "HDCP: Start 2.2 Authentication\n");
	ret_event = cdns_mhdp_wait_for_sw_event(mhdp,
						CDNS_HDCP2_TX_IS_KM_STORED);
	if (!ret_event)
		return -1;

	if (ret_event & CDNS_HDCP_TX_STATUS) {
		mhdp->sw_events &= ~CDNS_HDCP_TX_STATUS;
		ret = cdns_mhdp_hdcp_get_status(mhdp, &hdcp_port_status);
		if (ret || cdns_mhdp_hdcp_handle_status(mhdp, hdcp_port_status))
			return -1;
	}

	cdns_mhdp_hdcp_tx_is_km_stored(mhdp, resp, sizeof(resp));
	cdns_mhdp_hdcp_km_stored_resp(mhdp, 0, NULL);

	if (cdns_mhdp_hdcp_check_receviers(mhdp))
		return -1;

	return 0;
}

/* HDCP 1.4 authentication reduces to the receiver-ID round. */
static inline int cdns_mhdp_hdcp_auth_14(struct cdns_mhdp_device *mhdp)
{
	dev_dbg(mhdp->dev, "HDCP: Starting 1.4 Authentication\n");
	return cdns_mhdp_hdcp_check_receviers(mhdp);
}

/*
 * Full authentication for the requested config; the final check is retried
 * once to ride out a transient failure.
 */
static int cdns_mhdp_hdcp_auth(struct cdns_mhdp_device *mhdp,
			       u8 hdcp_config)
{
	int ret;

	ret = cdns_mhdp_hdcp_set_config(mhdp, hdcp_config, true);
	if (ret)
		goto auth_failed;

	if (hdcp_config == HDCP_TX_1)
		ret = cdns_mhdp_hdcp_auth_14(mhdp);
	else
		ret = cdns_mhdp_hdcp_auth_22(mhdp);

	if (ret)
		goto auth_failed;

	ret = cdns_mhdp_hdcp_auth_check(mhdp);
	if (ret)
		ret = cdns_mhdp_hdcp_auth_check(mhdp);

auth_failed:
	return ret;
}

/* Disable HDCP by pushing an all-zero configuration. */
static int _cdns_mhdp_hdcp_disable(struct cdns_mhdp_device *mhdp)
{
	int ret;

	dev_dbg(mhdp->dev, "[%s:%d] HDCP is being disabled...\n",
		mhdp->connector.name, mhdp->connector.base.id);

	ret = cdns_mhdp_hdcp_set_config(mhdp, 0, false);

	return ret;
}

/*
 * Try to authenticate up to 3 times: HDCP 2.x first for either content
 * type, falling back to 1.4 for content type 0 only.
 *
 * NOTE(review): if content_type matches neither branch, ret is returned
 * uninitialized — confirm callers only pass TYPE0/TYPE1.
 */
static int _cdns_mhdp_hdcp_enable(struct cdns_mhdp_device *mhdp,
				  u8 content_type)
{
	int ret, tries = 3;
	u32 i;

	for (i = 0; i < tries; i++) {
		if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0 ||
		    content_type == DRM_MODE_HDCP_CONTENT_TYPE1) {
			ret = cdns_mhdp_hdcp_auth(mhdp, HDCP_TX_2);
			if (!ret)
				return 0;
			_cdns_mhdp_hdcp_disable(mhdp);
		}

		if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) {
			ret = cdns_mhdp_hdcp_auth(mhdp, HDCP_TX_1);
			if (!ret)
				return 0;
			_cdns_mhdp_hdcp_disable(mhdp);
		}
	}

	dev_err(mhdp->dev, "HDCP authentication failed (%d tries/%d)\n",
		tries, ret);

	return ret;
}

/*
 * Periodic link check: if the port lost authentication, re-run the enable
 * sequence and propagate DESIRED state on failure via prop_work.
 */
static int cdns_mhdp_hdcp_check_link(struct cdns_mhdp_device *mhdp)
{
	u16 hdcp_port_status;
	int ret = 0;

	mutex_lock(&mhdp->hdcp.mutex);
	if (mhdp->hdcp.value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		goto out;

	ret = cdns_mhdp_hdcp_get_status(mhdp, &hdcp_port_status);
	if (!ret && hdcp_port_status & HDCP_PORT_STS_AUTH)
		goto out;

	dev_err(mhdp->dev,
		"[%s:%d] HDCP link failed, retrying authentication\n",
		mhdp->connector.name, mhdp->connector.base.id);

	ret = _cdns_mhdp_hdcp_disable(mhdp);
	if (ret) {
		mhdp->hdcp.value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&mhdp->hdcp.prop_work);
		goto out;
	}

	ret = _cdns_mhdp_hdcp_enable(mhdp, mhdp->hdcp.hdcp_content_type);
	if (ret) {
		mhdp->hdcp.value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&mhdp->hdcp.prop_work);
	}
out:
	mutex_unlock(&mhdp->hdcp.mutex);

	return ret;
}

/* Delayed-work body: re-arm itself while the link stays healthy. */
static void cdns_mhdp_hdcp_check_work(struct work_struct *work)
{
	struct delayed_work *d_work = to_delayed_work(work);
	struct cdns_mhdp_hdcp *hdcp = container_of(d_work,
						   struct cdns_mhdp_hdcp,
						   check_work);
	struct cdns_mhdp_device *mhdp = container_of(hdcp,
						     struct cdns_mhdp_device,
						     hdcp);

	if (!cdns_mhdp_hdcp_check_link(mhdp))
		schedule_delayed_work(&hdcp->check_work,
				      DRM_HDCP_CHECK_PERIOD_MS);
}

/*
 * Property-update work: mirror the driver's HDCP value into the connector's
 * content_protection state under the connection_mutex.
 */
static void cdns_mhdp_hdcp_prop_work(struct work_struct *work)
{
	struct cdns_mhdp_hdcp *hdcp = container_of(work,
						   struct cdns_mhdp_hdcp,
						   prop_work);
	struct cdns_mhdp_device *mhdp = container_of(hdcp,
						     struct cdns_mhdp_device,
						     hdcp);
	struct drm_device *dev = mhdp->connector.dev;
	struct drm_connector_state *state;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	mutex_lock(&mhdp->hdcp.mutex);
	if (mhdp->hdcp.value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state = mhdp->connector.state;
		state->content_protection = mhdp->hdcp.value;
	}
	mutex_unlock(&mhdp->hdcp.mutex);
	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}

/* Load the 128-bit HDCP "lc" secret into the firmware. */
int cdns_mhdp_hdcp_set_lc(struct cdns_mhdp_device *mhdp, u8 *val)
{
	int ret;

	mutex_lock(&mhdp->mbox_mutex);
	ret = cdns_mhdp_secure_mailbox_send(mhdp, MB_MODULE_ID_HDCP_GENERAL,
					    HDCP_GENERAL_SET_LC_128,
					    16, val);
	mutex_unlock(&mhdp->mbox_mutex);

	return ret;
}

/* Load the HDCP 2.x public key parameters into the firmware. */
int
cdns_mhdp_hdcp_set_public_key_param(struct cdns_mhdp_device *mhdp,
				    struct cdns_hdcp_tx_public_key_param *val)
{
	int ret;

	mutex_lock(&mhdp->mbox_mutex);
	ret = cdns_mhdp_secure_mailbox_send(mhdp, MB_MODULE_ID_HDCP_TX,
					    HDCP2X_TX_SET_PUBLIC_KEY_PARAMS,
					    sizeof(*val), (u8 *)val);
	mutex_unlock(&mhdp->mbox_mutex);

	return ret;
}

/*
 * Public entry point: authenticate, mark the connector ENABLED and start
 * the periodic link check.
 */
int cdns_mhdp_hdcp_enable(struct cdns_mhdp_device *mhdp, u8 content_type)
{
	int ret;

	mutex_lock(&mhdp->hdcp.mutex);
	ret = _cdns_mhdp_hdcp_enable(mhdp, content_type);
	if (ret)
		goto out;

	mhdp->hdcp.hdcp_content_type = content_type;
	mhdp->hdcp.value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
	schedule_work(&mhdp->hdcp.prop_work);
	schedule_delayed_work(&mhdp->hdcp.check_work,
			      DRM_HDCP_CHECK_PERIOD_MS);
out:
	mutex_unlock(&mhdp->hdcp.mutex);

	return ret;
}

/*
 * Public entry point: mark the connector UNDESIRED, disable HDCP in
 * firmware and stop the periodic link check.
 */
int cdns_mhdp_hdcp_disable(struct cdns_mhdp_device *mhdp)
{
	int ret = 0;

	mutex_lock(&mhdp->hdcp.mutex);
	if (mhdp->hdcp.value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		mhdp->hdcp.value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
		schedule_work(&mhdp->hdcp.prop_work);
		ret = _cdns_mhdp_hdcp_disable(mhdp);
	}
	mutex_unlock(&mhdp->hdcp.mutex);

	cancel_delayed_work_sync(&mhdp->hdcp.check_work);

	return ret;
}

/* One-time initialization of the HDCP work items and lock. */
void cdns_mhdp_hdcp_init(struct cdns_mhdp_device *mhdp)
{
	INIT_DELAYED_WORK(&mhdp->hdcp.check_work, cdns_mhdp_hdcp_check_work);
	INIT_WORK(&mhdp->hdcp.prop_work, cdns_mhdp_hdcp_prop_work);
	mutex_init(&mhdp->hdcp.mutex);
}
linux-master
drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
// SPDX-License-Identifier: GPL-2.0 /* * Cadence MHDP8546 DP bridge driver. * * Copyright (C) 2020 Cadence Design Systems, Inc. * * Authors: Quentin Schulz <[email protected]> * Swapnil Jakhade <[email protected]> * Yuti Amonkar <[email protected]> * Tomi Valkeinen <[email protected]> * Jyri Sarha <[email protected]> * * TODO: * - Implement optimized mailbox communication using mailbox interrupts * - Add support for power management * - Add support for features like audio, MST and fast link training * - Implement request_fw_cancel to handle HW_STATE * - Fix asynchronous loading of firmware implementation * - Add DRM helper function for cdns_mhdp_lower_link_rate */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/firmware.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/irq.h> #include <linux/media-bus-format.h> #include <linux/module.h> #include <linux/of.h> #include <linux/phy/phy.h> #include <linux/phy/phy-dp.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/wait.h> #include <drm/display/drm_dp_helper.h> #include <drm/display/drm_hdcp_helper.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_atomic_state_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_connector.h> #include <drm/drm_edid.h> #include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <asm/unaligned.h> #include "cdns-mhdp8546-core.h" #include "cdns-mhdp8546-hdcp.h" #include "cdns-mhdp8546-j721e.h" static void cdns_mhdp_bridge_hpd_enable(struct drm_bridge *bridge) { struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); /* Enable SW event interrupts */ if (mhdp->bridge_attached) writel(readl(mhdp->regs + CDNS_APB_INT_MASK) & ~CDNS_APB_INT_MASK_SW_EVENT_INT, mhdp->regs + CDNS_APB_INT_MASK); } static void cdns_mhdp_bridge_hpd_disable(struct drm_bridge *bridge) { struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); 
	writel(readl(mhdp->regs + CDNS_APB_INT_MASK) |
	       CDNS_APB_INT_MASK_SW_EVENT_INT,
	       mhdp->regs + CDNS_APB_INT_MASK);
}

/*
 * Pop one byte from the firmware mailbox RX FIFO. Polls the EMPTY flag
 * (MAILBOX_RETRY_US interval, MAILBOX_TIMEOUT_US cap); caller must hold
 * mbox_mutex. Returns the byte (0..255) or a negative errno on timeout.
 */
static int cdns_mhdp_mailbox_read(struct cdns_mhdp_device *mhdp)
{
	int ret, empty;

	WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));

	ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_EMPTY,
				 empty, !empty, MAILBOX_RETRY_US,
				 MAILBOX_TIMEOUT_US);
	if (ret < 0)
		return ret;

	return readl(mhdp->regs + CDNS_MAILBOX_RX_DATA) & 0xff;
}

/*
 * Push one byte into the firmware mailbox TX FIFO, waiting for the FULL
 * flag to clear first. Caller must hold mbox_mutex.
 */
static int cdns_mhdp_mailbox_write(struct cdns_mhdp_device *mhdp, u8 val)
{
	int ret, full;

	WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));

	ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_FULL,
				 full, !full, MAILBOX_RETRY_US,
				 MAILBOX_TIMEOUT_US);
	if (ret < 0)
		return ret;

	writel(val, mhdp->regs + CDNS_MAILBOX_TX_DATA);

	return 0;
}

/*
 * Read and validate a 4-byte response header (opcode, module id, BE16
 * payload size). On a mismatch the stale payload is drained from the FIFO
 * so the mailbox stays in sync, and -EINVAL is returned.
 */
static int cdns_mhdp_mailbox_recv_header(struct cdns_mhdp_device *mhdp,
					 u8 module_id, u8 opcode,
					 u16 req_size)
{
	u32 mbox_size, i;
	u8 header[4];
	int ret;

	/* read the header of the message */
	for (i = 0; i < sizeof(header); i++) {
		ret = cdns_mhdp_mailbox_read(mhdp);
		if (ret < 0)
			return ret;

		header[i] = ret;
	}

	mbox_size = get_unaligned_be16(header + 2);

	if (opcode != header[0] || module_id != header[1] ||
	    req_size != mbox_size) {
		/*
		 * If the message in mailbox is not what we want, we need to
		 * clear the mailbox by reading its contents.
		 */
		for (i = 0; i < mbox_size; i++)
			if (cdns_mhdp_mailbox_read(mhdp) < 0)
				break;

		return -EINVAL;
	}

	return 0;
}

/* Read buff_size payload bytes from the mailbox into buff. */
static int cdns_mhdp_mailbox_recv_data(struct cdns_mhdp_device *mhdp,
				       u8 *buff, u16 buff_size)
{
	u32 i;
	int ret;

	for (i = 0; i < buff_size; i++) {
		ret = cdns_mhdp_mailbox_read(mhdp);
		if (ret < 0)
			return ret;

		buff[i] = ret;
	}

	return 0;
}

/*
 * Send a request: 4-byte header (opcode, module id, BE16 size) followed by
 * the payload, one byte at a time. Caller must hold mbox_mutex.
 */
static int cdns_mhdp_mailbox_send(struct cdns_mhdp_device *mhdp, u8 module_id,
				  u8 opcode, u16 size, u8 *message)
{
	u8 header[4];
	int ret, i;

	header[0] = opcode;
	header[1] = module_id;
	put_unaligned_be16(size, header + 2);

	for (i = 0; i < sizeof(header); i++) {
		ret = cdns_mhdp_mailbox_write(mhdp, header[i]);
		if (ret)
			return ret;
	}

	for (i = 0; i < size; i++) {
		ret = cdns_mhdp_mailbox_write(mhdp, message[i]);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Read a 32-bit controller register through the firmware mailbox
 * (GENERAL_REGISTER_READ). The response echoes the BE32 address followed
 * by the BE32 value; the echoed address is checked against the request.
 * On any failure *value is set to 0.
 */
static int cdns_mhdp_reg_read(struct cdns_mhdp_device *mhdp, u32 addr,
			      u32 *value)
{
	u8 msg[4], resp[8];
	int ret;

	put_unaligned_be32(addr, msg);

	mutex_lock(&mhdp->mbox_mutex);

	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_GENERAL,
				     GENERAL_REGISTER_READ,
				     sizeof(msg), msg);
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_GENERAL,
					    GENERAL_REGISTER_READ,
					    sizeof(resp));
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_data(mhdp, resp, sizeof(resp));
	if (ret)
		goto out;

	/* Returned address value should be the same as requested */
	if (memcmp(msg, resp, sizeof(msg))) {
		ret = -EINVAL;
		goto out;
	}

	*value = get_unaligned_be32(resp + 4);

out:
	mutex_unlock(&mhdp->mbox_mutex);
	if (ret) {
		dev_err(mhdp->dev, "Failed to read register\n");
		*value = 0;
	}

	return ret;
}

/*
 * Write a 32-bit value to a DPTX register via the mailbox
 * (DPTX_WRITE_REGISTER): BE16 address + BE32 value, no response expected.
 */
static int cdns_mhdp_reg_write(struct cdns_mhdp_device *mhdp, u16 addr,
			       u32 val)
{
	u8 msg[6];
	int ret;

	put_unaligned_be16(addr, msg);
	put_unaligned_be32(val, msg + 2);

	mutex_lock(&mhdp->mbox_mutex);

	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
				     DPTX_WRITE_REGISTER, sizeof(msg), msg);

	mutex_unlock(&mhdp->mbox_mutex);

	return ret;
}

/* Read-modify-write of a register bit field (continues below). */
static int cdns_mhdp_reg_write_bit(struct cdns_mhdp_device *mhdp, u16 addr, u8
start_bit, u8 bits_no, u32 val) { u8 field[8]; int ret; put_unaligned_be16(addr, field); field[2] = start_bit; field[3] = bits_no; put_unaligned_be32(val, field + 4); mutex_lock(&mhdp->mbox_mutex); ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX, DPTX_WRITE_FIELD, sizeof(field), field); mutex_unlock(&mhdp->mbox_mutex); return ret; } static int cdns_mhdp_dpcd_read(struct cdns_mhdp_device *mhdp, u32 addr, u8 *data, u16 len) { u8 msg[5], reg[5]; int ret; put_unaligned_be16(len, msg); put_unaligned_be24(addr, msg + 2); mutex_lock(&mhdp->mbox_mutex); ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX, DPTX_READ_DPCD, sizeof(msg), msg); if (ret) goto out; ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX, DPTX_READ_DPCD, sizeof(reg) + len); if (ret) goto out; ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg)); if (ret) goto out; ret = cdns_mhdp_mailbox_recv_data(mhdp, data, len); out: mutex_unlock(&mhdp->mbox_mutex); return ret; } static int cdns_mhdp_dpcd_write(struct cdns_mhdp_device *mhdp, u32 addr, u8 value) { u8 msg[6], reg[5]; int ret; put_unaligned_be16(1, msg); put_unaligned_be24(addr, msg + 2); msg[5] = value; mutex_lock(&mhdp->mbox_mutex); ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX, DPTX_WRITE_DPCD, sizeof(msg), msg); if (ret) goto out; ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX, DPTX_WRITE_DPCD, sizeof(reg)); if (ret) goto out; ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg)); if (ret) goto out; if (addr != get_unaligned_be24(reg + 2)) ret = -EINVAL; out: mutex_unlock(&mhdp->mbox_mutex); if (ret) dev_err(mhdp->dev, "dpcd write failed: %d\n", ret); return ret; } static int cdns_mhdp_set_firmware_active(struct cdns_mhdp_device *mhdp, bool enable) { u8 msg[5]; int ret, i; msg[0] = GENERAL_MAIN_CONTROL; msg[1] = MB_MODULE_ID_GENERAL; msg[2] = 0; msg[3] = 1; msg[4] = enable ? 
FW_ACTIVE : FW_STANDBY; mutex_lock(&mhdp->mbox_mutex); for (i = 0; i < sizeof(msg); i++) { ret = cdns_mhdp_mailbox_write(mhdp, msg[i]); if (ret) goto out; } /* read the firmware state */ ret = cdns_mhdp_mailbox_recv_data(mhdp, msg, sizeof(msg)); if (ret) goto out; ret = 0; out: mutex_unlock(&mhdp->mbox_mutex); if (ret < 0) dev_err(mhdp->dev, "set firmware active failed\n"); return ret; } static int cdns_mhdp_get_hpd_status(struct cdns_mhdp_device *mhdp) { u8 status; int ret; mutex_lock(&mhdp->mbox_mutex); ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX, DPTX_HPD_STATE, 0, NULL); if (ret) goto err_get_hpd; ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX, DPTX_HPD_STATE, sizeof(status)); if (ret) goto err_get_hpd; ret = cdns_mhdp_mailbox_recv_data(mhdp, &status, sizeof(status)); if (ret) goto err_get_hpd; mutex_unlock(&mhdp->mbox_mutex); dev_dbg(mhdp->dev, "%s: HPD %splugged\n", __func__, status ? "" : "un"); return status; err_get_hpd: mutex_unlock(&mhdp->mbox_mutex); return ret; } static int cdns_mhdp_get_edid_block(void *data, u8 *edid, unsigned int block, size_t length) { struct cdns_mhdp_device *mhdp = data; u8 msg[2], reg[2], i; int ret; mutex_lock(&mhdp->mbox_mutex); for (i = 0; i < 4; i++) { msg[0] = block / 2; msg[1] = block % 2; ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX, DPTX_GET_EDID, sizeof(msg), msg); if (ret) continue; ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX, DPTX_GET_EDID, sizeof(reg) + length); if (ret) continue; ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg)); if (ret) continue; ret = cdns_mhdp_mailbox_recv_data(mhdp, edid, length); if (ret) continue; if (reg[0] == length && reg[1] == block / 2) break; } mutex_unlock(&mhdp->mbox_mutex); if (ret) dev_err(mhdp->dev, "get block[%d] edid failed: %d\n", block, ret); return ret; } static int cdns_mhdp_read_hpd_event(struct cdns_mhdp_device *mhdp) { u8 event = 0; int ret; mutex_lock(&mhdp->mbox_mutex); ret = cdns_mhdp_mailbox_send(mhdp, 
MB_MODULE_ID_DP_TX, DPTX_READ_EVENT, 0, NULL); if (ret) goto out; ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX, DPTX_READ_EVENT, sizeof(event)); if (ret < 0) goto out; ret = cdns_mhdp_mailbox_recv_data(mhdp, &event, sizeof(event)); out: mutex_unlock(&mhdp->mbox_mutex); if (ret < 0) return ret; dev_dbg(mhdp->dev, "%s: %s%s%s%s\n", __func__, (event & DPTX_READ_EVENT_HPD_TO_HIGH) ? "TO_HIGH " : "", (event & DPTX_READ_EVENT_HPD_TO_LOW) ? "TO_LOW " : "", (event & DPTX_READ_EVENT_HPD_PULSE) ? "PULSE " : "", (event & DPTX_READ_EVENT_HPD_STATE) ? "HPD_STATE " : ""); return event; } static int cdns_mhdp_adjust_lt(struct cdns_mhdp_device *mhdp, unsigned int nlanes, unsigned int udelay, const u8 *lanes_data, u8 link_status[DP_LINK_STATUS_SIZE]) { u8 payload[7]; u8 hdr[5]; /* For DPCD read response header */ u32 addr; int ret; if (nlanes != 4 && nlanes != 2 && nlanes != 1) { dev_err(mhdp->dev, "invalid number of lanes: %u\n", nlanes); ret = -EINVAL; goto out; } payload[0] = nlanes; put_unaligned_be16(udelay, payload + 1); memcpy(payload + 3, lanes_data, nlanes); mutex_lock(&mhdp->mbox_mutex); ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX, DPTX_ADJUST_LT, sizeof(payload), payload); if (ret) goto out; /* Yes, read the DPCD read command response */ ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX, DPTX_READ_DPCD, sizeof(hdr) + DP_LINK_STATUS_SIZE); if (ret) goto out; ret = cdns_mhdp_mailbox_recv_data(mhdp, hdr, sizeof(hdr)); if (ret) goto out; addr = get_unaligned_be24(hdr + 2); if (addr != DP_LANE0_1_STATUS) goto out; ret = cdns_mhdp_mailbox_recv_data(mhdp, link_status, DP_LINK_STATUS_SIZE); out: mutex_unlock(&mhdp->mbox_mutex); if (ret) dev_err(mhdp->dev, "Failed to adjust Link Training.\n"); return ret; } /** * cdns_mhdp_link_power_up() - power up a DisplayPort link * @aux: DisplayPort AUX channel * @link: pointer to a structure containing the link configuration * * Returns 0 on success or a negative error code on failure. 
*/ static int cdns_mhdp_link_power_up(struct drm_dp_aux *aux, struct cdns_mhdp_link *link) { u8 value; int err; /* DP_SET_POWER register is only available on DPCD v1.1 and later */ if (link->revision < 0x11) return 0; err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); if (err < 0) return err; value &= ~DP_SET_POWER_MASK; value |= DP_SET_POWER_D0; err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); if (err < 0) return err; /* * According to the DP 1.1 specification, a "Sink Device must exit the * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink * Control Field" (register 0x600). */ usleep_range(1000, 2000); return 0; } /** * cdns_mhdp_link_power_down() - power down a DisplayPort link * @aux: DisplayPort AUX channel * @link: pointer to a structure containing the link configuration * * Returns 0 on success or a negative error code on failure. */ static int cdns_mhdp_link_power_down(struct drm_dp_aux *aux, struct cdns_mhdp_link *link) { u8 value; int err; /* DP_SET_POWER register is only available on DPCD v1.1 and later */ if (link->revision < 0x11) return 0; err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); if (err < 0) return err; value &= ~DP_SET_POWER_MASK; value |= DP_SET_POWER_D3; err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); if (err < 0) return err; return 0; } /** * cdns_mhdp_link_configure() - configure a DisplayPort link * @aux: DisplayPort AUX channel * @link: pointer to a structure containing the link configuration * * Returns 0 on success or a negative error code on failure. 
*/ static int cdns_mhdp_link_configure(struct drm_dp_aux *aux, struct cdns_mhdp_link *link) { u8 values[2]; int err; values[0] = drm_dp_link_rate_to_bw_code(link->rate); values[1] = link->num_lanes; if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING) values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values)); if (err < 0) return err; return 0; } static unsigned int cdns_mhdp_max_link_rate(struct cdns_mhdp_device *mhdp) { return min(mhdp->host.link_rate, mhdp->sink.link_rate); } static u8 cdns_mhdp_max_num_lanes(struct cdns_mhdp_device *mhdp) { return min(mhdp->sink.lanes_cnt, mhdp->host.lanes_cnt); } static u8 cdns_mhdp_eq_training_pattern_supported(struct cdns_mhdp_device *mhdp) { return fls(mhdp->host.pattern_supp & mhdp->sink.pattern_supp); } static bool cdns_mhdp_get_ssc_supported(struct cdns_mhdp_device *mhdp) { /* Check if SSC is supported by both sides */ return mhdp->host.ssc && mhdp->sink.ssc; } static enum drm_connector_status cdns_mhdp_detect(struct cdns_mhdp_device *mhdp) { dev_dbg(mhdp->dev, "%s: %d\n", __func__, mhdp->plugged); if (mhdp->plugged) return connector_status_connected; else return connector_status_disconnected; } static int cdns_mhdp_check_fw_version(struct cdns_mhdp_device *mhdp) { u32 major_num, minor_num, revision; u32 fw_ver, lib_ver; fw_ver = (readl(mhdp->regs + CDNS_VER_H) << 8) | readl(mhdp->regs + CDNS_VER_L); lib_ver = (readl(mhdp->regs + CDNS_LIB_H_ADDR) << 8) | readl(mhdp->regs + CDNS_LIB_L_ADDR); if (lib_ver < 33984) { /* * Older FW versions with major number 1, used to store FW * version information by storing repository revision number * in registers. This is for identifying these FW versions. 
*/ major_num = 1; minor_num = 2; if (fw_ver == 26098) { revision = 15; } else if (lib_ver == 0 && fw_ver == 0) { revision = 17; } else { dev_err(mhdp->dev, "Unsupported FW version: fw_ver = %u, lib_ver = %u\n", fw_ver, lib_ver); return -ENODEV; } } else { /* To identify newer FW versions with major number 2 onwards. */ major_num = fw_ver / 10000; minor_num = (fw_ver / 100) % 100; revision = (fw_ver % 10000) % 100; } dev_dbg(mhdp->dev, "FW version: v%u.%u.%u\n", major_num, minor_num, revision); return 0; } static int cdns_mhdp_fw_activate(const struct firmware *fw, struct cdns_mhdp_device *mhdp) { unsigned int reg; int ret; /* Release uCPU reset and stall it. */ writel(CDNS_CPU_STALL, mhdp->regs + CDNS_APB_CTRL); memcpy_toio(mhdp->regs + CDNS_MHDP_IMEM, fw->data, fw->size); /* Leave debug mode, release stall */ writel(0, mhdp->regs + CDNS_APB_CTRL); /* * Wait for the KEEP_ALIVE "message" on the first 8 bits. * Updated each sched "tick" (~2ms) */ ret = readl_poll_timeout(mhdp->regs + CDNS_KEEP_ALIVE, reg, reg & CDNS_KEEP_ALIVE_MASK, 500, CDNS_KEEP_ALIVE_TIMEOUT); if (ret) { dev_err(mhdp->dev, "device didn't give any life sign: reg %d\n", reg); return ret; } ret = cdns_mhdp_check_fw_version(mhdp); if (ret) return ret; /* Init events to 0 as it's not cleared by FW at boot but on read */ readl(mhdp->regs + CDNS_SW_EVENT0); readl(mhdp->regs + CDNS_SW_EVENT1); readl(mhdp->regs + CDNS_SW_EVENT2); readl(mhdp->regs + CDNS_SW_EVENT3); /* Activate uCPU */ ret = cdns_mhdp_set_firmware_active(mhdp, true); if (ret) return ret; spin_lock(&mhdp->start_lock); mhdp->hw_state = MHDP_HW_READY; /* * Here we must keep the lock while enabling the interrupts * since it would otherwise be possible that interrupt enable * code is executed after the bridge is detached. The similar * situation is not possible in attach()/detach() callbacks * since the hw_state changes from MHDP_HW_READY to * MHDP_HW_STOPPED happens only due to driver removal when * bridge should already be detached. 
*/ cdns_mhdp_bridge_hpd_enable(&mhdp->bridge); spin_unlock(&mhdp->start_lock); wake_up(&mhdp->fw_load_wq); dev_dbg(mhdp->dev, "DP FW activated\n"); return 0; } static void cdns_mhdp_fw_cb(const struct firmware *fw, void *context) { struct cdns_mhdp_device *mhdp = context; bool bridge_attached; int ret; dev_dbg(mhdp->dev, "firmware callback\n"); if (!fw || !fw->data) { dev_err(mhdp->dev, "%s: No firmware.\n", __func__); return; } ret = cdns_mhdp_fw_activate(fw, mhdp); release_firmware(fw); if (ret) return; /* * XXX how to make sure the bridge is still attached when * calling drm_kms_helper_hotplug_event() after releasing * the lock? We should not hold the spin lock when * calling drm_kms_helper_hotplug_event() since it may * cause a dead lock. FB-dev console calls detect from the * same thread just down the call stack started here. */ spin_lock(&mhdp->start_lock); bridge_attached = mhdp->bridge_attached; spin_unlock(&mhdp->start_lock); if (bridge_attached) { if (mhdp->connector.dev) drm_kms_helper_hotplug_event(mhdp->bridge.dev); else drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp)); } } static int cdns_mhdp_load_firmware(struct cdns_mhdp_device *mhdp) { int ret; ret = request_firmware_nowait(THIS_MODULE, true, FW_NAME, mhdp->dev, GFP_KERNEL, mhdp, cdns_mhdp_fw_cb); if (ret) { dev_err(mhdp->dev, "failed to load firmware (%s), ret: %d\n", FW_NAME, ret); return ret; } return 0; } static ssize_t cdns_mhdp_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { struct cdns_mhdp_device *mhdp = dev_get_drvdata(aux->dev); int ret; if (msg->request != DP_AUX_NATIVE_WRITE && msg->request != DP_AUX_NATIVE_READ) return -EOPNOTSUPP; if (msg->request == DP_AUX_NATIVE_WRITE) { const u8 *buf = msg->buffer; unsigned int i; for (i = 0; i < msg->size; ++i) { ret = cdns_mhdp_dpcd_write(mhdp, msg->address + i, buf[i]); if (!ret) continue; dev_err(mhdp->dev, "Failed to write DPCD addr %u\n", msg->address + i); return ret; } } else { ret = cdns_mhdp_dpcd_read(mhdp, 
msg->address, msg->buffer, msg->size); if (ret) { dev_err(mhdp->dev, "Failed to read DPCD addr %u\n", msg->address); return ret; } } return msg->size; } static int cdns_mhdp_link_training_init(struct cdns_mhdp_device *mhdp) { union phy_configure_opts phy_cfg; u32 reg32; int ret; drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); /* Reset PHY configuration */ reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1); if (!mhdp->host.scrambler) reg32 |= CDNS_PHY_SCRAMBLER_BYPASS; cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32); cdns_mhdp_reg_write(mhdp, CDNS_DP_ENHNCD, mhdp->sink.enhanced & mhdp->host.enhanced); cdns_mhdp_reg_write(mhdp, CDNS_DP_LANE_EN, CDNS_DP_LANE_EN_LANES(mhdp->link.num_lanes)); cdns_mhdp_link_configure(&mhdp->aux, &mhdp->link); phy_cfg.dp.link_rate = mhdp->link.rate / 100; phy_cfg.dp.lanes = mhdp->link.num_lanes; memset(phy_cfg.dp.voltage, 0, sizeof(phy_cfg.dp.voltage)); memset(phy_cfg.dp.pre, 0, sizeof(phy_cfg.dp.pre)); phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp); phy_cfg.dp.set_lanes = true; phy_cfg.dp.set_rate = true; phy_cfg.dp.set_voltages = true; ret = phy_configure(mhdp->phy, &phy_cfg); if (ret) { dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n", __func__, ret); return ret; } cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_EN | CDNS_PHY_TRAINING_TYPE(1) | CDNS_PHY_SCRAMBLER_BYPASS); drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE); return 0; } static void cdns_mhdp_get_adjust_train(struct cdns_mhdp_device *mhdp, u8 link_status[DP_LINK_STATUS_SIZE], u8 lanes_data[CDNS_DP_MAX_NUM_LANES], union phy_configure_opts *phy_cfg) { u8 adjust, max_pre_emph, max_volt_swing; u8 set_volt, set_pre; unsigned int i; max_pre_emph = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis) << DP_TRAIN_PRE_EMPHASIS_SHIFT; max_volt_swing = CDNS_VOLT_SWING(mhdp->host.volt_swing); for (i = 0; i < mhdp->link.num_lanes; 
i++) { /* Check if Voltage swing and pre-emphasis are within limits */ adjust = drm_dp_get_adjust_request_voltage(link_status, i); set_volt = min(adjust, max_volt_swing); adjust = drm_dp_get_adjust_request_pre_emphasis(link_status, i); set_pre = min(adjust, max_pre_emph) >> DP_TRAIN_PRE_EMPHASIS_SHIFT; /* * Voltage swing level and pre-emphasis level combination is * not allowed: leaving pre-emphasis as-is, and adjusting * voltage swing. */ if (set_volt + set_pre > 3) set_volt = 3 - set_pre; phy_cfg->dp.voltage[i] = set_volt; lanes_data[i] = set_volt; if (set_volt == max_volt_swing) lanes_data[i] |= DP_TRAIN_MAX_SWING_REACHED; phy_cfg->dp.pre[i] = set_pre; lanes_data[i] |= (set_pre << DP_TRAIN_PRE_EMPHASIS_SHIFT); if (set_pre == (max_pre_emph >> DP_TRAIN_PRE_EMPHASIS_SHIFT)) lanes_data[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; } } static void cdns_mhdp_set_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE], unsigned int lane, u8 volt) { unsigned int s = ((lane & 1) ? DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1); link_status[idx] &= ~(DP_ADJUST_VOLTAGE_SWING_LANE0_MASK << s); link_status[idx] |= volt << s; } static void cdns_mhdp_set_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE], unsigned int lane, u8 pre_emphasis) { unsigned int s = ((lane & 1) ? 
DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1); link_status[idx] &= ~(DP_ADJUST_PRE_EMPHASIS_LANE0_MASK << s); link_status[idx] |= pre_emphasis << s; } static void cdns_mhdp_adjust_requested_eq(struct cdns_mhdp_device *mhdp, u8 link_status[DP_LINK_STATUS_SIZE]) { u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis); u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing); unsigned int i; u8 volt, pre; for (i = 0; i < mhdp->link.num_lanes; i++) { volt = drm_dp_get_adjust_request_voltage(link_status, i); pre = drm_dp_get_adjust_request_pre_emphasis(link_status, i); if (volt + pre > 3) cdns_mhdp_set_adjust_request_voltage(link_status, i, 3 - pre); if (mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING) cdns_mhdp_set_adjust_request_voltage(link_status, i, max_volt); if (mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS) cdns_mhdp_set_adjust_request_pre_emphasis(link_status, i, max_pre); } } static void cdns_mhdp_print_lt_status(const char *prefix, struct cdns_mhdp_device *mhdp, union phy_configure_opts *phy_cfg) { char vs[8] = "0/0/0/0"; char pe[8] = "0/0/0/0"; unsigned int i; for (i = 0; i < mhdp->link.num_lanes; i++) { vs[i * 2] = '0' + phy_cfg->dp.voltage[i]; pe[i * 2] = '0' + phy_cfg->dp.pre[i]; } vs[i * 2 - 1] = '\0'; pe[i * 2 - 1] = '\0'; dev_dbg(mhdp->dev, "%s, %u lanes, %u Mbps, vs %s, pe %s\n", prefix, mhdp->link.num_lanes, mhdp->link.rate / 100, vs, pe); } static bool cdns_mhdp_link_training_channel_eq(struct cdns_mhdp_device *mhdp, u8 eq_tps, unsigned int training_interval) { u8 lanes_data[CDNS_DP_MAX_NUM_LANES], fail_counter_short = 0; u8 link_status[DP_LINK_STATUS_SIZE]; union phy_configure_opts phy_cfg; u32 reg32; int ret; bool r; dev_dbg(mhdp->dev, "Starting EQ phase\n"); /* Enable link training TPS[eq_tps] in PHY */ reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_EN | CDNS_PHY_TRAINING_TYPE(eq_tps); if (eq_tps != 4) reg32 |= CDNS_PHY_SCRAMBLER_BYPASS; 
cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32); drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET, (eq_tps != 4) ? eq_tps | DP_LINK_SCRAMBLING_DISABLE : CDNS_DP_TRAINING_PATTERN_4); drm_dp_dpcd_read_link_status(&mhdp->aux, link_status); do { cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data, &phy_cfg); phy_cfg.dp.lanes = mhdp->link.num_lanes; phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp); phy_cfg.dp.set_lanes = false; phy_cfg.dp.set_rate = false; phy_cfg.dp.set_voltages = true; ret = phy_configure(mhdp->phy, &phy_cfg); if (ret) { dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n", __func__, ret); goto err; } cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes, training_interval, lanes_data, link_status); r = drm_dp_clock_recovery_ok(link_status, mhdp->link.num_lanes); if (!r) goto err; if (drm_dp_channel_eq_ok(link_status, mhdp->link.num_lanes)) { cdns_mhdp_print_lt_status("EQ phase ok", mhdp, &phy_cfg); return true; } fail_counter_short++; cdns_mhdp_adjust_requested_eq(mhdp, link_status); } while (fail_counter_short < 5); err: cdns_mhdp_print_lt_status("EQ phase failed", mhdp, &phy_cfg); return false; } static void cdns_mhdp_adjust_requested_cr(struct cdns_mhdp_device *mhdp, u8 link_status[DP_LINK_STATUS_SIZE], u8 *req_volt, u8 *req_pre) { const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing); const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis); unsigned int i; for (i = 0; i < mhdp->link.num_lanes; i++) { u8 val; val = mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING ? max_volt : req_volt[i]; cdns_mhdp_set_adjust_request_voltage(link_status, i, val); val = mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS ? 
max_pre : req_pre[i]; cdns_mhdp_set_adjust_request_pre_emphasis(link_status, i, val); } } static void cdns_mhdp_validate_cr(struct cdns_mhdp_device *mhdp, bool *cr_done, bool *same_before_adjust, bool *max_swing_reached, u8 before_cr[CDNS_DP_MAX_NUM_LANES], u8 after_cr[DP_LINK_STATUS_SIZE], u8 *req_volt, u8 *req_pre) { const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing); const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis); bool same_pre, same_volt; unsigned int i; u8 adjust; *same_before_adjust = false; *max_swing_reached = false; *cr_done = drm_dp_clock_recovery_ok(after_cr, mhdp->link.num_lanes); for (i = 0; i < mhdp->link.num_lanes; i++) { adjust = drm_dp_get_adjust_request_voltage(after_cr, i); req_volt[i] = min(adjust, max_volt); adjust = drm_dp_get_adjust_request_pre_emphasis(after_cr, i) >> DP_TRAIN_PRE_EMPHASIS_SHIFT; req_pre[i] = min(adjust, max_pre); same_pre = (before_cr[i] & DP_TRAIN_PRE_EMPHASIS_MASK) == req_pre[i] << DP_TRAIN_PRE_EMPHASIS_SHIFT; same_volt = (before_cr[i] & DP_TRAIN_VOLTAGE_SWING_MASK) == req_volt[i]; if (same_pre && same_volt) *same_before_adjust = true; /* 3.1.5.2 in DP Standard v1.4. 
Table 3-1 */ if (!*cr_done && req_volt[i] + req_pre[i] >= 3) { *max_swing_reached = true; return; } } } static bool cdns_mhdp_link_training_cr(struct cdns_mhdp_device *mhdp) { u8 lanes_data[CDNS_DP_MAX_NUM_LANES], fail_counter_short = 0, fail_counter_cr_long = 0; u8 link_status[DP_LINK_STATUS_SIZE]; bool cr_done; union phy_configure_opts phy_cfg; int ret; dev_dbg(mhdp->dev, "Starting CR phase\n"); ret = cdns_mhdp_link_training_init(mhdp); if (ret) goto err; drm_dp_dpcd_read_link_status(&mhdp->aux, link_status); do { u8 requested_adjust_volt_swing[CDNS_DP_MAX_NUM_LANES] = {}; u8 requested_adjust_pre_emphasis[CDNS_DP_MAX_NUM_LANES] = {}; bool same_before_adjust, max_swing_reached; cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data, &phy_cfg); phy_cfg.dp.lanes = mhdp->link.num_lanes; phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp); phy_cfg.dp.set_lanes = false; phy_cfg.dp.set_rate = false; phy_cfg.dp.set_voltages = true; ret = phy_configure(mhdp->phy, &phy_cfg); if (ret) { dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n", __func__, ret); goto err; } cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes, 100, lanes_data, link_status); cdns_mhdp_validate_cr(mhdp, &cr_done, &same_before_adjust, &max_swing_reached, lanes_data, link_status, requested_adjust_volt_swing, requested_adjust_pre_emphasis); if (max_swing_reached) { dev_err(mhdp->dev, "CR: max swing reached\n"); goto err; } if (cr_done) { cdns_mhdp_print_lt_status("CR phase ok", mhdp, &phy_cfg); return true; } /* Not all CR_DONE bits set */ fail_counter_cr_long++; if (same_before_adjust) { fail_counter_short++; continue; } fail_counter_short = 0; /* * Voltage swing/pre-emphasis adjust requested * during CR phase */ cdns_mhdp_adjust_requested_cr(mhdp, link_status, requested_adjust_volt_swing, requested_adjust_pre_emphasis); } while (fail_counter_short < 5 && fail_counter_cr_long < 10); err: cdns_mhdp_print_lt_status("CR phase failed", mhdp, &phy_cfg); return false; } static void 
cdns_mhdp_lower_link_rate(struct cdns_mhdp_link *link) { switch (drm_dp_link_rate_to_bw_code(link->rate)) { case DP_LINK_BW_2_7: link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_1_62); break; case DP_LINK_BW_5_4: link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_2_7); break; case DP_LINK_BW_8_1: link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4); break; } } static int cdns_mhdp_link_training(struct cdns_mhdp_device *mhdp, unsigned int training_interval) { u32 reg32; const u8 eq_tps = cdns_mhdp_eq_training_pattern_supported(mhdp); int ret; while (1) { if (!cdns_mhdp_link_training_cr(mhdp)) { if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) != DP_LINK_BW_1_62) { dev_dbg(mhdp->dev, "Reducing link rate during CR phase\n"); cdns_mhdp_lower_link_rate(&mhdp->link); continue; } else if (mhdp->link.num_lanes > 1) { dev_dbg(mhdp->dev, "Reducing lanes number during CR phase\n"); mhdp->link.num_lanes >>= 1; mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp); continue; } dev_err(mhdp->dev, "Link training failed during CR phase\n"); goto err; } if (cdns_mhdp_link_training_channel_eq(mhdp, eq_tps, training_interval)) break; if (mhdp->link.num_lanes > 1) { dev_dbg(mhdp->dev, "Reducing lanes number during EQ phase\n"); mhdp->link.num_lanes >>= 1; continue; } else if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) != DP_LINK_BW_1_62) { dev_dbg(mhdp->dev, "Reducing link rate during EQ phase\n"); cdns_mhdp_lower_link_rate(&mhdp->link); mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp); continue; } dev_err(mhdp->dev, "Link training failed during EQ phase\n"); goto err; } dev_dbg(mhdp->dev, "Link training ok. Lanes: %u, Rate %u Mbps\n", mhdp->link.num_lanes, mhdp->link.rate / 100); drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET, mhdp->host.scrambler ? 
0 : DP_LINK_SCRAMBLING_DISABLE); ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &reg32); if (ret < 0) { dev_err(mhdp->dev, "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n", ret); return ret; } reg32 &= ~GENMASK(1, 0); reg32 |= CDNS_DP_NUM_LANES(mhdp->link.num_lanes); reg32 |= CDNS_DP_WR_FAILING_EDGE_VSYNC; reg32 |= CDNS_DP_FRAMER_EN; cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, reg32); /* Reset PHY config */ reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1); if (!mhdp->host.scrambler) reg32 |= CDNS_PHY_SCRAMBLER_BYPASS; cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32); return 0; err: /* Reset PHY config */ reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1); if (!mhdp->host.scrambler) reg32 |= CDNS_PHY_SCRAMBLER_BYPASS; cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32); drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); return -EIO; } static u32 cdns_mhdp_get_training_interval_us(struct cdns_mhdp_device *mhdp, u32 interval) { if (interval == 0) return 400; if (interval < 5) return 4000 << (interval - 1); dev_err(mhdp->dev, "wrong training interval returned by DPCD: %d\n", interval); return 0; } static void cdns_mhdp_fill_host_caps(struct cdns_mhdp_device *mhdp) { unsigned int link_rate; /* Get source capabilities based on PHY attributes */ mhdp->host.lanes_cnt = mhdp->phy->attrs.bus_width; if (!mhdp->host.lanes_cnt) mhdp->host.lanes_cnt = 4; link_rate = mhdp->phy->attrs.max_link_rate; if (!link_rate) link_rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_8_1); else /* PHY uses Mb/s, DRM uses tens of kb/s. 
*/ link_rate *= 100; mhdp->host.link_rate = link_rate; mhdp->host.volt_swing = CDNS_VOLT_SWING(3); mhdp->host.pre_emphasis = CDNS_PRE_EMPHASIS(3); mhdp->host.pattern_supp = CDNS_SUPPORT_TPS(1) | CDNS_SUPPORT_TPS(2) | CDNS_SUPPORT_TPS(3) | CDNS_SUPPORT_TPS(4); mhdp->host.lane_mapping = CDNS_LANE_MAPPING_NORMAL; mhdp->host.fast_link = false; mhdp->host.enhanced = true; mhdp->host.scrambler = true; mhdp->host.ssc = false; } static void cdns_mhdp_fill_sink_caps(struct cdns_mhdp_device *mhdp, u8 dpcd[DP_RECEIVER_CAP_SIZE]) { mhdp->sink.link_rate = mhdp->link.rate; mhdp->sink.lanes_cnt = mhdp->link.num_lanes; mhdp->sink.enhanced = !!(mhdp->link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING); /* Set SSC support */ mhdp->sink.ssc = !!(dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5); /* Set TPS support */ mhdp->sink.pattern_supp = CDNS_SUPPORT_TPS(1) | CDNS_SUPPORT_TPS(2); if (drm_dp_tps3_supported(dpcd)) mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(3); if (drm_dp_tps4_supported(dpcd)) mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(4); /* Set fast link support */ mhdp->sink.fast_link = !!(dpcd[DP_MAX_DOWNSPREAD] & DP_NO_AUX_HANDSHAKE_LINK_TRAINING); } static int cdns_mhdp_link_up(struct cdns_mhdp_device *mhdp) { u8 dpcd[DP_RECEIVER_CAP_SIZE], amp[2]; u32 resp, interval, interval_us; u8 ext_cap_chk = 0; unsigned int addr; int err; WARN_ON(!mutex_is_locked(&mhdp->link_mutex)); drm_dp_dpcd_readb(&mhdp->aux, DP_TRAINING_AUX_RD_INTERVAL, &ext_cap_chk); if (ext_cap_chk & DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT) addr = DP_DP13_DPCD_REV; else addr = DP_DPCD_REV; err = drm_dp_dpcd_read(&mhdp->aux, addr, dpcd, DP_RECEIVER_CAP_SIZE); if (err < 0) { dev_err(mhdp->dev, "Failed to read receiver capabilities\n"); return err; } mhdp->link.revision = dpcd[0]; mhdp->link.rate = drm_dp_bw_code_to_link_rate(dpcd[1]); mhdp->link.num_lanes = dpcd[2] & DP_MAX_LANE_COUNT_MASK; if (dpcd[2] & DP_ENHANCED_FRAME_CAP) mhdp->link.capabilities |= DP_LINK_CAP_ENHANCED_FRAMING; dev_dbg(mhdp->dev, "Set sink 
device power state via DPCD\n"); cdns_mhdp_link_power_up(&mhdp->aux, &mhdp->link); cdns_mhdp_fill_sink_caps(mhdp, dpcd); mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp); mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp); /* Disable framer for link training */ err = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp); if (err < 0) { dev_err(mhdp->dev, "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n", err); return err; } resp &= ~CDNS_DP_FRAMER_EN; cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp); /* Spread AMP if required, enable 8b/10b coding */ amp[0] = cdns_mhdp_get_ssc_supported(mhdp) ? DP_SPREAD_AMP_0_5 : 0; amp[1] = DP_SET_ANSI_8B10B; drm_dp_dpcd_write(&mhdp->aux, DP_DOWNSPREAD_CTRL, amp, 2); if (mhdp->host.fast_link & mhdp->sink.fast_link) { dev_err(mhdp->dev, "fastlink not supported\n"); return -EOPNOTSUPP; } interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & DP_TRAINING_AUX_RD_MASK; interval_us = cdns_mhdp_get_training_interval_us(mhdp, interval); if (!interval_us || cdns_mhdp_link_training(mhdp, interval_us)) { dev_err(mhdp->dev, "Link training failed. 
Exiting.\n"); return -EIO; } mhdp->link_up = true; return 0; } static void cdns_mhdp_link_down(struct cdns_mhdp_device *mhdp) { WARN_ON(!mutex_is_locked(&mhdp->link_mutex)); if (mhdp->plugged) cdns_mhdp_link_power_down(&mhdp->aux, &mhdp->link); mhdp->link_up = false; } static struct edid *cdns_mhdp_get_edid(struct cdns_mhdp_device *mhdp, struct drm_connector *connector) { if (!mhdp->plugged) return NULL; return drm_do_get_edid(connector, cdns_mhdp_get_edid_block, mhdp); } static int cdns_mhdp_get_modes(struct drm_connector *connector) { struct cdns_mhdp_device *mhdp = connector_to_mhdp(connector); struct edid *edid; int num_modes; if (!mhdp->plugged) return 0; edid = cdns_mhdp_get_edid(mhdp, connector); if (!edid) { dev_err(mhdp->dev, "Failed to read EDID\n"); return 0; } drm_connector_update_edid_property(connector, edid); num_modes = drm_add_edid_modes(connector, edid); kfree(edid); /* * HACK: Warn about unsupported display formats until we deal * with them correctly. */ if (connector->display_info.color_formats && !(connector->display_info.color_formats & mhdp->display_fmt.color_format)) dev_warn(mhdp->dev, "%s: No supported color_format found (0x%08x)\n", __func__, connector->display_info.color_formats); if (connector->display_info.bpc && connector->display_info.bpc < mhdp->display_fmt.bpc) dev_warn(mhdp->dev, "%s: Display bpc only %d < %d\n", __func__, connector->display_info.bpc, mhdp->display_fmt.bpc); return num_modes; } static int cdns_mhdp_connector_detect(struct drm_connector *conn, struct drm_modeset_acquire_ctx *ctx, bool force) { struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn); return cdns_mhdp_detect(mhdp); } static u32 cdns_mhdp_get_bpp(struct cdns_mhdp_display_fmt *fmt) { u32 bpp; if (fmt->y_only) return fmt->bpc; switch (fmt->color_format) { case DRM_COLOR_FORMAT_RGB444: case DRM_COLOR_FORMAT_YCBCR444: bpp = fmt->bpc * 3; break; case DRM_COLOR_FORMAT_YCBCR422: bpp = fmt->bpc * 2; break; case DRM_COLOR_FORMAT_YCBCR420: bpp = fmt->bpc * 3 / 
2; break; default: bpp = fmt->bpc * 3; WARN_ON(1); } return bpp; } static bool cdns_mhdp_bandwidth_ok(struct cdns_mhdp_device *mhdp, const struct drm_display_mode *mode, unsigned int lanes, unsigned int rate) { u32 max_bw, req_bw, bpp; /* * mode->clock is expressed in kHz. Multiplying by bpp and dividing by 8 * we get the number of kB/s. DisplayPort applies a 8b-10b encoding, the * value thus equals the bandwidth in 10kb/s units, which matches the * units of the rate parameter. */ bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt); req_bw = mode->clock * bpp / 8; max_bw = lanes * rate; if (req_bw > max_bw) { dev_dbg(mhdp->dev, "Unsupported Mode: %s, Req BW: %u, Available Max BW:%u\n", mode->name, req_bw, max_bw); return false; } return true; } static enum drm_mode_status cdns_mhdp_mode_valid(struct drm_connector *conn, struct drm_display_mode *mode) { struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn); mutex_lock(&mhdp->link_mutex); if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes, mhdp->link.rate)) { mutex_unlock(&mhdp->link_mutex); return MODE_CLOCK_HIGH; } mutex_unlock(&mhdp->link_mutex); return MODE_OK; } static int cdns_mhdp_connector_atomic_check(struct drm_connector *conn, struct drm_atomic_state *state) { struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn); struct drm_connector_state *old_state, *new_state; struct drm_crtc_state *crtc_state; u64 old_cp, new_cp; if (!mhdp->hdcp_supported) return 0; old_state = drm_atomic_get_old_connector_state(state, conn); new_state = drm_atomic_get_new_connector_state(state, conn); old_cp = old_state->content_protection; new_cp = new_state->content_protection; if (old_state->hdcp_content_type != new_state->hdcp_content_type && new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; goto mode_changed; } if (!new_state->crtc) { if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED) new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 
return 0; } if (old_cp == new_cp || (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED && new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) return 0; mode_changed: crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc); crtc_state->mode_changed = true; return 0; } static const struct drm_connector_helper_funcs cdns_mhdp_conn_helper_funcs = { .detect_ctx = cdns_mhdp_connector_detect, .get_modes = cdns_mhdp_get_modes, .mode_valid = cdns_mhdp_mode_valid, .atomic_check = cdns_mhdp_connector_atomic_check, }; static const struct drm_connector_funcs cdns_mhdp_conn_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .reset = drm_atomic_helper_connector_reset, .destroy = drm_connector_cleanup, }; static int cdns_mhdp_connector_init(struct cdns_mhdp_device *mhdp) { u32 bus_format = MEDIA_BUS_FMT_RGB121212_1X36; struct drm_connector *conn = &mhdp->connector; struct drm_bridge *bridge = &mhdp->bridge; int ret; if (!bridge->encoder) { dev_err(mhdp->dev, "Parent encoder object not found"); return -ENODEV; } conn->polled = DRM_CONNECTOR_POLL_HPD; ret = drm_connector_init(bridge->dev, conn, &cdns_mhdp_conn_funcs, DRM_MODE_CONNECTOR_DisplayPort); if (ret) { dev_err(mhdp->dev, "Failed to initialize connector with drm\n"); return ret; } drm_connector_helper_add(conn, &cdns_mhdp_conn_helper_funcs); ret = drm_display_info_set_bus_formats(&conn->display_info, &bus_format, 1); if (ret) return ret; ret = drm_connector_attach_encoder(conn, bridge->encoder); if (ret) { dev_err(mhdp->dev, "Failed to attach connector to encoder\n"); return ret; } if (mhdp->hdcp_supported) ret = drm_connector_attach_content_protection_property(conn, true); return ret; } static int cdns_mhdp_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); bool hw_ready; int ret; 
dev_dbg(mhdp->dev, "%s\n", __func__); mhdp->aux.drm_dev = bridge->dev; ret = drm_dp_aux_register(&mhdp->aux); if (ret < 0) return ret; if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) { ret = cdns_mhdp_connector_init(mhdp); if (ret) goto aux_unregister; } spin_lock(&mhdp->start_lock); mhdp->bridge_attached = true; hw_ready = mhdp->hw_state == MHDP_HW_READY; spin_unlock(&mhdp->start_lock); /* Enable SW event interrupts */ if (hw_ready) cdns_mhdp_bridge_hpd_enable(bridge); return 0; aux_unregister: drm_dp_aux_unregister(&mhdp->aux); return ret; } static void cdns_mhdp_configure_video(struct cdns_mhdp_device *mhdp, const struct drm_display_mode *mode) { unsigned int dp_framer_sp = 0, msa_horizontal_1, msa_vertical_1, bnd_hsync2vsync, hsync2vsync_pol_ctrl, misc0 = 0, misc1 = 0, pxl_repr, front_porch, back_porch, msa_h0, msa_v0, hsync, vsync, dp_vertical_1; u8 stream_id = mhdp->stream_id; u32 bpp, bpc, pxlfmt, framer; int ret; pxlfmt = mhdp->display_fmt.color_format; bpc = mhdp->display_fmt.bpc; /* * If YCBCR supported and stream not SD, use ITU709 * Need to handle ITU version with YCBCR420 when supported */ if ((pxlfmt == DRM_COLOR_FORMAT_YCBCR444 || pxlfmt == DRM_COLOR_FORMAT_YCBCR422) && mode->crtc_vdisplay >= 720) misc0 = DP_YCBCR_COEFFICIENTS_ITU709; bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt); switch (pxlfmt) { case DRM_COLOR_FORMAT_RGB444: pxl_repr = CDNS_DP_FRAMER_RGB << CDNS_DP_FRAMER_PXL_FORMAT; misc0 |= DP_COLOR_FORMAT_RGB; break; case DRM_COLOR_FORMAT_YCBCR444: pxl_repr = CDNS_DP_FRAMER_YCBCR444 << CDNS_DP_FRAMER_PXL_FORMAT; misc0 |= DP_COLOR_FORMAT_YCbCr444 | DP_TEST_DYNAMIC_RANGE_CEA; break; case DRM_COLOR_FORMAT_YCBCR422: pxl_repr = CDNS_DP_FRAMER_YCBCR422 << CDNS_DP_FRAMER_PXL_FORMAT; misc0 |= DP_COLOR_FORMAT_YCbCr422 | DP_TEST_DYNAMIC_RANGE_CEA; break; case DRM_COLOR_FORMAT_YCBCR420: pxl_repr = CDNS_DP_FRAMER_YCBCR420 << CDNS_DP_FRAMER_PXL_FORMAT; break; default: pxl_repr = CDNS_DP_FRAMER_Y_ONLY << CDNS_DP_FRAMER_PXL_FORMAT; } switch (bpc) { case 6: misc0 
|= DP_TEST_BIT_DEPTH_6; pxl_repr |= CDNS_DP_FRAMER_6_BPC; break; case 8: misc0 |= DP_TEST_BIT_DEPTH_8; pxl_repr |= CDNS_DP_FRAMER_8_BPC; break; case 10: misc0 |= DP_TEST_BIT_DEPTH_10; pxl_repr |= CDNS_DP_FRAMER_10_BPC; break; case 12: misc0 |= DP_TEST_BIT_DEPTH_12; pxl_repr |= CDNS_DP_FRAMER_12_BPC; break; case 16: misc0 |= DP_TEST_BIT_DEPTH_16; pxl_repr |= CDNS_DP_FRAMER_16_BPC; break; } bnd_hsync2vsync = CDNS_IP_BYPASS_V_INTERFACE; if (mode->flags & DRM_MODE_FLAG_INTERLACE) bnd_hsync2vsync |= CDNS_IP_DET_INTERLACE_FORMAT; cdns_mhdp_reg_write(mhdp, CDNS_BND_HSYNC2VSYNC(stream_id), bnd_hsync2vsync); hsync2vsync_pol_ctrl = 0; if (mode->flags & DRM_MODE_FLAG_NHSYNC) hsync2vsync_pol_ctrl |= CDNS_H2V_HSYNC_POL_ACTIVE_LOW; if (mode->flags & DRM_MODE_FLAG_NVSYNC) hsync2vsync_pol_ctrl |= CDNS_H2V_VSYNC_POL_ACTIVE_LOW; cdns_mhdp_reg_write(mhdp, CDNS_HSYNC2VSYNC_POL_CTRL(stream_id), hsync2vsync_pol_ctrl); cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_PXL_REPR(stream_id), pxl_repr); if (mode->flags & DRM_MODE_FLAG_INTERLACE) dp_framer_sp |= CDNS_DP_FRAMER_INTERLACE; if (mode->flags & DRM_MODE_FLAG_NHSYNC) dp_framer_sp |= CDNS_DP_FRAMER_HSYNC_POL_LOW; if (mode->flags & DRM_MODE_FLAG_NVSYNC) dp_framer_sp |= CDNS_DP_FRAMER_VSYNC_POL_LOW; cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_SP(stream_id), dp_framer_sp); front_porch = mode->crtc_hsync_start - mode->crtc_hdisplay; back_porch = mode->crtc_htotal - mode->crtc_hsync_end; cdns_mhdp_reg_write(mhdp, CDNS_DP_FRONT_BACK_PORCH(stream_id), CDNS_DP_FRONT_PORCH(front_porch) | CDNS_DP_BACK_PORCH(back_porch)); cdns_mhdp_reg_write(mhdp, CDNS_DP_BYTE_COUNT(stream_id), mode->crtc_hdisplay * bpp / 8); msa_h0 = mode->crtc_htotal - mode->crtc_hsync_start; cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_0(stream_id), CDNS_DP_MSAH0_H_TOTAL(mode->crtc_htotal) | CDNS_DP_MSAH0_HSYNC_START(msa_h0)); hsync = mode->crtc_hsync_end - mode->crtc_hsync_start; msa_horizontal_1 = CDNS_DP_MSAH1_HSYNC_WIDTH(hsync) | CDNS_DP_MSAH1_HDISP_WIDTH(mode->crtc_hdisplay); 
if (mode->flags & DRM_MODE_FLAG_NHSYNC) msa_horizontal_1 |= CDNS_DP_MSAH1_HSYNC_POL_LOW; cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_1(stream_id), msa_horizontal_1); msa_v0 = mode->crtc_vtotal - mode->crtc_vsync_start; cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_0(stream_id), CDNS_DP_MSAV0_V_TOTAL(mode->crtc_vtotal) | CDNS_DP_MSAV0_VSYNC_START(msa_v0)); vsync = mode->crtc_vsync_end - mode->crtc_vsync_start; msa_vertical_1 = CDNS_DP_MSAV1_VSYNC_WIDTH(vsync) | CDNS_DP_MSAV1_VDISP_WIDTH(mode->crtc_vdisplay); if (mode->flags & DRM_MODE_FLAG_NVSYNC) msa_vertical_1 |= CDNS_DP_MSAV1_VSYNC_POL_LOW; cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_1(stream_id), msa_vertical_1); if ((mode->flags & DRM_MODE_FLAG_INTERLACE) && mode->crtc_vtotal % 2 == 0) misc1 = DP_TEST_INTERLACED; if (mhdp->display_fmt.y_only) misc1 |= CDNS_DP_TEST_COLOR_FORMAT_RAW_Y_ONLY; /* Use VSC SDP for Y420 */ if (pxlfmt == DRM_COLOR_FORMAT_YCBCR420) misc1 = CDNS_DP_TEST_VSC_SDP; cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_MISC(stream_id), misc0 | (misc1 << 8)); cdns_mhdp_reg_write(mhdp, CDNS_DP_HORIZONTAL(stream_id), CDNS_DP_H_HSYNC_WIDTH(hsync) | CDNS_DP_H_H_TOTAL(mode->crtc_hdisplay)); cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_0(stream_id), CDNS_DP_V0_VHEIGHT(mode->crtc_vdisplay) | CDNS_DP_V0_VSTART(msa_v0)); dp_vertical_1 = CDNS_DP_V1_VTOTAL(mode->crtc_vtotal); if ((mode->flags & DRM_MODE_FLAG_INTERLACE) && mode->crtc_vtotal % 2 == 0) dp_vertical_1 |= CDNS_DP_V1_VTOTAL_EVEN; cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_1(stream_id), dp_vertical_1); cdns_mhdp_reg_write_bit(mhdp, CDNS_DP_VB_ID(stream_id), 2, 1, (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 
CDNS_DP_VB_ID_INTERLACED : 0); ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &framer); if (ret < 0) { dev_err(mhdp->dev, "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n", ret); return; } framer |= CDNS_DP_FRAMER_EN; framer &= ~CDNS_DP_NO_VIDEO_MODE; cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, framer); } static void cdns_mhdp_sst_enable(struct cdns_mhdp_device *mhdp, const struct drm_display_mode *mode) { u32 rate, vs, required_bandwidth, available_bandwidth; s32 line_thresh1, line_thresh2, line_thresh = 0; int pxlclock = mode->crtc_clock; u32 tu_size = 64; u32 bpp; /* Get rate in MSymbols per second per lane */ rate = mhdp->link.rate / 1000; bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt); required_bandwidth = pxlclock * bpp / 8; available_bandwidth = mhdp->link.num_lanes * rate; vs = tu_size * required_bandwidth / available_bandwidth; vs /= 1000; if (vs == tu_size) vs = tu_size - 1; line_thresh1 = ((vs + 1) << 5) * 8 / bpp; line_thresh2 = (pxlclock << 5) / 1000 / rate * (vs + 1) - (1 << 5); line_thresh = line_thresh1 - line_thresh2 / (s32)mhdp->link.num_lanes; line_thresh = (line_thresh >> 5) + 2; mhdp->stream_id = 0; cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_TU, CDNS_DP_FRAMER_TU_VS(vs) | CDNS_DP_FRAMER_TU_SIZE(tu_size) | CDNS_DP_FRAMER_TU_CNT_RST_EN); cdns_mhdp_reg_write(mhdp, CDNS_DP_LINE_THRESH(0), line_thresh & GENMASK(5, 0)); cdns_mhdp_reg_write(mhdp, CDNS_DP_STREAM_CONFIG_2(0), CDNS_DP_SC2_TU_VS_DIFF((tu_size - vs > 3) ? 
0 : tu_size - vs)); cdns_mhdp_configure_video(mhdp, mode); } static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state) { struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); struct drm_atomic_state *state = bridge_state->base.state; struct cdns_mhdp_bridge_state *mhdp_state; struct drm_crtc_state *crtc_state; struct drm_connector *connector; struct drm_connector_state *conn_state; struct drm_bridge_state *new_state; const struct drm_display_mode *mode; u32 resp; int ret; dev_dbg(mhdp->dev, "bridge enable\n"); mutex_lock(&mhdp->link_mutex); if (mhdp->plugged && !mhdp->link_up) { ret = cdns_mhdp_link_up(mhdp); if (ret < 0) goto out; } if (mhdp->info && mhdp->info->ops && mhdp->info->ops->enable) mhdp->info->ops->enable(mhdp); /* Enable VIF clock for stream 0 */ ret = cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp); if (ret < 0) { dev_err(mhdp->dev, "Failed to read CDNS_DPTX_CAR %d\n", ret); goto out; } cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR, resp | CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN); connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); if (WARN_ON(!connector)) goto out; conn_state = drm_atomic_get_new_connector_state(state, connector); if (WARN_ON(!conn_state)) goto out; if (mhdp->hdcp_supported && mhdp->hw_state == MHDP_HW_READY && conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { mutex_unlock(&mhdp->link_mutex); cdns_mhdp_hdcp_enable(mhdp, conn_state->hdcp_content_type); mutex_lock(&mhdp->link_mutex); } crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc); if (WARN_ON(!crtc_state)) goto out; mode = &crtc_state->adjusted_mode; new_state = drm_atomic_get_new_bridge_state(state, bridge); if (WARN_ON(!new_state)) goto out; if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes, mhdp->link.rate)) { ret = -EINVAL; goto out; } cdns_mhdp_sst_enable(mhdp, mode); mhdp_state = to_cdns_mhdp_bridge_state(new_state); mhdp_state->current_mode = 
drm_mode_duplicate(bridge->dev, mode); drm_mode_set_name(mhdp_state->current_mode); dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__, mode->name); mhdp->bridge_enabled = true; out: mutex_unlock(&mhdp->link_mutex); if (ret < 0) schedule_work(&mhdp->modeset_retry_work); } static void cdns_mhdp_atomic_disable(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state) { struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); u32 resp; dev_dbg(mhdp->dev, "%s\n", __func__); mutex_lock(&mhdp->link_mutex); if (mhdp->hdcp_supported) cdns_mhdp_hdcp_disable(mhdp); mhdp->bridge_enabled = false; cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp); resp &= ~CDNS_DP_FRAMER_EN; resp |= CDNS_DP_NO_VIDEO_MODE; cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp); cdns_mhdp_link_down(mhdp); /* Disable VIF clock for stream 0 */ cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp); cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR, resp & ~(CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN)); if (mhdp->info && mhdp->info->ops && mhdp->info->ops->disable) mhdp->info->ops->disable(mhdp); mutex_unlock(&mhdp->link_mutex); } static void cdns_mhdp_detach(struct drm_bridge *bridge) { struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); dev_dbg(mhdp->dev, "%s\n", __func__); drm_dp_aux_unregister(&mhdp->aux); spin_lock(&mhdp->start_lock); mhdp->bridge_attached = false; spin_unlock(&mhdp->start_lock); writel(~0, mhdp->regs + CDNS_APB_INT_MASK); } static struct drm_bridge_state * cdns_mhdp_bridge_atomic_duplicate_state(struct drm_bridge *bridge) { struct cdns_mhdp_bridge_state *state; state = kzalloc(sizeof(*state), GFP_KERNEL); if (!state) return NULL; __drm_atomic_helper_bridge_duplicate_state(bridge, &state->base); return &state->base; } static void cdns_mhdp_bridge_atomic_destroy_state(struct drm_bridge *bridge, struct drm_bridge_state *state) { struct cdns_mhdp_bridge_state *cdns_mhdp_state; cdns_mhdp_state = to_cdns_mhdp_bridge_state(state); if (cdns_mhdp_state->current_mode) { 
drm_mode_destroy(bridge->dev, cdns_mhdp_state->current_mode); cdns_mhdp_state->current_mode = NULL; } kfree(cdns_mhdp_state); } static struct drm_bridge_state * cdns_mhdp_bridge_atomic_reset(struct drm_bridge *bridge) { struct cdns_mhdp_bridge_state *cdns_mhdp_state; cdns_mhdp_state = kzalloc(sizeof(*cdns_mhdp_state), GFP_KERNEL); if (!cdns_mhdp_state) return NULL; __drm_atomic_helper_bridge_reset(bridge, &cdns_mhdp_state->base); return &cdns_mhdp_state->base; } static u32 *cdns_mhdp_get_input_bus_fmts(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state, u32 output_fmt, unsigned int *num_input_fmts) { u32 *input_fmts; *num_input_fmts = 0; input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL); if (!input_fmts) return NULL; *num_input_fmts = 1; input_fmts[0] = MEDIA_BUS_FMT_RGB121212_1X36; return input_fmts; } static int cdns_mhdp_atomic_check(struct drm_bridge *bridge, struct drm_bridge_state *bridge_state, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); const struct drm_display_mode *mode = &crtc_state->adjusted_mode; mutex_lock(&mhdp->link_mutex); if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes, mhdp->link.rate)) { dev_err(mhdp->dev, "%s: Not enough BW for %s (%u lanes at %u Mbps)\n", __func__, mode->name, mhdp->link.num_lanes, mhdp->link.rate / 100); mutex_unlock(&mhdp->link_mutex); return -EINVAL; } /* * There might be flags negotiation supported in future. * Set the bus flags in atomic_check statically for now. 
*/ if (mhdp->info) bridge_state->input_bus_cfg.flags = *mhdp->info->input_bus_flags; mutex_unlock(&mhdp->link_mutex); return 0; } static enum drm_connector_status cdns_mhdp_bridge_detect(struct drm_bridge *bridge) { struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); return cdns_mhdp_detect(mhdp); } static struct edid *cdns_mhdp_bridge_get_edid(struct drm_bridge *bridge, struct drm_connector *connector) { struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); return cdns_mhdp_get_edid(mhdp, connector); } static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = { .atomic_enable = cdns_mhdp_atomic_enable, .atomic_disable = cdns_mhdp_atomic_disable, .atomic_check = cdns_mhdp_atomic_check, .attach = cdns_mhdp_attach, .detach = cdns_mhdp_detach, .atomic_duplicate_state = cdns_mhdp_bridge_atomic_duplicate_state, .atomic_destroy_state = cdns_mhdp_bridge_atomic_destroy_state, .atomic_reset = cdns_mhdp_bridge_atomic_reset, .atomic_get_input_bus_fmts = cdns_mhdp_get_input_bus_fmts, .detect = cdns_mhdp_bridge_detect, .get_edid = cdns_mhdp_bridge_get_edid, .hpd_enable = cdns_mhdp_bridge_hpd_enable, .hpd_disable = cdns_mhdp_bridge_hpd_disable, }; static bool cdns_mhdp_detect_hpd(struct cdns_mhdp_device *mhdp, bool *hpd_pulse) { int hpd_event, hpd_status; *hpd_pulse = false; hpd_event = cdns_mhdp_read_hpd_event(mhdp); /* Getting event bits failed, bail out */ if (hpd_event < 0) { dev_warn(mhdp->dev, "%s: read event failed: %d\n", __func__, hpd_event); return false; } hpd_status = cdns_mhdp_get_hpd_status(mhdp); if (hpd_status < 0) { dev_warn(mhdp->dev, "%s: get hpd status failed: %d\n", __func__, hpd_status); return false; } if (hpd_event & DPTX_READ_EVENT_HPD_PULSE) *hpd_pulse = true; return !!hpd_status; } static int cdns_mhdp_update_link_status(struct cdns_mhdp_device *mhdp) { struct cdns_mhdp_bridge_state *cdns_bridge_state; struct drm_display_mode *current_mode; bool old_plugged = mhdp->plugged; struct drm_bridge_state *state; u8 status[DP_LINK_STATUS_SIZE]; bool 
hpd_pulse; int ret = 0; mutex_lock(&mhdp->link_mutex); mhdp->plugged = cdns_mhdp_detect_hpd(mhdp, &hpd_pulse); if (!mhdp->plugged) { cdns_mhdp_link_down(mhdp); mhdp->link.rate = mhdp->host.link_rate; mhdp->link.num_lanes = mhdp->host.lanes_cnt; goto out; } /* * If we get a HPD pulse event and we were and still are connected, * check the link status. If link status is ok, there's nothing to do * as we don't handle DP interrupts. If link status is bad, continue * with full link setup. */ if (hpd_pulse && old_plugged == mhdp->plugged) { ret = drm_dp_dpcd_read_link_status(&mhdp->aux, status); /* * If everything looks fine, just return, as we don't handle * DP IRQs. */ if (ret > 0 && drm_dp_channel_eq_ok(status, mhdp->link.num_lanes) && drm_dp_clock_recovery_ok(status, mhdp->link.num_lanes)) goto out; /* If link is bad, mark link as down so that we do a new LT */ mhdp->link_up = false; } if (!mhdp->link_up) { ret = cdns_mhdp_link_up(mhdp); if (ret < 0) goto out; } if (mhdp->bridge_enabled) { state = drm_priv_to_bridge_state(mhdp->bridge.base.state); if (!state) { ret = -EINVAL; goto out; } cdns_bridge_state = to_cdns_mhdp_bridge_state(state); if (!cdns_bridge_state) { ret = -EINVAL; goto out; } current_mode = cdns_bridge_state->current_mode; if (!current_mode) { ret = -EINVAL; goto out; } if (!cdns_mhdp_bandwidth_ok(mhdp, current_mode, mhdp->link.num_lanes, mhdp->link.rate)) { ret = -EINVAL; goto out; } dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__, current_mode->name); cdns_mhdp_sst_enable(mhdp, current_mode); } out: mutex_unlock(&mhdp->link_mutex); return ret; } static void cdns_mhdp_modeset_retry_fn(struct work_struct *work) { struct cdns_mhdp_device *mhdp; struct drm_connector *conn; mhdp = container_of(work, typeof(*mhdp), modeset_retry_work); conn = &mhdp->connector; /* Grab the locks before changing connector property */ mutex_lock(&conn->dev->mode_config.mutex); /* * Set connector link status to BAD and send a Uevent to notify * userspace to do a 
modeset. */ drm_connector_set_link_status_property(conn, DRM_MODE_LINK_STATUS_BAD); mutex_unlock(&conn->dev->mode_config.mutex); /* Send Hotplug uevent so userspace can reprobe */ drm_kms_helper_hotplug_event(mhdp->bridge.dev); } static irqreturn_t cdns_mhdp_irq_handler(int irq, void *data) { struct cdns_mhdp_device *mhdp = data; u32 apb_stat, sw_ev0; bool bridge_attached; apb_stat = readl(mhdp->regs + CDNS_APB_INT_STATUS); if (!(apb_stat & CDNS_APB_INT_MASK_SW_EVENT_INT)) return IRQ_NONE; sw_ev0 = readl(mhdp->regs + CDNS_SW_EVENT0); /* * Calling drm_kms_helper_hotplug_event() when not attached * to drm device causes an oops because the drm_bridge->dev * is NULL. See cdns_mhdp_fw_cb() comments for details about the * problems related drm_kms_helper_hotplug_event() call. */ spin_lock(&mhdp->start_lock); bridge_attached = mhdp->bridge_attached; spin_unlock(&mhdp->start_lock); if (bridge_attached && (sw_ev0 & CDNS_DPTX_HPD)) { schedule_work(&mhdp->hpd_work); } if (sw_ev0 & ~CDNS_DPTX_HPD) { mhdp->sw_events |= (sw_ev0 & ~CDNS_DPTX_HPD); wake_up(&mhdp->sw_events_wq); } return IRQ_HANDLED; } u32 cdns_mhdp_wait_for_sw_event(struct cdns_mhdp_device *mhdp, u32 event) { u32 ret; ret = wait_event_timeout(mhdp->sw_events_wq, mhdp->sw_events & event, msecs_to_jiffies(500)); if (!ret) { dev_dbg(mhdp->dev, "SW event 0x%x timeout\n", event); goto sw_event_out; } ret = mhdp->sw_events; mhdp->sw_events &= ~event; sw_event_out: return ret; } static void cdns_mhdp_hpd_work(struct work_struct *work) { struct cdns_mhdp_device *mhdp = container_of(work, struct cdns_mhdp_device, hpd_work); int ret; ret = cdns_mhdp_update_link_status(mhdp); if (mhdp->connector.dev) { if (ret < 0) schedule_work(&mhdp->modeset_retry_work); else drm_kms_helper_hotplug_event(mhdp->bridge.dev); } else { drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp)); } } static int cdns_mhdp_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct cdns_mhdp_device *mhdp; unsigned long rate; 
struct clk *clk; int ret; int irq; mhdp = devm_kzalloc(dev, sizeof(*mhdp), GFP_KERNEL); if (!mhdp) return -ENOMEM; clk = devm_clk_get(dev, NULL); if (IS_ERR(clk)) { dev_err(dev, "couldn't get clk: %ld\n", PTR_ERR(clk)); return PTR_ERR(clk); } mhdp->clk = clk; mhdp->dev = dev; mutex_init(&mhdp->mbox_mutex); mutex_init(&mhdp->link_mutex); spin_lock_init(&mhdp->start_lock); drm_dp_aux_init(&mhdp->aux); mhdp->aux.dev = dev; mhdp->aux.transfer = cdns_mhdp_transfer; mhdp->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mhdp->regs)) { dev_err(dev, "Failed to get memory resource\n"); return PTR_ERR(mhdp->regs); } mhdp->sapb_regs = devm_platform_ioremap_resource_byname(pdev, "mhdptx-sapb"); if (IS_ERR(mhdp->sapb_regs)) { mhdp->hdcp_supported = false; dev_warn(dev, "Failed to get SAPB memory resource, HDCP not supported\n"); } else { mhdp->hdcp_supported = true; } mhdp->phy = devm_of_phy_get_by_index(dev, pdev->dev.of_node, 0); if (IS_ERR(mhdp->phy)) { dev_err(dev, "no PHY configured\n"); return PTR_ERR(mhdp->phy); } platform_set_drvdata(pdev, mhdp); mhdp->info = of_device_get_match_data(dev); clk_prepare_enable(clk); pm_runtime_enable(dev); ret = pm_runtime_resume_and_get(dev); if (ret < 0) { dev_err(dev, "pm_runtime_resume_and_get failed\n"); pm_runtime_disable(dev); goto clk_disable; } if (mhdp->info && mhdp->info->ops && mhdp->info->ops->init) { ret = mhdp->info->ops->init(mhdp); if (ret != 0) { dev_err(dev, "MHDP platform initialization failed: %d\n", ret); goto runtime_put; } } rate = clk_get_rate(clk); writel(rate % 1000000, mhdp->regs + CDNS_SW_CLK_L); writel(rate / 1000000, mhdp->regs + CDNS_SW_CLK_H); dev_dbg(dev, "func clk rate %lu Hz\n", rate); writel(~0, mhdp->regs + CDNS_APB_INT_MASK); irq = platform_get_irq(pdev, 0); ret = devm_request_threaded_irq(mhdp->dev, irq, NULL, cdns_mhdp_irq_handler, IRQF_ONESHOT, "mhdp8546", mhdp); if (ret) { dev_err(dev, "cannot install IRQ %d\n", irq); ret = -EIO; goto plat_fini; } cdns_mhdp_fill_host_caps(mhdp); /* 
Initialize link rate and num of lanes to host values */ mhdp->link.rate = mhdp->host.link_rate; mhdp->link.num_lanes = mhdp->host.lanes_cnt; /* The only currently supported format */ mhdp->display_fmt.y_only = false; mhdp->display_fmt.color_format = DRM_COLOR_FORMAT_RGB444; mhdp->display_fmt.bpc = 8; mhdp->bridge.of_node = pdev->dev.of_node; mhdp->bridge.funcs = &cdns_mhdp_bridge_funcs; mhdp->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD; mhdp->bridge.type = DRM_MODE_CONNECTOR_DisplayPort; ret = phy_init(mhdp->phy); if (ret) { dev_err(mhdp->dev, "Failed to initialize PHY: %d\n", ret); goto plat_fini; } /* Initialize the work for modeset in case of link train failure */ INIT_WORK(&mhdp->modeset_retry_work, cdns_mhdp_modeset_retry_fn); INIT_WORK(&mhdp->hpd_work, cdns_mhdp_hpd_work); init_waitqueue_head(&mhdp->fw_load_wq); init_waitqueue_head(&mhdp->sw_events_wq); ret = cdns_mhdp_load_firmware(mhdp); if (ret) goto phy_exit; if (mhdp->hdcp_supported) cdns_mhdp_hdcp_init(mhdp); drm_bridge_add(&mhdp->bridge); return 0; phy_exit: phy_exit(mhdp->phy); plat_fini: if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit) mhdp->info->ops->exit(mhdp); runtime_put: pm_runtime_put_sync(dev); pm_runtime_disable(dev); clk_disable: clk_disable_unprepare(mhdp->clk); return ret; } static int cdns_mhdp_remove(struct platform_device *pdev) { struct cdns_mhdp_device *mhdp = platform_get_drvdata(pdev); unsigned long timeout = msecs_to_jiffies(100); bool stop_fw = false; int ret; drm_bridge_remove(&mhdp->bridge); ret = wait_event_timeout(mhdp->fw_load_wq, mhdp->hw_state == MHDP_HW_READY, timeout); if (ret == 0) dev_err(mhdp->dev, "%s: Timeout waiting for fw loading\n", __func__); else stop_fw = true; spin_lock(&mhdp->start_lock); mhdp->hw_state = MHDP_HW_STOPPED; spin_unlock(&mhdp->start_lock); if (stop_fw) ret = cdns_mhdp_set_firmware_active(mhdp, false); phy_exit(mhdp->phy); if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit) 
mhdp->info->ops->exit(mhdp); pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); cancel_work_sync(&mhdp->modeset_retry_work); flush_work(&mhdp->hpd_work); /* Ignoring mhdp->hdcp.check_work and mhdp->hdcp.prop_work here. */ clk_disable_unprepare(mhdp->clk); return ret; } static const struct of_device_id mhdp_ids[] = { { .compatible = "cdns,mhdp8546", }, #ifdef CONFIG_DRM_CDNS_MHDP8546_J721E { .compatible = "ti,j721e-mhdp8546", .data = &(const struct cdns_mhdp_platform_info) { .input_bus_flags = &mhdp_ti_j721e_bridge_input_bus_flags, .ops = &mhdp_ti_j721e_ops, }, }, #endif { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, mhdp_ids); static struct platform_driver mhdp_driver = { .driver = { .name = "cdns-mhdp8546", .of_match_table = mhdp_ids, }, .probe = cdns_mhdp_probe, .remove = cdns_mhdp_remove, }; module_platform_driver(mhdp_driver); MODULE_FIRMWARE(FW_NAME); MODULE_AUTHOR("Quentin Schulz <[email protected]>"); MODULE_AUTHOR("Swapnil Jakhade <[email protected]>"); MODULE_AUTHOR("Yuti Amonkar <[email protected]>"); MODULE_AUTHOR("Tomi Valkeinen <[email protected]>"); MODULE_AUTHOR("Jyri Sarha <[email protected]>"); MODULE_DESCRIPTION("Cadence MHDP8546 DP bridge driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:cdns-mhdp8546");
linux-master
drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
// SPDX-License-Identifier: GPL-2.0 /* * TI j721e Cadence DSI wrapper * * Copyright (C) 2022 Texas Instruments Incorporated - http://www.ti.com/ * Author: Rahul T R <[email protected]> */ #include <linux/io.h> #include <linux/platform_device.h> #include "cdns-dsi-j721e.h" #define DSI_WRAP_REVISION 0x0 #define DSI_WRAP_DPI_CONTROL 0x4 #define DSI_WRAP_DSC_CONTROL 0x8 #define DSI_WRAP_DPI_SECURE 0xc #define DSI_WRAP_DSI_0_ASF_STATUS 0x10 #define DSI_WRAP_DPI_0_EN BIT(0) #define DSI_WRAP_DSI2_MUX_SEL BIT(4) static int cdns_dsi_j721e_init(struct cdns_dsi *dsi) { struct platform_device *pdev = to_platform_device(dsi->base.dev); dsi->j721e_regs = devm_platform_ioremap_resource(pdev, 1); return PTR_ERR_OR_ZERO(dsi->j721e_regs); } static void cdns_dsi_j721e_enable(struct cdns_dsi *dsi) { /* * Enable DPI0 as its input. DSS0 DPI2 is connected * to DSI DPI0. This is the only supported configuration on * J721E. */ writel(DSI_WRAP_DPI_0_EN, dsi->j721e_regs + DSI_WRAP_DPI_CONTROL); } static void cdns_dsi_j721e_disable(struct cdns_dsi *dsi) { /* Put everything to defaults */ writel(0, dsi->j721e_regs + DSI_WRAP_DPI_CONTROL); } const struct cdns_dsi_platform_ops dsi_ti_j721e_ops = { .init = cdns_dsi_j721e_init, .enable = cdns_dsi_j721e_enable, .disable = cdns_dsi_j721e_disable, };
linux-master
drivers/gpu/drm/bridge/cadence/cdns-dsi-j721e.c
// SPDX-License-Identifier: GPL-2.0 /* * TI j721e Cadence MHDP8546 DP wrapper * * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/ * Author: Jyri Sarha <[email protected]> */ #include <linux/io.h> #include <linux/platform_device.h> #include "cdns-mhdp8546-j721e.h" #define REVISION 0x00 #define DPTX_IPCFG 0x04 #define ECC_MEM_CFG 0x08 #define DPTX_DSC_CFG 0x0c #define DPTX_SRC_CFG 0x10 #define DPTX_VIF_SECURE_MODE_CFG 0x14 #define DPTX_VIF_CONN_STATUS 0x18 #define PHY_CLK_STATUS 0x1c #define DPTX_SRC_AIF_EN BIT(16) #define DPTX_SRC_VIF_3_IN30B BIT(11) #define DPTX_SRC_VIF_2_IN30B BIT(10) #define DPTX_SRC_VIF_1_IN30B BIT(9) #define DPTX_SRC_VIF_0_IN30B BIT(8) #define DPTX_SRC_VIF_3_SEL_DPI5 BIT(7) #define DPTX_SRC_VIF_3_SEL_DPI3 0 #define DPTX_SRC_VIF_2_SEL_DPI4 BIT(6) #define DPTX_SRC_VIF_2_SEL_DPI2 0 #define DPTX_SRC_VIF_1_SEL_DPI3 BIT(5) #define DPTX_SRC_VIF_1_SEL_DPI1 0 #define DPTX_SRC_VIF_0_SEL_DPI2 BIT(4) #define DPTX_SRC_VIF_0_SEL_DPI0 0 #define DPTX_SRC_VIF_3_EN BIT(3) #define DPTX_SRC_VIF_2_EN BIT(2) #define DPTX_SRC_VIF_1_EN BIT(1) #define DPTX_SRC_VIF_0_EN BIT(0) /* TODO turn DPTX_IPCFG fw_mem_clk_en at pm_runtime_suspend. */ static int cdns_mhdp_j721e_init(struct cdns_mhdp_device *mhdp) { struct platform_device *pdev = to_platform_device(mhdp->dev); mhdp->j721e_regs = devm_platform_ioremap_resource(pdev, 1); return PTR_ERR_OR_ZERO(mhdp->j721e_regs); } static void cdns_mhdp_j721e_enable(struct cdns_mhdp_device *mhdp) { /* * Enable VIF_0 and select DPI2 as its input. DSS0 DPI0 is connected * to eDP DPI2. This is the only supported SST configuration on * J721E. 
*/ writel(DPTX_SRC_VIF_0_EN | DPTX_SRC_VIF_0_SEL_DPI2, mhdp->j721e_regs + DPTX_SRC_CFG); } static void cdns_mhdp_j721e_disable(struct cdns_mhdp_device *mhdp) { /* Put everything to defaults */ writel(0, mhdp->j721e_regs + DPTX_DSC_CFG); } const struct mhdp_platform_ops mhdp_ti_j721e_ops = { .init = cdns_mhdp_j721e_init, .enable = cdns_mhdp_j721e_enable, .disable = cdns_mhdp_j721e_disable, }; const u32 mhdp_ti_j721e_bridge_input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE | DRM_BUS_FLAG_DE_HIGH;
linux-master
drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-j721e.c
// SPDX-License-Identifier: MIT /* * Copyright (C) 2013-2017 Oracle Corporation * This file is based on ast_drv.c * Copyright 2012 Red Hat Inc. * Authors: Dave Airlie <[email protected]> * Michael Thayer <[email protected], * Hans de Goede <[email protected]> */ #include <linux/module.h> #include <linux/pci.h> #include <linux/vt_kern.h> #include <drm/drm_aperture.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_generic.h> #include <drm/drm_file.h> #include <drm/drm_ioctl.h> #include <drm/drm_managed.h> #include <drm/drm_modeset_helper.h> #include <drm/drm_module.h> #include "vbox_drv.h" static int vbox_modeset = -1; MODULE_PARM_DESC(modeset, "Disable/Enable modesetting"); module_param_named(modeset, vbox_modeset, int, 0400); static const struct drm_driver driver; static const struct pci_device_id pciidlist[] = { { PCI_DEVICE(0x80ee, 0xbeef) }, { } }; MODULE_DEVICE_TABLE(pci, pciidlist); static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct vbox_private *vbox; int ret = 0; if (!vbox_check_supported(VBE_DISPI_ID_HGSMI)) return -ENODEV; ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver); if (ret) return ret; vbox = devm_drm_dev_alloc(&pdev->dev, &driver, struct vbox_private, ddev); if (IS_ERR(vbox)) return PTR_ERR(vbox); pci_set_drvdata(pdev, vbox); mutex_init(&vbox->hw_mutex); ret = pcim_enable_device(pdev); if (ret) return ret; ret = vbox_hw_init(vbox); if (ret) return ret; ret = vbox_mm_init(vbox); if (ret) goto err_hw_fini; ret = vbox_mode_init(vbox); if (ret) goto err_hw_fini; ret = vbox_irq_init(vbox); if (ret) goto err_mode_fini; ret = drm_dev_register(&vbox->ddev, 0); if (ret) goto err_irq_fini; drm_fbdev_generic_setup(&vbox->ddev, 32); return 0; err_irq_fini: vbox_irq_fini(vbox); err_mode_fini: vbox_mode_fini(vbox); err_hw_fini: vbox_hw_fini(vbox); return ret; } static void vbox_pci_remove(struct pci_dev *pdev) { struct vbox_private *vbox = pci_get_drvdata(pdev); drm_dev_unregister(&vbox->ddev); 
vbox_irq_fini(vbox); vbox_mode_fini(vbox); vbox_hw_fini(vbox); } static int vbox_pm_suspend(struct device *dev) { struct vbox_private *vbox = dev_get_drvdata(dev); struct pci_dev *pdev = to_pci_dev(dev); int error; error = drm_mode_config_helper_suspend(&vbox->ddev); if (error) return error; pci_save_state(pdev); pci_disable_device(pdev); pci_set_power_state(pdev, PCI_D3hot); return 0; } static int vbox_pm_resume(struct device *dev) { struct vbox_private *vbox = dev_get_drvdata(dev); struct pci_dev *pdev = to_pci_dev(dev); if (pci_enable_device(pdev)) return -EIO; return drm_mode_config_helper_resume(&vbox->ddev); } static int vbox_pm_freeze(struct device *dev) { struct vbox_private *vbox = dev_get_drvdata(dev); return drm_mode_config_helper_suspend(&vbox->ddev); } static int vbox_pm_thaw(struct device *dev) { struct vbox_private *vbox = dev_get_drvdata(dev); return drm_mode_config_helper_resume(&vbox->ddev); } static int vbox_pm_poweroff(struct device *dev) { struct vbox_private *vbox = dev_get_drvdata(dev); return drm_mode_config_helper_suspend(&vbox->ddev); } static const struct dev_pm_ops vbox_pm_ops = { .suspend = vbox_pm_suspend, .resume = vbox_pm_resume, .freeze = vbox_pm_freeze, .thaw = vbox_pm_thaw, .poweroff = vbox_pm_poweroff, .restore = vbox_pm_resume, }; static struct pci_driver vbox_pci_driver = { .name = DRIVER_NAME, .id_table = pciidlist, .probe = vbox_pci_probe, .remove = vbox_pci_remove, .driver.pm = pm_sleep_ptr(&vbox_pm_ops), }; DEFINE_DRM_GEM_FOPS(vbox_fops); static const struct drm_driver driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .fops = &vbox_fops, .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, .major = DRIVER_MAJOR, .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, DRM_GEM_VRAM_DRIVER, }; drm_module_pci_driver_if_modeset(vbox_pci_driver, vbox_modeset); MODULE_AUTHOR("Oracle Corporation"); MODULE_AUTHOR("Hans de Goede <[email protected]>"); MODULE_DESCRIPTION(DRIVER_DESC); 
MODULE_LICENSE("GPL and additional rights");
linux-master
drivers/gpu/drm/vboxvideo/vbox_drv.c
// SPDX-License-Identifier: MIT /* * Copyright (C) 2013-2017 Oracle Corporation * This file is based on ast_main.c * Copyright 2012 Red Hat Inc. * Authors: Dave Airlie <[email protected]>, * Michael Thayer <[email protected], * Hans de Goede <[email protected]> */ #include <linux/pci.h> #include <linux/vbox_err.h> #include <drm/drm_damage_helper.h> #include "vbox_drv.h" #include "vboxvideo_guest.h" #include "vboxvideo_vbe.h" void vbox_report_caps(struct vbox_private *vbox) { u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION | VBVACAPS_IRQ | VBVACAPS_USE_VBVA_ONLY; /* The host only accepts VIDEO_MODE_HINTS if it is send separately. */ hgsmi_send_caps_info(vbox->guest_pool, caps); caps |= VBVACAPS_VIDEO_MODE_HINTS; hgsmi_send_caps_info(vbox->guest_pool, caps); } static int vbox_accel_init(struct vbox_private *vbox) { struct pci_dev *pdev = to_pci_dev(vbox->ddev.dev); struct vbva_buffer *vbva; unsigned int i; vbox->vbva_info = devm_kcalloc(vbox->ddev.dev, vbox->num_crtcs, sizeof(*vbox->vbva_info), GFP_KERNEL); if (!vbox->vbva_info) return -ENOMEM; /* Take a command buffer for each screen from the end of usable VRAM. */ vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE; vbox->vbva_buffers = pci_iomap_range(pdev, 0, vbox->available_vram_size, vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE); if (!vbox->vbva_buffers) return -ENOMEM; for (i = 0; i < vbox->num_crtcs; ++i) { vbva_setup_buffer_context(&vbox->vbva_info[i], vbox->available_vram_size + i * VBVA_MIN_BUFFER_SIZE, VBVA_MIN_BUFFER_SIZE); vbva = (void __force *)vbox->vbva_buffers + i * VBVA_MIN_BUFFER_SIZE; if (!vbva_enable(&vbox->vbva_info[i], vbox->guest_pool, vbva, i)) { /* very old host or driver error. */ DRM_ERROR("vboxvideo: vbva_enable failed\n"); } } return 0; } static void vbox_accel_fini(struct vbox_private *vbox) { unsigned int i; for (i = 0; i < vbox->num_crtcs; ++i) vbva_disable(&vbox->vbva_info[i], vbox->guest_pool, i); } /* Do we support the 4.3 plus mode hint reporting interface? 
*/ static bool have_hgsmi_mode_hints(struct vbox_private *vbox) { u32 have_hints, have_cursor; int ret; ret = hgsmi_query_conf(vbox->guest_pool, VBOX_VBVA_CONF32_MODE_HINT_REPORTING, &have_hints); if (ret) return false; ret = hgsmi_query_conf(vbox->guest_pool, VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING, &have_cursor); if (ret) return false; return have_hints == VINF_SUCCESS && have_cursor == VINF_SUCCESS; } bool vbox_check_supported(u16 id) { u16 dispi_id; vbox_write_ioport(VBE_DISPI_INDEX_ID, id); dispi_id = inw(VBE_DISPI_IOPORT_DATA); return dispi_id == id; } int vbox_hw_init(struct vbox_private *vbox) { struct pci_dev *pdev = to_pci_dev(vbox->ddev.dev); int ret = -ENOMEM; vbox->full_vram_size = inl(VBE_DISPI_IOPORT_DATA); vbox->any_pitch = vbox_check_supported(VBE_DISPI_ID_ANYX); DRM_INFO("VRAM %08x\n", vbox->full_vram_size); /* Map guest-heap at end of vram */ vbox->guest_heap = pci_iomap_range(pdev, 0, GUEST_HEAP_OFFSET(vbox), GUEST_HEAP_SIZE); if (!vbox->guest_heap) return -ENOMEM; /* Create guest-heap mem-pool use 2^4 = 16 byte chunks */ vbox->guest_pool = devm_gen_pool_create(vbox->ddev.dev, 4, -1, "vboxvideo-accel"); if (IS_ERR(vbox->guest_pool)) return PTR_ERR(vbox->guest_pool); ret = gen_pool_add_virt(vbox->guest_pool, (unsigned long)vbox->guest_heap, GUEST_HEAP_OFFSET(vbox), GUEST_HEAP_USABLE_SIZE, -1); if (ret) return ret; ret = hgsmi_test_query_conf(vbox->guest_pool); if (ret) { DRM_ERROR("vboxvideo: hgsmi_test_query_conf failed\n"); return ret; } /* Reduce available VRAM size to reflect the guest heap. */ vbox->available_vram_size = GUEST_HEAP_OFFSET(vbox); /* Linux drm represents monitors as a 32-bit array. 
*/ hgsmi_query_conf(vbox->guest_pool, VBOX_VBVA_CONF32_MONITOR_COUNT, &vbox->num_crtcs); vbox->num_crtcs = clamp_t(u32, vbox->num_crtcs, 1, VBOX_MAX_SCREENS); if (!have_hgsmi_mode_hints(vbox)) { ret = -ENOTSUPP; return ret; } vbox->last_mode_hints = devm_kcalloc(vbox->ddev.dev, vbox->num_crtcs, sizeof(struct vbva_modehint), GFP_KERNEL); if (!vbox->last_mode_hints) return -ENOMEM; ret = vbox_accel_init(vbox); if (ret) return ret; return 0; } void vbox_hw_fini(struct vbox_private *vbox) { vbox_accel_fini(vbox); }
linux-master
drivers/gpu/drm/vboxvideo/vbox_main.c
// SPDX-License-Identifier: MIT /* * Copyright (C) 2013-2017 Oracle Corporation * This file is based on ast_ttm.c * Copyright 2012 Red Hat Inc. * Authors: Dave Airlie <[email protected]> * Michael Thayer <[email protected]> */ #include <linux/pci.h> #include <drm/drm_file.h> #include "vbox_drv.h" int vbox_mm_init(struct vbox_private *vbox) { int ret; resource_size_t base, size; struct drm_device *dev = &vbox->ddev; struct pci_dev *pdev = to_pci_dev(dev->dev); base = pci_resource_start(pdev, 0); size = pci_resource_len(pdev, 0); /* Don't fail on errors, but performance might be reduced. */ devm_arch_phys_wc_add(&pdev->dev, base, size); ret = drmm_vram_helper_init(dev, base, vbox->available_vram_size); if (ret) { DRM_ERROR("Error initializing VRAM MM; %d\n", ret); return ret; } return 0; }
linux-master
drivers/gpu/drm/vboxvideo/vbox_ttm.c
// SPDX-License-Identifier: MIT /* Copyright (C) 2006-2017 Oracle Corporation */ #include <linux/vbox_err.h> #include "vbox_drv.h" #include "vboxvideo_guest.h" #include "hgsmi_channels.h" /* * There is a hardware ring buffer in the graphics device video RAM, formerly * in the VBox VMMDev PCI memory space. * All graphics commands go there serialized by vbva_buffer_begin_update. * and vbva_buffer_end_update. * * free_offset is writing position. data_offset is reading position. * free_offset == data_offset means buffer is empty. * There must be always gap between data_offset and free_offset when data * are in the buffer. * Guest only changes free_offset, host changes data_offset. */ static u32 vbva_buffer_available(const struct vbva_buffer *vbva) { s32 diff = vbva->data_offset - vbva->free_offset; return diff > 0 ? diff : vbva->data_len + diff; } static void vbva_buffer_place_data_at(struct vbva_buf_ctx *vbva_ctx, const void *p, u32 len, u32 offset) { struct vbva_buffer *vbva = vbva_ctx->vbva; u32 bytes_till_boundary = vbva->data_len - offset; u8 *dst = &vbva->data[offset]; s32 diff = len - bytes_till_boundary; if (diff <= 0) { /* Chunk will not cross buffer boundary. */ memcpy(dst, p, len); } else { /* Chunk crosses buffer boundary. 
*/ memcpy(dst, p, bytes_till_boundary); memcpy(&vbva->data[0], (u8 *)p + bytes_till_boundary, diff); } } static void vbva_buffer_flush(struct gen_pool *ctx) { struct vbva_flush *p; p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_FLUSH); if (!p) return; p->reserved = 0; hgsmi_buffer_submit(ctx, p); hgsmi_buffer_free(ctx, p); } bool vbva_write(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx, const void *p, u32 len) { struct vbva_record *record; struct vbva_buffer *vbva; u32 available; vbva = vbva_ctx->vbva; record = vbva_ctx->record; if (!vbva || vbva_ctx->buffer_overflow || !record || !(record->len_and_flags & VBVA_F_RECORD_PARTIAL)) return false; available = vbva_buffer_available(vbva); while (len > 0) { u32 chunk = len; if (chunk >= available) { vbva_buffer_flush(ctx); available = vbva_buffer_available(vbva); } if (chunk >= available) { if (WARN_ON(available <= vbva->partial_write_tresh)) { vbva_ctx->buffer_overflow = true; return false; } chunk = available - vbva->partial_write_tresh; } vbva_buffer_place_data_at(vbva_ctx, p, chunk, vbva->free_offset); vbva->free_offset = (vbva->free_offset + chunk) % vbva->data_len; record->len_and_flags += chunk; available -= chunk; len -= chunk; p += chunk; } return true; } static bool vbva_inform_host(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx, s32 screen, bool enable) { struct vbva_enable_ex *p; bool ret; p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_ENABLE); if (!p) return false; p->base.flags = enable ? 
VBVA_F_ENABLE : VBVA_F_DISABLE; p->base.offset = vbva_ctx->buffer_offset; p->base.result = VERR_NOT_SUPPORTED; if (screen >= 0) { p->base.flags |= VBVA_F_EXTENDED | VBVA_F_ABSOFFSET; p->screen_id = screen; } hgsmi_buffer_submit(ctx, p); if (enable) ret = p->base.result >= 0; else ret = true; hgsmi_buffer_free(ctx, p); return ret; } bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx, struct vbva_buffer *vbva, s32 screen) { bool ret = false; memset(vbva, 0, sizeof(*vbva)); vbva->partial_write_tresh = 256; vbva->data_len = vbva_ctx->buffer_length - sizeof(struct vbva_buffer); vbva_ctx->vbva = vbva; ret = vbva_inform_host(vbva_ctx, ctx, screen, true); if (!ret) vbva_disable(vbva_ctx, ctx, screen); return ret; } void vbva_disable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx, s32 screen) { vbva_ctx->buffer_overflow = false; vbva_ctx->record = NULL; vbva_ctx->vbva = NULL; vbva_inform_host(vbva_ctx, ctx, screen, false); } bool vbva_buffer_begin_update(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx) { struct vbva_record *record; u32 next; if (!vbva_ctx->vbva || !(vbva_ctx->vbva->host_flags.host_events & VBVA_F_MODE_ENABLED)) return false; WARN_ON(vbva_ctx->buffer_overflow || vbva_ctx->record); next = (vbva_ctx->vbva->record_free_index + 1) % VBVA_MAX_RECORDS; /* Flush if all slots in the records queue are used */ if (next == vbva_ctx->vbva->record_first_index) vbva_buffer_flush(ctx); /* If even after flush there is no place then fail the request */ if (next == vbva_ctx->vbva->record_first_index) return false; record = &vbva_ctx->vbva->records[vbva_ctx->vbva->record_free_index]; record->len_and_flags = VBVA_F_RECORD_PARTIAL; vbva_ctx->vbva->record_free_index = next; /* Remember which record we are using. 
*/ vbva_ctx->record = record; return true; } void vbva_buffer_end_update(struct vbva_buf_ctx *vbva_ctx) { struct vbva_record *record = vbva_ctx->record; WARN_ON(!vbva_ctx->vbva || !record || !(record->len_and_flags & VBVA_F_RECORD_PARTIAL)); /* Mark the record completed. */ record->len_and_flags &= ~VBVA_F_RECORD_PARTIAL; vbva_ctx->buffer_overflow = false; vbva_ctx->record = NULL; } void vbva_setup_buffer_context(struct vbva_buf_ctx *vbva_ctx, u32 buffer_offset, u32 buffer_length) { vbva_ctx->buffer_offset = buffer_offset; vbva_ctx->buffer_length = buffer_length; }
linux-master
drivers/gpu/drm/vboxvideo/vbva_base.c
// SPDX-License-Identifier: MIT /* * Copyright (C) 2016-2017 Oracle Corporation * This file is based on qxl_irq.c * Copyright 2013 Red Hat Inc. * Authors: Dave Airlie * Alon Levy * Michael Thayer <[email protected], * Hans de Goede <[email protected]> */ #include <linux/pci.h> #include <drm/drm_drv.h> #include <drm/drm_probe_helper.h> #include "vbox_drv.h" #include "vboxvideo.h" static void vbox_clear_irq(void) { outl((u32)~0, VGA_PORT_HGSMI_HOST); } static u32 vbox_get_flags(struct vbox_private *vbox) { return readl(vbox->guest_heap + HOST_FLAGS_OFFSET); } void vbox_report_hotplug(struct vbox_private *vbox) { schedule_work(&vbox->hotplug_work); } static irqreturn_t vbox_irq_handler(int irq, void *arg) { struct drm_device *dev = (struct drm_device *)arg; struct vbox_private *vbox = to_vbox_dev(dev); u32 host_flags = vbox_get_flags(vbox); if (!(host_flags & HGSMIHOSTFLAGS_IRQ)) return IRQ_NONE; /* * Due to a bug in the initial host implementation of hot-plug irqs, * the hot-plug and cursor capability flags were never cleared. * Fortunately we can tell when they would have been set by checking * that the VSYNC flag is not set. */ if (host_flags & (HGSMIHOSTFLAGS_HOTPLUG | HGSMIHOSTFLAGS_CURSOR_CAPABILITIES) && !(host_flags & HGSMIHOSTFLAGS_VSYNC)) vbox_report_hotplug(vbox); vbox_clear_irq(); return IRQ_HANDLED; } /* * Check that the position hints provided by the host are suitable for GNOME * shell (i.e. all screens disjoint and hints for all enabled screens) and if * not replace them with default ones. Providing valid hints improves the * chances that we will get a known screen layout for pointer mapping. 
*/ static void validate_or_set_position_hints(struct vbox_private *vbox) { struct vbva_modehint *hintsi, *hintsj; bool valid = true; u16 currentx = 0; int i, j; for (i = 0; i < vbox->num_crtcs; ++i) { for (j = 0; j < i; ++j) { hintsi = &vbox->last_mode_hints[i]; hintsj = &vbox->last_mode_hints[j]; if (hintsi->enabled && hintsj->enabled) { if (hintsi->dx >= 0xffff || hintsi->dy >= 0xffff || hintsj->dx >= 0xffff || hintsj->dy >= 0xffff || (hintsi->dx < hintsj->dx + (hintsj->cx & 0x8fff) && hintsi->dx + (hintsi->cx & 0x8fff) > hintsj->dx) || (hintsi->dy < hintsj->dy + (hintsj->cy & 0x8fff) && hintsi->dy + (hintsi->cy & 0x8fff) > hintsj->dy)) valid = false; } } } if (!valid) for (i = 0; i < vbox->num_crtcs; ++i) { if (vbox->last_mode_hints[i].enabled) { vbox->last_mode_hints[i].dx = currentx; vbox->last_mode_hints[i].dy = 0; currentx += vbox->last_mode_hints[i].cx & 0x8fff; } } } /* Query the host for the most recent video mode hints. */ static void vbox_update_mode_hints(struct vbox_private *vbox) { struct drm_connector_list_iter conn_iter; struct drm_device *dev = &vbox->ddev; struct drm_connector *connector; struct vbox_connector *vbox_conn; struct vbva_modehint *hints; u16 flags; bool disconnected; unsigned int crtc_id; int ret; ret = hgsmi_get_mode_hints(vbox->guest_pool, vbox->num_crtcs, vbox->last_mode_hints); if (ret) { DRM_ERROR("vboxvideo: hgsmi_get_mode_hints failed: %d\n", ret); return; } validate_or_set_position_hints(vbox); drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { vbox_conn = to_vbox_connector(connector); hints = &vbox->last_mode_hints[vbox_conn->vbox_crtc->crtc_id]; if (hints->magic != VBVAMODEHINT_MAGIC) continue; disconnected = !(hints->enabled); crtc_id = vbox_conn->vbox_crtc->crtc_id; vbox_conn->mode_hint.width = hints->cx; vbox_conn->mode_hint.height = hints->cy; vbox_conn->vbox_crtc->x_hint = hints->dx; 
vbox_conn->vbox_crtc->y_hint = hints->dy; vbox_conn->mode_hint.disconnected = disconnected; if (vbox_conn->vbox_crtc->disconnected == disconnected) continue; if (disconnected) flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_DISABLED; else flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_BLANK; hgsmi_process_display_info(vbox->guest_pool, crtc_id, 0, 0, 0, hints->cx * 4, hints->cx, hints->cy, 0, flags); vbox_conn->vbox_crtc->disconnected = disconnected; } drm_connector_list_iter_end(&conn_iter); drm_modeset_unlock(&dev->mode_config.connection_mutex); } static void vbox_hotplug_worker(struct work_struct *work) { struct vbox_private *vbox = container_of(work, struct vbox_private, hotplug_work); vbox_update_mode_hints(vbox); drm_kms_helper_hotplug_event(&vbox->ddev); } int vbox_irq_init(struct vbox_private *vbox) { struct drm_device *dev = &vbox->ddev; struct pci_dev *pdev = to_pci_dev(dev->dev); INIT_WORK(&vbox->hotplug_work, vbox_hotplug_worker); vbox_update_mode_hints(vbox); /* PCI devices require shared interrupts. */ return request_irq(pdev->irq, vbox_irq_handler, IRQF_SHARED, dev->driver->name, dev); } void vbox_irq_fini(struct vbox_private *vbox) { struct drm_device *dev = &vbox->ddev; struct pci_dev *pdev = to_pci_dev(dev->dev); free_irq(pdev->irq, dev); flush_work(&vbox->hotplug_work); }
linux-master
drivers/gpu/drm/vboxvideo/vbox_irq.c
// SPDX-License-Identifier: MIT /* * Copyright (C) 2013-2017 Oracle Corporation * This file is based on ast_mode.c * Copyright 2012 Red Hat Inc. * Parts based on xf86-video-ast * Copyright (c) 2005 ASPEED Technology Inc. * Authors: Dave Airlie <[email protected]> * Michael Thayer <[email protected], * Hans de Goede <[email protected]> */ #include <linux/iosys-map.h> #include <linux/export.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_edid.h> #include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_plane_helper.h> #include <drm/drm_probe_helper.h> #include "hgsmi_channels.h" #include "vbox_drv.h" #include "vboxvideo.h" /* * Set a graphics mode. Poke any required values into registers, do an HGSMI * mode set and tell the host we support advanced graphics functions. */ static void vbox_do_modeset(struct drm_crtc *crtc) { struct drm_framebuffer *fb = crtc->primary->state->fb; struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc); struct vbox_private *vbox; int width, height, bpp, pitch; u16 flags; s32 x_offset, y_offset; vbox = to_vbox_dev(crtc->dev); width = vbox_crtc->width ? vbox_crtc->width : 640; height = vbox_crtc->height ? vbox_crtc->height : 480; bpp = fb ? fb->format->cpp[0] * 8 : 32; pitch = fb ? fb->pitches[0] : width * bpp / 8; x_offset = vbox->single_framebuffer ? vbox_crtc->x : vbox_crtc->x_hint; y_offset = vbox->single_framebuffer ? vbox_crtc->y : vbox_crtc->y_hint; /* * This is the old way of setting graphics modes. It assumed one screen * and a frame-buffer at the start of video RAM. On older versions of * VirtualBox, certain parts of the code still assume that the first * screen is programmed this way, so try to fake it. 
*/ if (vbox_crtc->crtc_id == 0 && fb && vbox_crtc->fb_offset / pitch < 0xffff - crtc->y && vbox_crtc->fb_offset % (bpp / 8) == 0) { vbox_write_ioport(VBE_DISPI_INDEX_XRES, width); vbox_write_ioport(VBE_DISPI_INDEX_YRES, height); vbox_write_ioport(VBE_DISPI_INDEX_VIRT_WIDTH, pitch * 8 / bpp); vbox_write_ioport(VBE_DISPI_INDEX_BPP, bpp); vbox_write_ioport(VBE_DISPI_INDEX_ENABLE, VBE_DISPI_ENABLED); vbox_write_ioport(VBE_DISPI_INDEX_X_OFFSET, vbox_crtc->fb_offset % pitch / bpp * 8 + vbox_crtc->x); vbox_write_ioport(VBE_DISPI_INDEX_Y_OFFSET, vbox_crtc->fb_offset / pitch + vbox_crtc->y); } flags = VBVA_SCREEN_F_ACTIVE; flags |= (fb && crtc->state->enable) ? 0 : VBVA_SCREEN_F_BLANK; flags |= vbox_crtc->disconnected ? VBVA_SCREEN_F_DISABLED : 0; hgsmi_process_display_info(vbox->guest_pool, vbox_crtc->crtc_id, x_offset, y_offset, vbox_crtc->x * bpp / 8 + vbox_crtc->y * pitch, pitch, width, height, bpp, flags); } static int vbox_set_view(struct drm_crtc *crtc) { struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc); struct vbox_private *vbox = to_vbox_dev(crtc->dev); struct vbva_infoview *p; /* * Tell the host about the view. This design originally targeted the * Windows XP driver architecture and assumed that each screen would * have a dedicated frame buffer with the command buffer following it, * the whole being a "view". The host works out which screen a command * buffer belongs to by checking whether it is in the first view, then * whether it is in the second and so on. The first match wins. We * cheat around this by making the first view be the managed memory * plus the first command buffer, the second the same plus the second * buffer and so on. 
*/ p = hgsmi_buffer_alloc(vbox->guest_pool, sizeof(*p), HGSMI_CH_VBVA, VBVA_INFO_VIEW); if (!p) return -ENOMEM; p->view_index = vbox_crtc->crtc_id; p->view_offset = vbox_crtc->fb_offset; p->view_size = vbox->available_vram_size - vbox_crtc->fb_offset + vbox_crtc->crtc_id * VBVA_MIN_BUFFER_SIZE; p->max_screen_size = vbox->available_vram_size - vbox_crtc->fb_offset; hgsmi_buffer_submit(vbox->guest_pool, p); hgsmi_buffer_free(vbox->guest_pool, p); return 0; } /* * Try to map the layout of virtual screens to the range of the input device. * Return true if we need to re-set the crtc modes due to screen offset * changes. */ static bool vbox_set_up_input_mapping(struct vbox_private *vbox) { struct drm_crtc *crtci; struct drm_connector *connectori; struct drm_framebuffer *fb, *fb1 = NULL; bool single_framebuffer = true; bool old_single_framebuffer = vbox->single_framebuffer; u16 width = 0, height = 0; /* * Are we using an X.Org-style single large frame-buffer for all crtcs? * If so then screen layout can be deduced from the crtc offsets. * Same fall-back if this is the fbdev frame-buffer. */ list_for_each_entry(crtci, &vbox->ddev.mode_config.crtc_list, head) { fb = crtci->primary->state->fb; if (!fb) continue; if (!fb1) { fb1 = fb; if (fb1 == vbox->ddev.fb_helper->fb) break; } else if (fb != fb1) { single_framebuffer = false; } } if (!fb1) return false; if (single_framebuffer) { vbox->single_framebuffer = true; vbox->input_mapping_width = fb1->width; vbox->input_mapping_height = fb1->height; return old_single_framebuffer != vbox->single_framebuffer; } /* Otherwise calculate the total span of all screens. 
*/ list_for_each_entry(connectori, &vbox->ddev.mode_config.connector_list, head) { struct vbox_connector *vbox_connector = to_vbox_connector(connectori); struct vbox_crtc *vbox_crtc = vbox_connector->vbox_crtc; width = max_t(u16, width, vbox_crtc->x_hint + vbox_connector->mode_hint.width); height = max_t(u16, height, vbox_crtc->y_hint + vbox_connector->mode_hint.height); } vbox->single_framebuffer = false; vbox->input_mapping_width = width; vbox->input_mapping_height = height; return old_single_framebuffer != vbox->single_framebuffer; } static void vbox_crtc_set_base_and_mode(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y) { struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(fb->obj[0]); struct vbox_private *vbox = to_vbox_dev(crtc->dev); struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc); bool needs_modeset = drm_atomic_crtc_needs_modeset(crtc->state); mutex_lock(&vbox->hw_mutex); if (crtc->state->enable) { vbox_crtc->width = crtc->state->mode.hdisplay; vbox_crtc->height = crtc->state->mode.vdisplay; } vbox_crtc->x = x; vbox_crtc->y = y; vbox_crtc->fb_offset = drm_gem_vram_offset(gbo); /* vbox_do_modeset() checks vbox->single_framebuffer so update it now */ if (needs_modeset && vbox_set_up_input_mapping(vbox)) { struct drm_crtc *crtci; list_for_each_entry(crtci, &vbox->ddev.mode_config.crtc_list, head) { if (crtci == crtc) continue; vbox_do_modeset(crtci); } } vbox_set_view(crtc); vbox_do_modeset(crtc); if (needs_modeset) hgsmi_update_input_mapping(vbox->guest_pool, 0, 0, vbox->input_mapping_width, vbox->input_mapping_height); mutex_unlock(&vbox->hw_mutex); } static void vbox_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state) { } static void vbox_crtc_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state) { } static void vbox_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state) { } static const struct drm_crtc_helper_funcs vbox_crtc_helper_funcs = { .atomic_enable = 
vbox_crtc_atomic_enable, .atomic_disable = vbox_crtc_atomic_disable, .atomic_flush = vbox_crtc_atomic_flush, }; static void vbox_crtc_destroy(struct drm_crtc *crtc) { drm_crtc_cleanup(crtc); kfree(crtc); } static const struct drm_crtc_funcs vbox_crtc_funcs = { .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, /* .gamma_set = vbox_crtc_gamma_set, */ .destroy = vbox_crtc_destroy, .reset = drm_atomic_helper_crtc_reset, .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, }; static int vbox_primary_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state) { struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); struct drm_crtc_state *crtc_state = NULL; if (new_state->crtc) { crtc_state = drm_atomic_get_existing_crtc_state(state, new_state->crtc); if (WARN_ON(!crtc_state)) return -EINVAL; } return drm_atomic_helper_check_plane_state(new_state, crtc_state, DRM_PLANE_NO_SCALING, DRM_PLANE_NO_SCALING, false, true); } static void vbox_primary_atomic_update(struct drm_plane *plane, struct drm_atomic_state *state) { struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); struct drm_crtc *crtc = new_state->crtc; struct drm_framebuffer *fb = new_state->fb; struct vbox_private *vbox = to_vbox_dev(fb->dev); struct drm_mode_rect *clips; uint32_t num_clips, i; vbox_crtc_set_base_and_mode(crtc, fb, new_state->src_x >> 16, new_state->src_y >> 16); /* Send information about dirty rectangles to VBVA. 
*/ clips = drm_plane_get_damage_clips(new_state); num_clips = drm_plane_get_damage_clips_count(new_state); if (!num_clips) return; mutex_lock(&vbox->hw_mutex); for (i = 0; i < num_clips; ++i, ++clips) { struct vbva_cmd_hdr cmd_hdr; unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id; cmd_hdr.x = (s16)clips->x1; cmd_hdr.y = (s16)clips->y1; cmd_hdr.w = (u16)clips->x2 - clips->x1; cmd_hdr.h = (u16)clips->y2 - clips->y1; if (!vbva_buffer_begin_update(&vbox->vbva_info[crtc_id], vbox->guest_pool)) continue; vbva_write(&vbox->vbva_info[crtc_id], vbox->guest_pool, &cmd_hdr, sizeof(cmd_hdr)); vbva_buffer_end_update(&vbox->vbva_info[crtc_id]); } mutex_unlock(&vbox->hw_mutex); } static void vbox_primary_atomic_disable(struct drm_plane *plane, struct drm_atomic_state *state) { struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane); struct drm_crtc *crtc = old_state->crtc; /* vbox_do_modeset checks plane->state->fb and will disable if NULL */ vbox_crtc_set_base_and_mode(crtc, old_state->fb, old_state->src_x >> 16, old_state->src_y >> 16); } static int vbox_cursor_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state) { struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); struct drm_crtc_state *crtc_state = NULL; u32 width = new_state->crtc_w; u32 height = new_state->crtc_h; int ret; if (new_state->crtc) { crtc_state = drm_atomic_get_existing_crtc_state(state, new_state->crtc); if (WARN_ON(!crtc_state)) return -EINVAL; } ret = drm_atomic_helper_check_plane_state(new_state, crtc_state, DRM_PLANE_NO_SCALING, DRM_PLANE_NO_SCALING, true, true); if (ret) return ret; if (!new_state->fb) return 0; if (width > VBOX_MAX_CURSOR_WIDTH || height > VBOX_MAX_CURSOR_HEIGHT || width == 0 || height == 0) return -EINVAL; return 0; } /* * Copy the ARGB image and generate the mask, which is needed in case the host * does not support ARGB cursors. 
The mask is a 1BPP bitmap with the bit set * if the corresponding alpha value in the ARGB image is greater than 0xF0. */ static void copy_cursor_image(u8 *src, u8 *dst, u32 width, u32 height, size_t mask_size) { size_t line_size = (width + 7) / 8; u32 i, j; memcpy(dst + mask_size, src, width * height * 4); for (i = 0; i < height; ++i) for (j = 0; j < width; ++j) if (((u32 *)src)[i * width + j] > 0xf0000000) dst[i * line_size + j / 8] |= (0x80 >> (j % 8)); } static void vbox_cursor_atomic_update(struct drm_plane *plane, struct drm_atomic_state *state) { struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane); struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); struct vbox_private *vbox = container_of(plane->dev, struct vbox_private, ddev); struct vbox_crtc *vbox_crtc = to_vbox_crtc(new_state->crtc); struct drm_framebuffer *fb = new_state->fb; u32 width = new_state->crtc_w; u32 height = new_state->crtc_h; struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(new_state); struct iosys_map map = shadow_plane_state->data[0]; u8 *src = map.vaddr; /* TODO: Use mapping abstraction properly */ size_t data_size, mask_size; u32 flags; /* * VirtualBox uses the host windowing system to draw the cursor so * moves are a no-op, we only need to upload new cursor sprites. */ if (fb == old_state->fb) return; mutex_lock(&vbox->hw_mutex); vbox_crtc->cursor_enabled = true; /* * The mask must be calculated based on the alpha * channel, one bit per ARGB word, and must be 32-bit * padded. 
*/ mask_size = ((width + 7) / 8 * height + 3) & ~3; data_size = width * height * 4 + mask_size; copy_cursor_image(src, vbox->cursor_data, width, height, mask_size); flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE | VBOX_MOUSE_POINTER_ALPHA; hgsmi_update_pointer_shape(vbox->guest_pool, flags, min_t(u32, max(fb->hot_x, 0), width), min_t(u32, max(fb->hot_y, 0), height), width, height, vbox->cursor_data, data_size); mutex_unlock(&vbox->hw_mutex); } static void vbox_cursor_atomic_disable(struct drm_plane *plane, struct drm_atomic_state *state) { struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane); struct vbox_private *vbox = container_of(plane->dev, struct vbox_private, ddev); struct vbox_crtc *vbox_crtc = to_vbox_crtc(old_state->crtc); bool cursor_enabled = false; struct drm_crtc *crtci; mutex_lock(&vbox->hw_mutex); vbox_crtc->cursor_enabled = false; list_for_each_entry(crtci, &vbox->ddev.mode_config.crtc_list, head) { if (to_vbox_crtc(crtci)->cursor_enabled) cursor_enabled = true; } if (!cursor_enabled) hgsmi_update_pointer_shape(vbox->guest_pool, 0, 0, 0, 0, 0, NULL, 0); mutex_unlock(&vbox->hw_mutex); } static const u32 vbox_cursor_plane_formats[] = { DRM_FORMAT_ARGB8888, }; static const struct drm_plane_helper_funcs vbox_cursor_helper_funcs = { .atomic_check = vbox_cursor_atomic_check, .atomic_update = vbox_cursor_atomic_update, .atomic_disable = vbox_cursor_atomic_disable, DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, }; static const struct drm_plane_funcs vbox_cursor_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = drm_plane_helper_destroy, DRM_GEM_SHADOW_PLANE_FUNCS, }; static const u32 vbox_primary_plane_formats[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_ARGB8888, }; static const struct drm_plane_helper_funcs vbox_primary_helper_funcs = { .atomic_check = vbox_primary_atomic_check, .atomic_update = vbox_primary_atomic_update, .atomic_disable = 
vbox_primary_atomic_disable, DRM_GEM_VRAM_PLANE_HELPER_FUNCS, }; static const struct drm_plane_funcs vbox_primary_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = drm_plane_helper_destroy, .reset = drm_atomic_helper_plane_reset, .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, }; static struct drm_plane *vbox_create_plane(struct vbox_private *vbox, unsigned int possible_crtcs, enum drm_plane_type type) { const struct drm_plane_helper_funcs *helper_funcs = NULL; const struct drm_plane_funcs *funcs; struct drm_plane *plane; const u32 *formats; int num_formats; int err; if (type == DRM_PLANE_TYPE_PRIMARY) { funcs = &vbox_primary_plane_funcs; formats = vbox_primary_plane_formats; helper_funcs = &vbox_primary_helper_funcs; num_formats = ARRAY_SIZE(vbox_primary_plane_formats); } else if (type == DRM_PLANE_TYPE_CURSOR) { funcs = &vbox_cursor_plane_funcs; formats = vbox_cursor_plane_formats; helper_funcs = &vbox_cursor_helper_funcs; num_formats = ARRAY_SIZE(vbox_cursor_plane_formats); } else { return ERR_PTR(-EINVAL); } plane = kzalloc(sizeof(*plane), GFP_KERNEL); if (!plane) return ERR_PTR(-ENOMEM); err = drm_universal_plane_init(&vbox->ddev, plane, possible_crtcs, funcs, formats, num_formats, NULL, type, NULL); if (err) goto free_plane; drm_plane_helper_add(plane, helper_funcs); return plane; free_plane: kfree(plane); return ERR_PTR(-EINVAL); } static struct vbox_crtc *vbox_crtc_init(struct drm_device *dev, unsigned int i) { struct vbox_private *vbox = container_of(dev, struct vbox_private, ddev); struct drm_plane *cursor = NULL; struct vbox_crtc *vbox_crtc; struct drm_plane *primary; u32 caps = 0; int ret; ret = hgsmi_query_conf(vbox->guest_pool, VBOX_VBVA_CONF32_CURSOR_CAPABILITIES, &caps); if (ret) return ERR_PTR(ret); vbox_crtc = kzalloc(sizeof(*vbox_crtc), GFP_KERNEL); if (!vbox_crtc) return ERR_PTR(-ENOMEM); 
primary = vbox_create_plane(vbox, 1 << i, DRM_PLANE_TYPE_PRIMARY); if (IS_ERR(primary)) { ret = PTR_ERR(primary); goto free_mem; } if ((caps & VBOX_VBVA_CURSOR_CAPABILITY_HARDWARE)) { cursor = vbox_create_plane(vbox, 1 << i, DRM_PLANE_TYPE_CURSOR); if (IS_ERR(cursor)) { ret = PTR_ERR(cursor); goto clean_primary; } } else { DRM_WARN("VirtualBox host is too old, no cursor support\n"); } vbox_crtc->crtc_id = i; ret = drm_crtc_init_with_planes(dev, &vbox_crtc->base, primary, cursor, &vbox_crtc_funcs, NULL); if (ret) goto clean_cursor; drm_mode_crtc_set_gamma_size(&vbox_crtc->base, 256); drm_crtc_helper_add(&vbox_crtc->base, &vbox_crtc_helper_funcs); return vbox_crtc; clean_cursor: if (cursor) { drm_plane_cleanup(cursor); kfree(cursor); } clean_primary: drm_plane_cleanup(primary); kfree(primary); free_mem: kfree(vbox_crtc); return ERR_PTR(ret); } static void vbox_encoder_destroy(struct drm_encoder *encoder) { drm_encoder_cleanup(encoder); kfree(encoder); } static const struct drm_encoder_funcs vbox_enc_funcs = { .destroy = vbox_encoder_destroy, }; static struct drm_encoder *vbox_encoder_init(struct drm_device *dev, unsigned int i) { struct vbox_encoder *vbox_encoder; vbox_encoder = kzalloc(sizeof(*vbox_encoder), GFP_KERNEL); if (!vbox_encoder) return NULL; drm_encoder_init(dev, &vbox_encoder->base, &vbox_enc_funcs, DRM_MODE_ENCODER_DAC, NULL); vbox_encoder->base.possible_crtcs = 1 << i; return &vbox_encoder->base; } /* * Generate EDID data with a mode-unique serial number for the virtual * monitor to try to persuade Unity that different modes correspond to * different monitors and it should not try to force the same resolution on * them. 
*/ static void vbox_set_edid(struct drm_connector *connector, int width, int height) { enum { EDID_SIZE = 128 }; unsigned char edid[EDID_SIZE] = { 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, /* header */ 0x58, 0x58, /* manufacturer (VBX) */ 0x00, 0x00, /* product code */ 0x00, 0x00, 0x00, 0x00, /* serial number goes here */ 0x01, /* week of manufacture */ 0x00, /* year of manufacture */ 0x01, 0x03, /* EDID version */ 0x80, /* capabilities - digital */ 0x00, /* horiz. res in cm, zero for projectors */ 0x00, /* vert. res in cm */ 0x78, /* display gamma (120 == 2.2). */ 0xEE, /* features (standby, suspend, off, RGB, std */ /* colour space, preferred timing mode) */ 0xEE, 0x91, 0xA3, 0x54, 0x4C, 0x99, 0x26, 0x0F, 0x50, 0x54, /* chromaticity for standard colour space. */ 0x00, 0x00, 0x00, /* no default timings */ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, /* no standard timings */ 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x06, 0x00, 0x02, 0x02, 0x02, 0x02, /* descriptor block 1 goes below */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* descriptor block 2, monitor ranges */ 0x00, 0x00, 0x00, 0xFD, 0x00, 0x00, 0xC8, 0x00, 0xC8, 0x64, 0x00, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, /* 0-200Hz vertical, 0-200KHz horizontal, 1000MHz pixel clock */ 0x20, /* descriptor block 3, monitor name */ 0x00, 0x00, 0x00, 0xFC, 0x00, 'V', 'B', 'O', 'X', ' ', 'm', 'o', 'n', 'i', 't', 'o', 'r', '\n', /* descriptor block 4: dummy data */ 0x00, 0x00, 0x00, 0x10, 0x00, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, /* number of extensions */ 0x00 /* checksum goes here */ }; int clock = (width + 6) * (height + 6) * 60 / 10000; unsigned int i, sum = 0; edid[12] = width & 0xff; edid[13] = width >> 8; edid[14] = height & 0xff; edid[15] = height >> 8; edid[54] = clock & 0xff; edid[55] = clock >> 8; edid[56] = width & 0xff; edid[58] = (width >> 4) & 0xf0; edid[59] = height & 0xff; edid[61] = (height >> 4) & 0xf0; for (i = 
0; i < EDID_SIZE - 1; ++i) sum += edid[i]; edid[EDID_SIZE - 1] = (0x100 - (sum & 0xFF)) & 0xFF; drm_connector_update_edid_property(connector, (struct edid *)edid); } static int vbox_get_modes(struct drm_connector *connector) { struct vbox_connector *vbox_connector = NULL; struct drm_display_mode *mode = NULL; struct vbox_private *vbox = NULL; unsigned int num_modes = 0; int preferred_width, preferred_height; vbox_connector = to_vbox_connector(connector); vbox = to_vbox_dev(connector->dev); hgsmi_report_flags_location(vbox->guest_pool, GUEST_HEAP_OFFSET(vbox) + HOST_FLAGS_OFFSET); if (vbox_connector->vbox_crtc->crtc_id == 0) vbox_report_caps(vbox); num_modes = drm_add_modes_noedid(connector, 2560, 1600); preferred_width = vbox_connector->mode_hint.width ? vbox_connector->mode_hint.width : 1024; preferred_height = vbox_connector->mode_hint.height ? vbox_connector->mode_hint.height : 768; mode = drm_cvt_mode(connector->dev, preferred_width, preferred_height, 60, false, false, false); if (mode) { mode->type |= DRM_MODE_TYPE_PREFERRED; drm_mode_probed_add(connector, mode); ++num_modes; } vbox_set_edid(connector, preferred_width, preferred_height); if (vbox_connector->vbox_crtc->x_hint != -1) drm_object_property_set_value(&connector->base, vbox->ddev.mode_config.suggested_x_property, vbox_connector->vbox_crtc->x_hint); else drm_object_property_set_value(&connector->base, vbox->ddev.mode_config.suggested_x_property, 0); if (vbox_connector->vbox_crtc->y_hint != -1) drm_object_property_set_value(&connector->base, vbox->ddev.mode_config.suggested_y_property, vbox_connector->vbox_crtc->y_hint); else drm_object_property_set_value(&connector->base, vbox->ddev.mode_config.suggested_y_property, 0); return num_modes; } static void vbox_connector_destroy(struct drm_connector *connector) { drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(connector); } static enum drm_connector_status vbox_connector_detect(struct drm_connector *connector, bool force) { 
struct vbox_connector *vbox_connector; vbox_connector = to_vbox_connector(connector); return vbox_connector->mode_hint.disconnected ? connector_status_disconnected : connector_status_connected; } static int vbox_fill_modes(struct drm_connector *connector, u32 max_x, u32 max_y) { struct vbox_connector *vbox_connector; struct drm_device *dev; struct drm_display_mode *mode, *iterator; vbox_connector = to_vbox_connector(connector); dev = vbox_connector->base.dev; list_for_each_entry_safe(mode, iterator, &connector->modes, head) { list_del(&mode->head); drm_mode_destroy(dev, mode); } return drm_helper_probe_single_connector_modes(connector, max_x, max_y); } static const struct drm_connector_helper_funcs vbox_connector_helper_funcs = { .get_modes = vbox_get_modes, }; static const struct drm_connector_funcs vbox_connector_funcs = { .detect = vbox_connector_detect, .fill_modes = vbox_fill_modes, .destroy = vbox_connector_destroy, .reset = drm_atomic_helper_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; static int vbox_connector_init(struct drm_device *dev, struct vbox_crtc *vbox_crtc, struct drm_encoder *encoder) { struct vbox_connector *vbox_connector; struct drm_connector *connector; vbox_connector = kzalloc(sizeof(*vbox_connector), GFP_KERNEL); if (!vbox_connector) return -ENOMEM; connector = &vbox_connector->base; vbox_connector->vbox_crtc = vbox_crtc; drm_connector_init(dev, connector, &vbox_connector_funcs, DRM_MODE_CONNECTOR_VGA); drm_connector_helper_add(connector, &vbox_connector_helper_funcs); connector->interlace_allowed = 0; connector->doublescan_allowed = 0; drm_mode_create_suggested_offset_properties(dev); drm_object_attach_property(&connector->base, dev->mode_config.suggested_x_property, 0); drm_object_attach_property(&connector->base, dev->mode_config.suggested_y_property, 0); drm_connector_attach_encoder(connector, encoder); return 0; } static 
const struct drm_mode_config_funcs vbox_mode_funcs = { .fb_create = drm_gem_fb_create_with_dirty, .mode_valid = drm_vram_helper_mode_valid, .atomic_check = drm_atomic_helper_check, .atomic_commit = drm_atomic_helper_commit, }; int vbox_mode_init(struct vbox_private *vbox) { struct drm_device *dev = &vbox->ddev; struct drm_encoder *encoder; struct vbox_crtc *vbox_crtc; unsigned int i; int ret; drm_mode_config_init(dev); dev->mode_config.funcs = (void *)&vbox_mode_funcs; dev->mode_config.min_width = 0; dev->mode_config.min_height = 0; dev->mode_config.preferred_depth = 24; dev->mode_config.max_width = VBE_DISPI_MAX_XRES; dev->mode_config.max_height = VBE_DISPI_MAX_YRES; for (i = 0; i < vbox->num_crtcs; ++i) { vbox_crtc = vbox_crtc_init(dev, i); if (IS_ERR(vbox_crtc)) { ret = PTR_ERR(vbox_crtc); goto err_drm_mode_cleanup; } encoder = vbox_encoder_init(dev, i); if (!encoder) { ret = -ENOMEM; goto err_drm_mode_cleanup; } ret = vbox_connector_init(dev, vbox_crtc, encoder); if (ret) goto err_drm_mode_cleanup; } drm_mode_config_reset(dev); return 0; err_drm_mode_cleanup: drm_mode_config_cleanup(dev); return ret; } void vbox_mode_fini(struct vbox_private *vbox) { drm_mode_config_cleanup(&vbox->ddev); }
linux-master
drivers/gpu/drm/vboxvideo/vbox_mode.c
// SPDX-License-Identifier: MIT
/* Copyright (C) 2006-2017 Oracle Corporation */

#include <linux/vbox_err.h>
#include "vbox_drv.h"
#include "vboxvideo_guest.h"
#include "vboxvideo_vbe.h"
#include "hgsmi_channels.h"

/**
 * hgsmi_process_display_info - Set a video mode via an HGSMI request.
 *                              The views must have been initialised first
 *                              using @a VBoxHGSMISendViewInfo and if the mode
 *                              is being set on the first display then it must
 *                              be set first using registers.
 * @ctx:          The context containing the heap to use.
 * @display:      The screen number.
 * @origin_x:     The horizontal displacement relative to the first scrn.
 * @origin_y:     The vertical displacement relative to the first screen.
 * @start_offset: The offset of the visible area of the framebuffer
 *                relative to the framebuffer start.
 * @pitch:        The offset in bytes between the starts of two adjecent
 *                scan lines in video RAM.
 * @width:        The mode width.
 * @height:       The mode height.
 * @bpp:          The colour depth of the mode.
 * @flags:        Flags.
 */
void hgsmi_process_display_info(struct gen_pool *ctx, u32 display,
				s32 origin_x, s32 origin_y, u32 start_offset,
				u32 pitch, u32 width, u32 height,
				u16 bpp, u16 flags)
{
	struct vbva_infoscreen *p;

	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
			       VBVA_INFO_SCREEN);
	/* void return: allocation failure here is silently dropped */
	if (!p)
		return;

	/* Fill the command before submitting it to the host. */
	p->view_index = display;
	p->origin_x = origin_x;
	p->origin_y = origin_y;
	p->start_offset = start_offset;
	p->line_size = pitch;
	p->width = width;
	p->height = height;
	p->bits_per_pixel = bpp;
	p->flags = flags;

	hgsmi_buffer_submit(ctx, p);
	hgsmi_buffer_free(ctx, p);
}

/**
 * hgsmi_update_input_mapping - Report the rectangle relative to which absolute
 *                              pointer events should be expressed.  This
 *                              information remains valid until the next VBVA
 *                              resize event for any screen, at which time it is
 *                              reset to the bounding rectangle of all virtual
 *                              screens.
 * Return: 0 or negative errno value.
 * @ctx:      The context containing the heap to use.
 * @origin_x: Upper left X co-ordinate relative to the first screen.
 * @origin_y: Upper left Y co-ordinate relative to the first screen.
 * @width:    Rectangle width.
 * @height:   Rectangle height.
 */
int hgsmi_update_input_mapping(struct gen_pool *ctx, s32 origin_x, s32 origin_y,
			       u32 width, u32 height)
{
	struct vbva_report_input_mapping *p;

	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
			       VBVA_REPORT_INPUT_MAPPING);
	if (!p)
		return -ENOMEM;

	p->x = origin_x;
	p->y = origin_y;
	p->cx = width;
	p->cy = height;

	hgsmi_buffer_submit(ctx, p);
	hgsmi_buffer_free(ctx, p);

	return 0;
}

/**
 * hgsmi_get_mode_hints - Get most recent video mode hints.
 * Return: 0 or negative errno value.
 * @ctx:     The context containing the heap to use.
 * @screens: The number of screens to query hints for, starting at 0.
 * @hints:   Array of vbva_modehint structures for receiving the hints.
 */
int hgsmi_get_mode_hints(struct gen_pool *ctx, unsigned int screens,
			 struct vbva_modehint *hints)
{
	struct vbva_query_mode_hints *p;
	size_t size;

	if (WARN_ON(!hints))
		return -EINVAL;

	/* The hint array is appended directly after the request header. */
	size = screens * sizeof(struct vbva_modehint);
	p = hgsmi_buffer_alloc(ctx, sizeof(*p) + size, HGSMI_CH_VBVA,
			       VBVA_QUERY_MODE_HINTS);
	if (!p)
		return -ENOMEM;

	p->hints_queried_count = screens;
	p->hint_structure_guest_size = sizeof(struct vbva_modehint);
	/* Pre-set an error code; the host overwrites it on success. */
	p->rc = VERR_NOT_SUPPORTED;

	hgsmi_buffer_submit(ctx, p);

	if (p->rc < 0) {
		hgsmi_buffer_free(ctx, p);
		return -EIO;
	}

	/* Copy the host-written hints out before releasing the buffer. */
	memcpy(hints, ((u8 *)p) + sizeof(struct vbva_query_mode_hints), size);
	hgsmi_buffer_free(ctx, p);

	return 0;
}
linux-master
drivers/gpu/drm/vboxvideo/modesetting.c
// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2017 Oracle Corporation
 * Authors: Hans de Goede <[email protected]>
 */

#include "vbox_drv.h"
#include "vboxvideo_vbe.h"
#include "hgsmi_defs.h"

/* One-at-a-Time Hash from https://www.burtleburtle.net/bob/hash/doobs.html */
static u32 hgsmi_hash_process(u32 hash, const u8 *data, int size)
{
	while (size--) {
		hash += *data++;
		hash += (hash << 10);
		hash ^= (hash >> 6);
	}

	return hash;
}

/* Final avalanche step of the one-at-a-time hash. */
static u32 hgsmi_hash_end(u32 hash)
{
	hash += (hash << 3);
	hash ^= (hash >> 11);
	hash += (hash << 15);

	return hash;
}

/* Not really a checksum but that is the naming used in all vbox code */
static u32 hgsmi_checksum(u32 offset,
			  const struct hgsmi_buffer_header *header,
			  const struct hgsmi_buffer_tail *tail)
{
	u32 checksum;

	/* Hash covers: buffer offset, full header, then the tail ... */
	checksum = hgsmi_hash_process(0, (u8 *)&offset, sizeof(offset));
	checksum = hgsmi_hash_process(checksum, (u8 *)header, sizeof(*header));
	/* 4 -> Do not checksum the checksum itself */
	checksum = hgsmi_hash_process(checksum, (u8 *)tail, 4);

	return hgsmi_hash_end(checksum);
}

/*
 * Allocate a guest-heap buffer for an HGSMI command.  The caller gets a
 * pointer to the payload area; the header and tail framing (including the
 * checksum) are filled in here.  Returns NULL on allocation failure.
 * Free with hgsmi_buffer_free().
 */
void *hgsmi_buffer_alloc(struct gen_pool *guest_pool, size_t size,
			 u8 channel, u16 channel_info)
{
	struct hgsmi_buffer_header *h;
	struct hgsmi_buffer_tail *t;
	size_t total_size;
	dma_addr_t offset;

	/* Layout in the pool: [header][payload of 'size' bytes][tail] */
	total_size = size + sizeof(*h) + sizeof(*t);
	h = gen_pool_dma_alloc(guest_pool, total_size, &offset);
	if (!h)
		return NULL;

	t = (struct hgsmi_buffer_tail *)((u8 *)h + sizeof(*h) + size);

	h->flags = HGSMI_BUFFER_HEADER_F_SEQ_SINGLE;
	h->data_size = size;
	h->channel = channel;
	h->channel_info = channel_info;
	memset(&h->u.header_data, 0, sizeof(h->u.header_data));

	t->reserved = 0;
	/* Checksum must be computed last, over the now-complete framing. */
	t->checksum = hgsmi_checksum(offset, h, t);

	/* Hand back the payload, not the header. */
	return (u8 *)h + sizeof(*h);
}

/* Release a buffer obtained from hgsmi_buffer_alloc() (payload pointer). */
void hgsmi_buffer_free(struct gen_pool *guest_pool, void *buf)
{
	/* Step back from the payload to the real allocation start. */
	struct hgsmi_buffer_header *h =
		(struct hgsmi_buffer_header *)((u8 *)buf - sizeof(*h));
	size_t total_size = h->data_size + sizeof(*h) +
			    sizeof(struct hgsmi_buffer_tail);

	gen_pool_free(guest_pool, (unsigned long)h, total_size);
}

/*
 * Submit a prepared buffer to the host by writing its physical offset to
 * the HGSMI guest port.  The host processes the command synchronously and
 * may write results back into the buffer.  Always returns 0.
 */
int hgsmi_buffer_submit(struct gen_pool *guest_pool, void *buf)
{
	phys_addr_t offset;

	offset = gen_pool_virt_to_phys(guest_pool, (unsigned long)buf -
				       sizeof(struct hgsmi_buffer_header));

	outl(offset, VGA_PORT_HGSMI_GUEST);
	/* Make the compiler aware that the host has changed memory. */
	mb();

	return 0;
}
linux-master
drivers/gpu/drm/vboxvideo/vbox_hgsmi.c
// SPDX-License-Identifier: MIT
/* Copyright (C) 2006-2017 Oracle Corporation */

#include <linux/vbox_err.h>
#include "vbox_drv.h"
#include "vboxvideo_guest.h"
#include "vboxvideo_vbe.h"
#include "hgsmi_channels.h"
#include "hgsmi_ch_setup.h"

/**
 * hgsmi_report_flags_location - Inform the host of the location of
 *                               the host flags in VRAM via an HGSMI cmd.
 * Return: 0 or negative errno value.
 * @ctx:      The context of the guest heap to use.
 * @location: The offset chosen for the flags within guest VRAM.
 */
int hgsmi_report_flags_location(struct gen_pool *ctx, u32 location)
{
	struct hgsmi_buffer_location *p;

	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_HGSMI,
			       HGSMI_CC_HOST_FLAGS_LOCATION);
	if (!p)
		return -ENOMEM;

	p->buf_location = location;
	p->buf_len = sizeof(struct hgsmi_host_flags);

	hgsmi_buffer_submit(ctx, p);
	hgsmi_buffer_free(ctx, p);

	return 0;
}

/**
 * hgsmi_send_caps_info - Notify the host of HGSMI-related guest capabilities
 *                        via an HGSMI command.
 * Return: 0 or negative errno value.
 * @ctx:  The context of the guest heap to use.
 * @caps: The capabilities to report, see vbva_caps.
 */
int hgsmi_send_caps_info(struct gen_pool *ctx, u32 caps)
{
	struct vbva_caps *p;

	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_INFO_CAPS);
	if (!p)
		return -ENOMEM;

	/* Pre-set an error code; the host overwrites rc on success. */
	p->rc = VERR_NOT_IMPLEMENTED;
	p->caps = caps;

	hgsmi_buffer_submit(ctx, p);

	/* Host rejecting the caps is unexpected but not fatal here. */
	WARN_ON_ONCE(p->rc < 0);

	hgsmi_buffer_free(ctx, p);

	return 0;
}

/*
 * Sanity-check the HGSMI query-conf channel: querying an invalid index
 * (U32_MAX) must echo U32_MAX back.  Returns 0 on success, -EIO otherwise.
 */
int hgsmi_test_query_conf(struct gen_pool *ctx)
{
	u32 value = 0;
	int ret;

	ret = hgsmi_query_conf(ctx, U32_MAX, &value);
	if (ret)
		return ret;

	return value == U32_MAX ? 0 : -EIO;
}

/**
 * hgsmi_query_conf - Query the host for an HGSMI configuration
 *                    parameter via an HGSMI command.
 * Return: 0 or negative errno value.
 * @ctx:       The context containing the heap used.
 * @index:     The index of the parameter to query.
 * @value_ret: Where to store the value of the parameter on success.
 */
int hgsmi_query_conf(struct gen_pool *ctx, u32 index, u32 *value_ret)
{
	struct vbva_conf32 *p;

	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
			       VBVA_QUERY_CONF32);
	if (!p)
		return -ENOMEM;

	p->index = index;
	/* Sentinel value; the host overwrites it for known indices. */
	p->value = U32_MAX;

	hgsmi_buffer_submit(ctx, p);

	*value_ret = p->value;

	hgsmi_buffer_free(ctx, p);

	return 0;
}

/**
 * hgsmi_update_pointer_shape - Pass the host a new mouse pointer shape
 *                              via an HGSMI command.
 * Return: 0 or negative errno value.
 * @ctx:    The context containing the heap to be used.
 * @flags:  Cursor flags.
 * @hot_x:  Horizontal position of the hot spot.
 * @hot_y:  Vertical position of the hot spot.
 * @width:  Width in pixels of the cursor.
 * @height: Height in pixels of the cursor.
 * @pixels: Pixel data, @see VMMDevReqMousePointer for the format.
 * @len:    Size in bytes of the pixel data.
 */
int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
			       u32 hot_x, u32 hot_y, u32 width, u32 height,
			       u8 *pixels, u32 len)
{
	struct vbva_mouse_pointer_shape *p;
	u32 pixel_len = 0;
	int rc;

	if (flags & VBOX_MOUSE_POINTER_SHAPE) {
		/*
		 * Size of the pointer data:
		 * sizeof (AND mask) + sizeof (XOR_MASK)
		 * (1bpp AND mask rounded to 32-bit padding, plus ARGB data)
		 */
		pixel_len = ((((width + 7) / 8) * height + 3) & ~3) +
			 width * 4 * height;
		if (pixel_len > len)
			return -EINVAL;

		/*
		 * If shape is supplied, then always create the pointer visible.
		 * See comments in 'vboxUpdatePointerShape'
		 */
		flags |= VBOX_MOUSE_POINTER_VISIBLE;
	}

	p = hgsmi_buffer_alloc(ctx, sizeof(*p) + pixel_len, HGSMI_CH_VBVA,
			       VBVA_MOUSE_POINTER_SHAPE);
	if (!p)
		return -ENOMEM;

	p->result = VINF_SUCCESS;
	p->flags = flags;
	/*
	 * NOTE(review): field name is 'hot_X' (capital X) while the y field
	 * is lower-case 'hot_y' -- presumably matches the struct layout in
	 * the shared VirtualBox headers; confirm against vboxvideo.h.
	 */
	p->hot_X = hot_x;
	p->hot_y = hot_y;
	p->width = width;
	p->height = height;
	if (pixel_len)
		memcpy(p->data, pixels, pixel_len);

	hgsmi_buffer_submit(ctx, p);

	/* Map the VirtualBox status written by the host to an errno. */
	switch (p->result) {
	case VINF_SUCCESS:
		rc = 0;
		break;
	case VERR_NO_MEMORY:
		rc = -ENOMEM;
		break;
	case VERR_NOT_SUPPORTED:
		rc = -EBUSY;
		break;
	default:
		rc = -EINVAL;
	}

	hgsmi_buffer_free(ctx, p);

	return rc;
}

/**
 * hgsmi_cursor_position - Report the guest cursor position.  The host may
 *                         wish to use this information to re-position its
 *                         own cursor (though this is currently unlikely).
 *                         The current host cursor position is returned.
 * Return: 0 or negative errno value.
 * @ctx:             The context containing the heap used.
 * @report_position: Are we reporting a position?
 * @x:               Guest cursor X position.
 * @y:               Guest cursor Y position.
 * @x_host:          Host cursor X position is stored here.  Optional.
 * @y_host:          Host cursor Y position is stored here.  Optional.
 */
int hgsmi_cursor_position(struct gen_pool *ctx, bool report_position,
			  u32 x, u32 y, u32 *x_host, u32 *y_host)
{
	struct vbva_cursor_position *p;

	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
			       VBVA_CURSOR_POSITION);
	if (!p)
		return -ENOMEM;

	p->report_position = report_position;
	p->x = x;
	p->y = y;

	hgsmi_buffer_submit(ctx, p);

	/*
	 * NOTE(review): kernel-doc says x_host/y_host are optional, but they
	 * are dereferenced unconditionally here -- callers must pass valid
	 * pointers; confirm against all call sites.
	 */
	*x_host = p->x;
	*y_host = p->y;

	hgsmi_buffer_free(ctx, p);

	return 0;
}
linux-master
drivers/gpu/drm/vboxvideo/hgsmi_base.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com/
 * Author: Tomi Valkeinen <[email protected]>
 */

#include <linux/platform_device.h>

#include <drm/drm_drv.h>
#include <drm/drm_print.h>

#include "tidss_crtc.h"
#include "tidss_dispc.h"
#include "tidss_drv.h"
#include "tidss_irq.h"
#include "tidss_plane.h"

/* call with wait_lock and dispc runtime held */
static void tidss_irq_update(struct tidss_device *tidss)
{
	assert_spin_locked(&tidss->wait_lock);

	/* Push the software irq_mask into the hardware enable register. */
	dispc_set_irqenable(tidss->dispc, tidss->irq_mask);
}

/* drm_crtc_funcs.enable_vblank hook: unmask both vsync IRQs for this VP. */
void tidss_irq_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *ddev = crtc->dev;
	struct tidss_device *tidss = to_tidss(ddev);
	struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
	u32 hw_videoport = tcrtc->hw_videoport;
	unsigned long flags;

	spin_lock_irqsave(&tidss->wait_lock, flags);
	/* Both even and odd field vsyncs are needed (interlaced support). */
	tidss->irq_mask |= DSS_IRQ_VP_VSYNC_EVEN(hw_videoport) |
			   DSS_IRQ_VP_VSYNC_ODD(hw_videoport);
	tidss_irq_update(tidss);
	spin_unlock_irqrestore(&tidss->wait_lock, flags);
}

/* drm_crtc_funcs.disable_vblank hook: mask the vsync IRQs for this VP. */
void tidss_irq_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *ddev = crtc->dev;
	struct tidss_device *tidss = to_tidss(ddev);
	struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
	u32 hw_videoport = tcrtc->hw_videoport;
	unsigned long flags;

	spin_lock_irqsave(&tidss->wait_lock, flags);
	tidss->irq_mask &= ~(DSS_IRQ_VP_VSYNC_EVEN(hw_videoport) |
			     DSS_IRQ_VP_VSYNC_ODD(hw_videoport));
	tidss_irq_update(tidss);
	spin_unlock_irqrestore(&tidss->wait_lock, flags);
}

/*
 * Top-level interrupt handler: reads and clears the DISPC status, then
 * dispatches vsync / frame-done / sync-lost events to each CRTC and
 * reports device-level OCP errors.
 */
static irqreturn_t tidss_irq_handler(int irq, void *arg)
{
	struct drm_device *ddev = (struct drm_device *)arg;
	struct tidss_device *tidss = to_tidss(ddev);
	unsigned int id;
	dispc_irq_t irqstatus;

	irqstatus = dispc_read_and_clear_irqstatus(tidss->dispc);

	for (id = 0; id < tidss->num_crtcs; id++) {
		struct drm_crtc *crtc = tidss->crtcs[id];
		struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
		u32 hw_videoport = tcrtc->hw_videoport;

		if (irqstatus & (DSS_IRQ_VP_VSYNC_EVEN(hw_videoport) |
				 DSS_IRQ_VP_VSYNC_ODD(hw_videoport)))
			tidss_crtc_vblank_irq(crtc);

		if (irqstatus & (DSS_IRQ_VP_FRAME_DONE(hw_videoport)))
			tidss_crtc_framedone_irq(crtc);

		if (irqstatus & DSS_IRQ_VP_SYNC_LOST(hw_videoport))
			tidss_crtc_error_irq(crtc, irqstatus);
	}

	if (irqstatus & DSS_IRQ_DEVICE_OCP_ERR)
		dev_err_ratelimited(tidss->dev, "OCP error\n");

	return IRQ_HANDLED;
}

/* Re-program the IRQ enables after a runtime resume lost hardware state. */
void tidss_irq_resume(struct tidss_device *tidss)
{
	unsigned long flags;

	spin_lock_irqsave(&tidss->wait_lock, flags);
	tidss_irq_update(tidss);
	spin_unlock_irqrestore(&tidss->wait_lock, flags);
}

/* Disable and clear all IRQs before the handler is requested. */
static void tidss_irq_preinstall(struct drm_device *ddev)
{
	struct tidss_device *tidss = to_tidss(ddev);

	spin_lock_init(&tidss->wait_lock);

	/* Hardware access requires the device to be runtime-active. */
	tidss_runtime_get(tidss);

	dispc_set_irqenable(tidss->dispc, 0);
	dispc_read_and_clear_irqstatus(tidss->dispc);

	tidss_runtime_put(tidss);
}

/*
 * Build the baseline irq_mask (OCP error plus per-CRTC sync-lost and
 * frame-done) and program it.  Vblank bits are added later on demand via
 * tidss_irq_enable_vblank().
 */
static void tidss_irq_postinstall(struct drm_device *ddev)
{
	struct tidss_device *tidss = to_tidss(ddev);
	unsigned long flags;
	unsigned int i;

	tidss_runtime_get(tidss);

	spin_lock_irqsave(&tidss->wait_lock, flags);

	tidss->irq_mask = DSS_IRQ_DEVICE_OCP_ERR;

	for (i = 0; i < tidss->num_crtcs; ++i) {
		struct tidss_crtc *tcrtc = to_tidss_crtc(tidss->crtcs[i]);

		tidss->irq_mask |= DSS_IRQ_VP_SYNC_LOST(tcrtc->hw_videoport);

		tidss->irq_mask |= DSS_IRQ_VP_FRAME_DONE(tcrtc->hw_videoport);
	}

	tidss_irq_update(tidss);

	spin_unlock_irqrestore(&tidss->wait_lock, flags);

	tidss_runtime_put(tidss);
}

/*
 * Install the interrupt handler: quiesce the hardware, request the line,
 * then enable the baseline interrupt set.
 * Returns 0 or a negative errno value.
 */
int tidss_irq_install(struct drm_device *ddev, unsigned int irq)
{
	int ret;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	tidss_irq_preinstall(ddev);

	ret = request_irq(irq, tidss_irq_handler, 0, ddev->driver->name, ddev);
	if (ret)
		return ret;

	tidss_irq_postinstall(ddev);

	return 0;
}

/* Mask all IRQs in hardware and release the interrupt line. */
void tidss_irq_uninstall(struct drm_device *ddev)
{
	struct tidss_device *tidss = to_tidss(ddev);

	tidss_runtime_get(tidss);
	dispc_set_irqenable(tidss->dispc, 0);
	tidss_runtime_put(tidss);

	free_irq(tidss->irq, ddev);
}
linux-master
drivers/gpu/drm/tidss/tidss_irq.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com/
 * Author: Jyri Sarha <[email protected]>
 */

#include <linux/device.h>
#include <linux/kernel.h>

#include "tidss_scale_coefs.h"

/*
 * These are interpolated with a custom python script from DSS5
 * (drivers/gpu/drm/omapdrm/dss/dispc_coef.c) coefficients.
 *
 * coef5_* tables are the 5-tap filter coefficient sets and coef3_* the
 * 3-tap sets; the M<n> suffix is the scaling ratio bucket selected in
 * tidss_get_scale_coefs() below.
 */
static const struct tidss_scale_coefs coef5_m32 = {
	.c2 = { 28, 34, 40, 46, 52, 58, 64, 70, 0, 2, 4, 8, 12, 16, 20, 24, },
	.c1 = { 132, 138, 144, 150, 156, 162, 168, 174, 76, 84, 92, 98, 104, 110, 116, 124, },
	.c0 = { 192, 192, 192, 190, 188, 186, 184, 182, 180, },
};

static const struct tidss_scale_coefs coef5_m26 = {
	.c2 = { 24, 28, 32, 38, 44, 50, 56, 64, 0, 2, 4, 6, 8, 12, 16, 20, },
	.c1 = { 132, 138, 144, 152, 160, 166, 172, 178, 72, 80, 88, 94, 100, 108, 116, 124, },
	.c0 = { 200, 202, 204, 202, 200, 196, 192, 188, 184, },
};

static const struct tidss_scale_coefs coef5_m22 = {
	.c2 = { 16, 20, 24, 30, 36, 42, 48, 56, 0, 0, 0, 2, 4, 8, 12, 14, },
	.c1 = { 132, 140, 148, 156, 164, 172, 180, 186, 64, 72, 80, 88, 96, 104, 112, 122, },
	.c0 = { 216, 216, 216, 214, 212, 208, 204, 198, 192, },
};

static const struct tidss_scale_coefs coef5_m19 = {
	.c2 = { 12, 14, 16, 22, 28, 34, 40, 48, 0, 0, 0, 2, 4, 4, 4, 8, },
	.c1 = { 128, 140, 152, 160, 168, 176, 184, 192, 56, 64, 72, 82, 92, 100, 108, 118, },
	.c0 = { 232, 232, 232, 226, 220, 218, 216, 208, 200, },
};

static const struct tidss_scale_coefs coef5_m16 = {
	.c2 = { 0, 2, 4, 8, 12, 18, 24, 32, 0, 0, 0, -2, -4, -4, -4, -2, },
	.c1 = { 124, 138, 152, 164, 176, 186, 196, 206, 40, 48, 56, 68, 80, 90, 100, 112, },
	.c0 = { 264, 262, 260, 254, 248, 242, 236, 226, 216, },
};

static const struct tidss_scale_coefs coef5_m14 = {
	.c2 = { -8, -6, -4, -2, 0, 6, 12, 18, 0, -2, -4, -6, -8, -8, -8, -8, },
	.c1 = { 120, 134, 148, 164, 180, 194, 208, 220, 24, 32, 40, 52, 64, 78, 92, 106, },
	.c0 = { 288, 286, 284, 280, 276, 266, 256, 244, 232, },
};

static const struct tidss_scale_coefs coef5_m13 = {
	.c2 = { -12, -12, -12, -10, -8, -4, 0, 6, 0, -2, -4, -6, -8, -10, -12, -12, },
	.c1 = { 112, 130, 148, 164, 180, 196, 212, 228, 12, 22, 32, 44, 56, 70, 84, 98, },
	.c0 = { 312, 308, 304, 298, 292, 282, 272, 258, 244, },
};

static const struct tidss_scale_coefs coef5_m12 = {
	.c2 = { -16, -18, -20, -18, -16, -14, -12, -6, 0, -2, -4, -6, -8, -10, -12, -14, },
	.c1 = { 104, 124, 144, 164, 184, 202, 220, 238, 0, 10, 20, 30, 40, 56, 72, 88, },
	.c0 = { 336, 332, 328, 320, 312, 300, 288, 272, 256, },
};

static const struct tidss_scale_coefs coef5_m11 = {
	.c2 = { -20, -22, -24, -24, -24, -24, -24, -20, 0, -2, -4, -6, -8, -10, -12, -16, },
	.c1 = { 92, 114, 136, 158, 180, 204, 228, 250, -16, -8, 0, 12, 24, 38, 52, 72, },
	.c0 = { 368, 364, 360, 350, 340, 326, 312, 292, 272, },
};

static const struct tidss_scale_coefs coef5_m10 = {
	.c2 = { -16, -20, -24, -28, -32, -34, -36, -34, 0, 0, 0, -2, -4, -8, -12, -14, },
	.c1 = { 72, 96, 120, 148, 176, 204, 232, 260, -32, -26, -20, -10, 0, 16, 32, 52, },
	.c0 = { 400, 398, 396, 384, 372, 354, 336, 312, 288, },
};

static const struct tidss_scale_coefs coef5_m9 = {
	.c2 = { -12, -18, -24, -28, -32, -38, -44, -46, 0, 2, 4, 2, 0, -2, -4, -8, },
	.c1 = { 40, 68, 96, 128, 160, 196, 232, 268, -48, -46, -44, -36, -28, -14, 0, 20, },
	.c0 = { 456, 450, 444, 428, 412, 388, 364, 334, 304, },
};

static const struct tidss_scale_coefs coef5_m8 = {
	.c2 = { 0, -4, -8, -16, -24, -32, -40, -48, 0, 2, 4, 6, 8, 6, 4, 2, },
	.c1 = { 0, 28, 56, 94, 132, 176, 220, 266, -56, -60, -64, -62, -60, -50, -40, -20, },
	.c0 = { 512, 506, 500, 478, 456, 424, 392, 352, 312, },
};

/* 3-tap tables: only c1/c0 are initialized; c2 stays zero-filled. */
static const struct tidss_scale_coefs coef3_m32 = {
	.c1 = { 108, 92, 76, 62, 48, 36, 24, 140, 256, 236, 216, 198, 180, 162, 144, 126, },
	.c0 = { 296, 294, 292, 288, 284, 278, 272, 136, 256, },
};

static const struct tidss_scale_coefs coef3_m26 = {
	.c1 = { 104, 90, 76, 60, 44, 32, 20, 138, 256, 236, 216, 198, 180, 160, 140, 122, },
	.c0 = { 304, 300, 296, 292, 288, 282, 276, 138, 256, },
};

static const struct tidss_scale_coefs coef3_m22 = {
	.c1 = { 100, 84, 68, 54, 40, 30, 20, 138, 256, 236, 216, 196, 176, 156, 136, 118, },
	.c0 = { 312, 310, 308, 302, 296, 286, 276, 138, 256, },
};

static const struct tidss_scale_coefs coef3_m19 = {
	.c1 = { 96, 80, 64, 50, 36, 26, 16, 136, 256, 236, 216, 194, 172, 152, 132, 114, },
	.c0 = { 320, 318, 316, 310, 304, 292, 280, 140, 256, },
};

static const struct tidss_scale_coefs coef3_m16 = {
	.c1 = { 88, 72, 56, 44, 32, 22, 12, 134, 256, 234, 212, 190, 168, 148, 128, 108, },
	.c0 = { 336, 332, 328, 320, 312, 300, 288, 144, 256, },
};

static const struct tidss_scale_coefs coef3_m14 = {
	.c1 = { 80, 64, 48, 36, 24, 16, 8, 132, 256, 232, 208, 186, 164, 142, 120, 100, },
	.c0 = { 352, 348, 344, 334, 324, 310, 296, 148, 256, },
};

static const struct tidss_scale_coefs coef3_m13 = {
	.c1 = { 72, 56, 40, 30, 20, 12, 4, 130, 256, 232, 208, 184, 160, 136, 112, 92, },
	.c0 = { 368, 364, 360, 346, 332, 316, 300, 150, 256, },
};

static const struct tidss_scale_coefs coef3_m12 = {
	.c1 = { 64, 50, 36, 26, 16, 10, 4, 130, 256, 230, 204, 178, 152, 128, 104, 84, },
	.c0 = { 384, 378, 372, 358, 344, 324, 304, 152, 256, },
};

static const struct tidss_scale_coefs coef3_m11 = {
	.c1 = { 56, 40, 24, 16, 8, 4, 0, 128, 256, 228, 200, 172, 144, 120, 96, 76, },
	.c0 = { 400, 396, 392, 376, 360, 336, 312, 156, 256, },
};

static const struct tidss_scale_coefs coef3_m10 = {
	.c1 = { 40, 26, 12, 6, 0, -2, -4, 126, 256, 226, 196, 166, 136, 110, 84, 62, },
	.c0 = { 432, 424, 416, 396, 376, 348, 320, 160, 256, },
};

static const struct tidss_scale_coefs coef3_m9 = {
	.c1 = { 24, 12, 0, -4, -8, -8, -8, 124, 256, 222, 188, 154, 120, 92, 64, 44, },
	.c0 = { 464, 456, 448, 424, 400, 366, 332, 166, 256, },
};

static const struct tidss_scale_coefs coef3_m8 = {
	.c1 = { 0, -8, -16, -16, -16, -12, -8, 124, 256, 214, 172, 134, 96, 66, 36, 18, },
	.c0 = { 512, 502, 492, 462, 432, 390, 348, 174, 256, },
};

/*
 * Select the FIR coefficient table for a given FIR increment.
 *
 * @dev: device used only for the error message
 * @firinc: hardware FIR increment (0x200000 * in_size / out_size)
 * @five_taps: true selects a coef5_* table, false a coef3_* table
 *
 * Returns the matching table, or NULL (with a dev_err) if the derived
 * scaling ratio falls in no bucket.
 */
const struct tidss_scale_coefs *tidss_get_scale_coefs(struct device *dev,
						      u32 firinc,
						      bool five_taps)
{
	int i;
	int inc;
	static const struct {
		int mmin;	/* inclusive lower bound for inc */
		int mmax;	/* inclusive upper bound for inc */
		const struct tidss_scale_coefs *coef3;
		const struct tidss_scale_coefs *coef5;
		const char *name;
	} coefs[] = {
		{ 27, 32, &coef3_m32, &coef5_m32, "M32" },
		{ 23, 26, &coef3_m26, &coef5_m26, "M26" },
		{ 20, 22, &coef3_m22, &coef5_m22, "M22" },
		{ 17, 19, &coef3_m19, &coef5_m19, "M19" },
		{ 15, 16, &coef3_m16, &coef5_m16, "M16" },
		{ 14, 14, &coef3_m14, &coef5_m14, "M14" },
		{ 13, 13, &coef3_m13, &coef5_m13, "M13" },
		{ 12, 12, &coef3_m12, &coef5_m12, "M12" },
		{ 11, 11, &coef3_m11, &coef5_m11, "M11" },
		{ 10, 10, &coef3_m10, &coef5_m10, "M10" },
		{ 9, 9, &coef3_m9, &coef5_m9, "M9" },
		{ 4, 8, &coef3_m8, &coef5_m8, "M8" },
		/*
		 * When upscaling more than two times, blockiness and outlines
		 * around the image are observed when M8 tables are used. M11,
		 * M16 and M19 tables are used to prevent this.
		 */
		{ 3, 3, &coef3_m11, &coef5_m11, "M11" },
		{ 2, 2, &coef3_m16, &coef5_m16, "M16" },
		{ 0, 1, &coef3_m19, &coef5_m19, "M19" },
	};

	/*
	 * inc is result of 0x200000 * in_size / out_size. This dividing
	 * by 0x40000 scales it down to 8 * in_size / out_size. After
	 * division the actual scaling factor is 8/inc.
	 */
	inc = firinc / 0x40000;
	for (i = 0; i < ARRAY_SIZE(coefs); ++i) {
		if (inc >= coefs[i].mmin && inc <= coefs[i].mmax) {
			if (five_taps)
				return coefs[i].coef5;
			else
				return coefs[i].coef3;
		}
	}

	dev_err(dev, "%s: Coefficients not found for firinc 0x%08x, inc %d\n",
		__func__, firinc, inc);

	return NULL;
}
linux-master
drivers/gpu/drm/tidss/tidss_scale_coefs.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com/
 * Author: Tomi Valkeinen <[email protected]>
 */

#include <linux/dma-fence.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_vblank.h>

#include "tidss_crtc.h"
#include "tidss_dispc.h"
#include "tidss_drv.h"
#include "tidss_encoder.h"
#include "tidss_kms.h"
#include "tidss_plane.h"

/*
 * Custom commit tail: standard disable -> planes -> enable sequence,
 * bracketed by a dispc runtime-PM reference and by the dma-fence
 * signalling annotations (begin before hw_done, end right after).
 */
static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *ddev = old_state->dev;
	struct tidss_device *tidss = to_tidss(ddev);
	bool fence_cookie = dma_fence_begin_signalling();

	dev_dbg(ddev->dev, "%s\n", __func__);

	tidss_runtime_get(tidss);

	drm_atomic_helper_commit_modeset_disables(ddev, old_state);
	drm_atomic_helper_commit_planes(ddev, old_state, 0);
	drm_atomic_helper_commit_modeset_enables(ddev, old_state);

	drm_atomic_helper_commit_hw_done(old_state);
	dma_fence_end_signalling(fence_cookie);
	drm_atomic_helper_wait_for_flip_done(ddev, old_state);

	drm_atomic_helper_cleanup_planes(ddev, old_state);

	tidss_runtime_put(tidss);
}

static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
	.atomic_commit_tail = tidss_atomic_commit_tail,
};

/*
 * Atomic check: run the standard helper check, then pull extra planes
 * into the state so plane positions can be reprogrammed when needed.
 */
static int tidss_atomic_check(struct drm_device *ddev,
			      struct drm_atomic_state *state)
{
	struct drm_plane_state *opstate;
	struct drm_plane_state *npstate;
	struct drm_plane *plane;
	struct drm_crtc_state *cstate;
	struct drm_crtc *crtc;
	int ret, i;

	ret = drm_atomic_helper_check(ddev, state);
	if (ret)
		return ret;

	/*
	 * Add all active planes on a CRTC to the atomic state, if
	 * x/y/z position or activity of any plane on that CRTC
	 * changes. This is needed for updating the plane positions in
	 * tidss_crtc_position_planes() which is called from
	 * crtc_atomic_enable() and crtc_atomic_flush(). We have an
	 * extra flag to mark x,y-position changes and together
	 * with zpos_changed the condition recognizes all the above
	 * cases.
	 */
	for_each_oldnew_plane_in_state(state, plane, opstate, npstate, i) {
		if (!npstate->crtc || !npstate->visible)
			continue;

		/* Newly enabled on a CRTC, or moved in x/y */
		if (!opstate->crtc || opstate->crtc_x != npstate->crtc_x ||
		    opstate->crtc_y != npstate->crtc_y) {
			cstate = drm_atomic_get_crtc_state(state,
							   npstate->crtc);
			if (IS_ERR(cstate))
				return PTR_ERR(cstate);
			to_tidss_crtc_state(cstate)->plane_pos_changed = true;
		}
	}

	for_each_new_crtc_in_state(state, crtc, cstate, i) {
		if (to_tidss_crtc_state(cstate)->plane_pos_changed ||
		    cstate->zpos_changed) {
			ret = drm_atomic_add_affected_planes(state, crtc);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = drm_gem_fb_create,
	.atomic_check = tidss_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};

/*
 * Discover connected panels/bridges on the DT ports and create the
 * plane/CRTC/encoder pipeline for each, then turn any leftover hardware
 * planes into overlay planes. Returns 0 or a negative errno.
 */
static int tidss_dispc_modeset_init(struct tidss_device *tidss)
{
	struct device *dev = tidss->dev;
	unsigned int fourccs_len;
	const u32 *fourccs = dispc_plane_formats(tidss->dispc, &fourccs_len);
	unsigned int i;

	struct pipe {
		u32 hw_videoport;
		struct drm_bridge *bridge;
		u32 enc_type;
	};

	const struct dispc_features *feat = tidss->feat;
	u32 max_vps = feat->num_vps;
	u32 max_planes = feat->num_planes;

	struct pipe pipes[TIDSS_MAX_PORTS];
	u32 num_pipes = 0;
	u32 crtc_mask;

	/* first find all the connected panels & bridges */

	for (i = 0; i < max_vps; i++) {
		struct drm_panel *panel;
		struct drm_bridge *bridge;
		u32 enc_type = DRM_MODE_ENCODER_NONE;
		int ret;

		ret = drm_of_find_panel_or_bridge(dev->of_node, i, 0,
						  &panel, &bridge);
		if (ret == -ENODEV) {
			/* Unconnected port: simply skip it */
			dev_dbg(dev, "no panel/bridge for port %d\n", i);
			continue;
		} else if (ret) {
			dev_dbg(dev, "port %d probe returned %d\n", i, ret);
			return ret;
		}

		if (panel) {
			u32 conn_type;

			dev_dbg(dev, "Setting up panel for port %d\n", i);

			/* Encoder/connector type follows the videoport bus type */
			switch (feat->vp_bus_type[i]) {
			case DISPC_VP_OLDI:
				enc_type = DRM_MODE_ENCODER_LVDS;
				conn_type = DRM_MODE_CONNECTOR_LVDS;
				break;
			case DISPC_VP_DPI:
				enc_type = DRM_MODE_ENCODER_DPI;
				conn_type = DRM_MODE_CONNECTOR_DPI;
				break;
			default:
				WARN_ON(1);
				return -EINVAL;
			}

			if (panel->connector_type != conn_type) {
				dev_err(dev,
					"%s: Panel %s has incompatible connector type for vp%d (%d != %d)\n",
					 __func__, dev_name(panel->dev), i,
					 panel->connector_type, conn_type);
				return -EINVAL;
			}

			/* Wrap the panel into a bridge so both paths look alike */
			bridge = devm_drm_panel_bridge_add(dev, panel);
			if (IS_ERR(bridge)) {
				dev_err(dev,
					"failed to set up panel bridge for port %d\n",
					i);
				return PTR_ERR(bridge);
			}
		}

		pipes[num_pipes].hw_videoport = i;
		pipes[num_pipes].bridge = bridge;
		pipes[num_pipes].enc_type = enc_type;
		num_pipes++;
	}

	/* all planes can be on any crtc */
	crtc_mask = (1 << num_pipes) - 1;

	/* then create a plane, a crtc and an encoder for each panel/bridge */

	for (i = 0; i < num_pipes; ++i) {
		struct tidss_plane *tplane;
		struct tidss_crtc *tcrtc;
		u32 hw_plane_id = feat->vid_order[tidss->num_planes];
		int ret;

		tplane = tidss_plane_create(tidss, hw_plane_id,
					    DRM_PLANE_TYPE_PRIMARY, crtc_mask,
					    fourccs, fourccs_len);
		if (IS_ERR(tplane)) {
			dev_err(tidss->dev, "plane create failed\n");
			return PTR_ERR(tplane);
		}

		tidss->planes[tidss->num_planes++] = &tplane->plane;

		tcrtc = tidss_crtc_create(tidss, pipes[i].hw_videoport,
					  &tplane->plane);
		if (IS_ERR(tcrtc)) {
			dev_err(tidss->dev, "crtc create failed\n");
			return PTR_ERR(tcrtc);
		}

		tidss->crtcs[tidss->num_crtcs++] = &tcrtc->crtc;

		ret = tidss_encoder_create(tidss, pipes[i].bridge,
					   pipes[i].enc_type,
					   1 << tcrtc->crtc.index);
		if (ret) {
			dev_err(tidss->dev, "encoder create failed\n");
			return ret;
		}
	}

	/* create overlay planes of the leftover planes */

	while (tidss->num_planes < max_planes) {
		struct tidss_plane *tplane;
		u32 hw_plane_id = feat->vid_order[tidss->num_planes];

		tplane = tidss_plane_create(tidss, hw_plane_id,
					    DRM_PLANE_TYPE_OVERLAY, crtc_mask,
					    fourccs, fourccs_len);

		if (IS_ERR(tplane)) {
			dev_err(tidss->dev, "plane create failed\n");
			return PTR_ERR(tplane);
		}

		tidss->planes[tidss->num_planes++] = &tplane->plane;
	}

	return 0;
}

/*
 * Set up KMS mode config (limits, funcs), the display pipeline, and
 * vblank handling for this device. Returns 0 or a negative errno.
 */
int tidss_modeset_init(struct tidss_device *tidss)
{
	struct drm_device *ddev = &tidss->ddev;
	int ret;

	dev_dbg(tidss->dev, "%s\n", __func__);

	ret = drmm_mode_config_init(ddev);
	if (ret)
		return ret;

	ddev->mode_config.min_width = 8;
	ddev->mode_config.min_height = 8;
	ddev->mode_config.max_width = 8096;
	ddev->mode_config.max_height = 8096;
	ddev->mode_config.normalize_zpos = true;
	ddev->mode_config.funcs = &mode_config_funcs;
	ddev->mode_config.helper_private = &mode_config_helper_funcs;

	ret = tidss_dispc_modeset_init(tidss);
	if (ret)
		return ret;

	ret = drm_vblank_init(ddev, tidss->num_crtcs);
	if (ret)
		return ret;

	drm_mode_config_reset(ddev);

	dev_dbg(tidss->dev, "%s done\n", __func__);

	return 0;
}
linux-master
drivers/gpu/drm/tidss/tidss_kms.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com/
 * Author: Tomi Valkeinen <[email protected]>
 */

#include <linux/console.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>

#include "tidss_dispc.h"
#include "tidss_drv.h"
#include "tidss_kms.h"
#include "tidss_irq.h"

/* Power management */

/*
 * Take a runtime-PM reference on the DSS. Returns 0 on success or the
 * negative error from pm_runtime_get_sync() (which is also WARNed on).
 */
int tidss_runtime_get(struct tidss_device *tidss)
{
	int r;

	dev_dbg(tidss->dev, "%s\n", __func__);

	r = pm_runtime_get_sync(tidss->dev);
	WARN_ON(r < 0);
	return r < 0 ? r : 0;
}

/* Drop the runtime-PM reference taken by tidss_runtime_get(). */
void tidss_runtime_put(struct tidss_device *tidss)
{
	int r;

	dev_dbg(tidss->dev, "%s\n", __func__);

	r = pm_runtime_put_sync(tidss->dev);
	WARN_ON(r < 0);
}

static int __maybe_unused tidss_pm_runtime_suspend(struct device *dev)
{
	struct tidss_device *tidss = dev_get_drvdata(dev);

	dev_dbg(dev, "%s\n", __func__);

	return dispc_runtime_suspend(tidss->dispc);
}

static int __maybe_unused tidss_pm_runtime_resume(struct device *dev)
{
	struct tidss_device *tidss = dev_get_drvdata(dev);
	int r;

	dev_dbg(dev, "%s\n", __func__);

	r = dispc_runtime_resume(tidss->dispc);
	if (r)
		return r;

	return 0;
}

/* System sleep: delegate to the DRM mode-config suspend/resume helpers. */
static int __maybe_unused tidss_suspend(struct device *dev)
{
	struct tidss_device *tidss = dev_get_drvdata(dev);

	dev_dbg(dev, "%s\n", __func__);

	return drm_mode_config_helper_suspend(&tidss->ddev);
}

static int __maybe_unused tidss_resume(struct device *dev)
{
	struct tidss_device *tidss = dev_get_drvdata(dev);

	dev_dbg(dev, "%s\n", __func__);

	return drm_mode_config_helper_resume(&tidss->ddev);
}

static __maybe_unused const struct dev_pm_ops tidss_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tidss_suspend, tidss_resume)
	SET_RUNTIME_PM_OPS(tidss_pm_runtime_suspend, tidss_pm_runtime_resume, NULL)
};

/* DRM device Information */

static void tidss_release(struct drm_device *ddev)
{
	drm_kms_helper_poll_fini(ddev);
}

DEFINE_DRM_GEM_DMA_FOPS(tidss_fops);

static const struct drm_driver tidss_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.fops			= &tidss_fops,
	.release		= tidss_release,
	DRM_GEM_DMA_DRIVER_OPS_VMAP,
	.name			= "tidss",
	.desc			= "TI Keystone DSS",
	.date			= "20180215",
	.major			= 1,
	.minor			= 0,
};

/*
 * Probe: allocate the devm-managed DRM device, init dispc, enable
 * runtime PM, build the KMS pipeline, hook up the interrupt, and
 * register the device. Error paths unwind in reverse order.
 */
static int tidss_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct tidss_device *tidss;
	struct drm_device *ddev;
	int ret;
	int irq;

	dev_dbg(dev, "%s\n", __func__);

	tidss = devm_drm_dev_alloc(&pdev->dev, &tidss_driver,
				   struct tidss_device, ddev);
	if (IS_ERR(tidss))
		return PTR_ERR(tidss);

	ddev = &tidss->ddev;

	tidss->dev = dev;
	tidss->feat = of_device_get_match_data(dev);

	platform_set_drvdata(pdev, tidss);

	ret = dispc_init(tidss);
	if (ret) {
		dev_err(dev, "failed to initialize dispc: %d\n", ret);
		return ret;
	}

	pm_runtime_enable(dev);

#ifndef CONFIG_PM
	/* If we don't have PM, we need to call resume manually */
	dispc_runtime_resume(tidss->dispc);
#endif

	ret = tidss_modeset_init(tidss);
	if (ret < 0) {
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to init DRM/KMS (%d)\n", ret);
		goto err_runtime_suspend;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_runtime_suspend;
	}
	tidss->irq = irq;

	ret = tidss_irq_install(ddev, irq);
	if (ret) {
		dev_err(dev, "tidss_irq_install failed: %d\n", ret);
		goto err_runtime_suspend;
	}

	drm_kms_helper_poll_init(ddev);

	drm_mode_config_reset(ddev);

	ret = drm_dev_register(ddev, 0);
	if (ret) {
		dev_err(dev, "failed to register DRM device\n");
		goto err_irq_uninstall;
	}

	drm_fbdev_dma_setup(ddev, 32);

	dev_dbg(dev, "%s done\n", __func__);

	return 0;

err_irq_uninstall:
	tidss_irq_uninstall(ddev);

err_runtime_suspend:
#ifndef CONFIG_PM
	dispc_runtime_suspend(tidss->dispc);
#endif
	pm_runtime_disable(dev);

	return ret;
}

/* Remove: mirror of probe — unregister, shut down atomic state, free irq. */
static void tidss_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct tidss_device *tidss = platform_get_drvdata(pdev);
	struct drm_device *ddev = &tidss->ddev;

	dev_dbg(dev, "%s\n", __func__);

	drm_dev_unregister(ddev);

	drm_atomic_helper_shutdown(ddev);

	tidss_irq_uninstall(ddev);

#ifndef CONFIG_PM
	/* If we don't have PM, we need to call suspend manually */
	dispc_runtime_suspend(tidss->dispc);
#endif
	pm_runtime_disable(dev);

	/* devm allocated dispc goes away with the dev so mark it NULL */
	dispc_remove(tidss);

	dev_dbg(dev, "%s done\n", __func__);
}

static void tidss_shutdown(struct platform_device *pdev)
{
	drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
}

static const struct of_device_id tidss_of_table[] = {
	{ .compatible = "ti,k2g-dss", .data = &dispc_k2g_feats, },
	{ .compatible = "ti,am625-dss", .data = &dispc_am625_feats, },
	{ .compatible = "ti,am65x-dss", .data = &dispc_am65x_feats, },
	{ .compatible = "ti,j721e-dss", .data = &dispc_j721e_feats, },
	{ }
};

MODULE_DEVICE_TABLE(of, tidss_of_table);

static struct platform_driver tidss_platform_driver = {
	.probe		= tidss_probe,
	.remove_new	= tidss_remove,
	.shutdown	= tidss_shutdown,
	.driver		= {
		.name	= "tidss",
		.pm	= pm_ptr(&tidss_pm_ops),
		.of_match_table = tidss_of_table,
		.suppress_bind_attrs = true,
	},
};

drm_module_platform_driver(tidss_platform_driver);

MODULE_AUTHOR("Tomi Valkeinen <[email protected]>");
MODULE_DESCRIPTION("TI Keystone DSS Driver");
MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/tidss/tidss_drv.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2016-2018 Texas Instruments Incorporated - https://www.ti.com/ * Author: Jyri Sarha <[email protected]> */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/media-bus-format.h> #include <linux/module.h> #include <linux/mfd/syscon.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/sys_soc.h> #include <drm/drm_blend.h> #include <drm/drm_fourcc.h> #include <drm/drm_fb_dma_helper.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_panel.h> #include "tidss_crtc.h" #include "tidss_dispc.h" #include "tidss_drv.h" #include "tidss_irq.h" #include "tidss_plane.h" #include "tidss_dispc_regs.h" #include "tidss_scale_coefs.h" static const u16 tidss_k2g_common_regs[DISPC_COMMON_REG_TABLE_LEN] = { [DSS_REVISION_OFF] = 0x00, [DSS_SYSCONFIG_OFF] = 0x04, [DSS_SYSSTATUS_OFF] = 0x08, [DISPC_IRQ_EOI_OFF] = 0x20, [DISPC_IRQSTATUS_RAW_OFF] = 0x24, [DISPC_IRQSTATUS_OFF] = 0x28, [DISPC_IRQENABLE_SET_OFF] = 0x2c, [DISPC_IRQENABLE_CLR_OFF] = 0x30, [DISPC_GLOBAL_MFLAG_ATTRIBUTE_OFF] = 0x40, [DISPC_GLOBAL_BUFFER_OFF] = 0x44, [DISPC_DBG_CONTROL_OFF] = 0x4c, [DISPC_DBG_STATUS_OFF] = 0x50, [DISPC_CLKGATING_DISABLE_OFF] = 0x54, }; const struct dispc_features dispc_k2g_feats = { .min_pclk_khz = 4375, .max_pclk_khz = { [DISPC_VP_DPI] = 150000, }, /* * XXX According TRM the RGB input buffer width up to 2560 should * work on 3 taps, but in practice it only works up to 1280. */ .scaling = { .in_width_max_5tap_rgb = 1280, .in_width_max_3tap_rgb = 1280, .in_width_max_5tap_yuv = 2560, .in_width_max_3tap_yuv = 2560, .upscale_limit = 16, .downscale_limit_5tap = 4, .downscale_limit_3tap = 2, /* * The max supported pixel inc value is 255. 
The value * of pixel inc is calculated like this: 1+(xinc-1)*bpp. * The maximum bpp of all formats supported by the HW * is 8. So the maximum supported xinc value is 32, * because 1+(32-1)*8 < 255 < 1+(33-1)*4. */ .xinc_max = 32, }, .subrev = DISPC_K2G, .common = "common", .common_regs = tidss_k2g_common_regs, .num_vps = 1, .vp_name = { "vp1" }, .ovr_name = { "ovr1" }, .vpclk_name = { "vp1" }, .vp_bus_type = { DISPC_VP_DPI }, .vp_feat = { .color = { .has_ctm = true, .gamma_size = 256, .gamma_type = TIDSS_GAMMA_8BIT, }, }, .num_planes = 1, .vid_name = { "vid1" }, .vid_lite = { false }, .vid_order = { 0 }, }; static const u16 tidss_am65x_common_regs[DISPC_COMMON_REG_TABLE_LEN] = { [DSS_REVISION_OFF] = 0x4, [DSS_SYSCONFIG_OFF] = 0x8, [DSS_SYSSTATUS_OFF] = 0x20, [DISPC_IRQ_EOI_OFF] = 0x24, [DISPC_IRQSTATUS_RAW_OFF] = 0x28, [DISPC_IRQSTATUS_OFF] = 0x2c, [DISPC_IRQENABLE_SET_OFF] = 0x30, [DISPC_IRQENABLE_CLR_OFF] = 0x40, [DISPC_VID_IRQENABLE_OFF] = 0x44, [DISPC_VID_IRQSTATUS_OFF] = 0x58, [DISPC_VP_IRQENABLE_OFF] = 0x70, [DISPC_VP_IRQSTATUS_OFF] = 0x7c, [WB_IRQENABLE_OFF] = 0x88, [WB_IRQSTATUS_OFF] = 0x8c, [DISPC_GLOBAL_MFLAG_ATTRIBUTE_OFF] = 0x90, [DISPC_GLOBAL_OUTPUT_ENABLE_OFF] = 0x94, [DISPC_GLOBAL_BUFFER_OFF] = 0x98, [DSS_CBA_CFG_OFF] = 0x9c, [DISPC_DBG_CONTROL_OFF] = 0xa0, [DISPC_DBG_STATUS_OFF] = 0xa4, [DISPC_CLKGATING_DISABLE_OFF] = 0xa8, [DISPC_SECURE_DISABLE_OFF] = 0xac, }; const struct dispc_features dispc_am65x_feats = { .max_pclk_khz = { [DISPC_VP_DPI] = 165000, [DISPC_VP_OLDI] = 165000, }, .scaling = { .in_width_max_5tap_rgb = 1280, .in_width_max_3tap_rgb = 2560, .in_width_max_5tap_yuv = 2560, .in_width_max_3tap_yuv = 4096, .upscale_limit = 16, .downscale_limit_5tap = 4, .downscale_limit_3tap = 2, /* * The max supported pixel inc value is 255. The value * of pixel inc is calculated like this: 1+(xinc-1)*bpp. * The maximum bpp of all formats supported by the HW * is 8. So the maximum supported xinc value is 32, * because 1+(32-1)*8 < 255 < 1+(33-1)*4. 
*/ .xinc_max = 32, }, .subrev = DISPC_AM65X, .common = "common", .common_regs = tidss_am65x_common_regs, .num_vps = 2, .vp_name = { "vp1", "vp2" }, .ovr_name = { "ovr1", "ovr2" }, .vpclk_name = { "vp1", "vp2" }, .vp_bus_type = { DISPC_VP_OLDI, DISPC_VP_DPI }, .vp_feat = { .color = { .has_ctm = true, .gamma_size = 256, .gamma_type = TIDSS_GAMMA_8BIT, }, }, .num_planes = 2, /* note: vid is plane_id 0 and vidl1 is plane_id 1 */ .vid_name = { "vid", "vidl1" }, .vid_lite = { false, true, }, .vid_order = { 1, 0 }, }; static const u16 tidss_j721e_common_regs[DISPC_COMMON_REG_TABLE_LEN] = { [DSS_REVISION_OFF] = 0x4, [DSS_SYSCONFIG_OFF] = 0x8, [DSS_SYSSTATUS_OFF] = 0x20, [DISPC_IRQ_EOI_OFF] = 0x80, [DISPC_IRQSTATUS_RAW_OFF] = 0x28, [DISPC_IRQSTATUS_OFF] = 0x2c, [DISPC_IRQENABLE_SET_OFF] = 0x30, [DISPC_IRQENABLE_CLR_OFF] = 0x34, [DISPC_VID_IRQENABLE_OFF] = 0x38, [DISPC_VID_IRQSTATUS_OFF] = 0x48, [DISPC_VP_IRQENABLE_OFF] = 0x58, [DISPC_VP_IRQSTATUS_OFF] = 0x68, [WB_IRQENABLE_OFF] = 0x78, [WB_IRQSTATUS_OFF] = 0x7c, [DISPC_GLOBAL_MFLAG_ATTRIBUTE_OFF] = 0x98, [DISPC_GLOBAL_OUTPUT_ENABLE_OFF] = 0x9c, [DISPC_GLOBAL_BUFFER_OFF] = 0xa0, [DSS_CBA_CFG_OFF] = 0xa4, [DISPC_DBG_CONTROL_OFF] = 0xa8, [DISPC_DBG_STATUS_OFF] = 0xac, [DISPC_CLKGATING_DISABLE_OFF] = 0xb0, [DISPC_SECURE_DISABLE_OFF] = 0x90, [FBDC_REVISION_1_OFF] = 0xb8, [FBDC_REVISION_2_OFF] = 0xbc, [FBDC_REVISION_3_OFF] = 0xc0, [FBDC_REVISION_4_OFF] = 0xc4, [FBDC_REVISION_5_OFF] = 0xc8, [FBDC_REVISION_6_OFF] = 0xcc, [FBDC_COMMON_CONTROL_OFF] = 0xd0, [FBDC_CONSTANT_COLOR_0_OFF] = 0xd4, [FBDC_CONSTANT_COLOR_1_OFF] = 0xd8, [DISPC_CONNECTIONS_OFF] = 0xe4, [DISPC_MSS_VP1_OFF] = 0xe8, [DISPC_MSS_VP3_OFF] = 0xec, }; const struct dispc_features dispc_j721e_feats = { .max_pclk_khz = { [DISPC_VP_DPI] = 170000, [DISPC_VP_INTERNAL] = 600000, }, .scaling = { .in_width_max_5tap_rgb = 2048, .in_width_max_3tap_rgb = 4096, .in_width_max_5tap_yuv = 4096, .in_width_max_3tap_yuv = 4096, .upscale_limit = 16, .downscale_limit_5tap = 4, 
.downscale_limit_3tap = 2, /* * The max supported pixel inc value is 255. The value * of pixel inc is calculated like this: 1+(xinc-1)*bpp. * The maximum bpp of all formats supported by the HW * is 8. So the maximum supported xinc value is 32, * because 1+(32-1)*8 < 255 < 1+(33-1)*4. */ .xinc_max = 32, }, .subrev = DISPC_J721E, .common = "common_m", .common_regs = tidss_j721e_common_regs, .num_vps = 4, .vp_name = { "vp1", "vp2", "vp3", "vp4" }, .ovr_name = { "ovr1", "ovr2", "ovr3", "ovr4" }, .vpclk_name = { "vp1", "vp2", "vp3", "vp4" }, /* Currently hard coded VP routing (see dispc_initial_config()) */ .vp_bus_type = { DISPC_VP_INTERNAL, DISPC_VP_DPI, DISPC_VP_INTERNAL, DISPC_VP_DPI, }, .vp_feat = { .color = { .has_ctm = true, .gamma_size = 1024, .gamma_type = TIDSS_GAMMA_10BIT, }, }, .num_planes = 4, .vid_name = { "vid1", "vidl1", "vid2", "vidl2" }, .vid_lite = { 0, 1, 0, 1, }, .vid_order = { 1, 3, 0, 2 }, }; const struct dispc_features dispc_am625_feats = { .max_pclk_khz = { [DISPC_VP_DPI] = 165000, [DISPC_VP_INTERNAL] = 170000, }, .scaling = { .in_width_max_5tap_rgb = 1280, .in_width_max_3tap_rgb = 2560, .in_width_max_5tap_yuv = 2560, .in_width_max_3tap_yuv = 4096, .upscale_limit = 16, .downscale_limit_5tap = 4, .downscale_limit_3tap = 2, /* * The max supported pixel inc value is 255. The value * of pixel inc is calculated like this: 1+(xinc-1)*bpp. * The maximum bpp of all formats supported by the HW * is 8. So the maximum supported xinc value is 32, * because 1+(32-1)*8 < 255 < 1+(33-1)*4. 
*/ .xinc_max = 32, }, .subrev = DISPC_AM625, .common = "common", .common_regs = tidss_am65x_common_regs, .num_vps = 2, .vp_name = { "vp1", "vp2" }, .ovr_name = { "ovr1", "ovr2" }, .vpclk_name = { "vp1", "vp2" }, .vp_bus_type = { DISPC_VP_INTERNAL, DISPC_VP_DPI }, .vp_feat = { .color = { .has_ctm = true, .gamma_size = 256, .gamma_type = TIDSS_GAMMA_8BIT, }, }, .num_planes = 2, /* note: vid is plane_id 0 and vidl1 is plane_id 1 */ .vid_name = { "vid", "vidl1" }, .vid_lite = { false, true, }, .vid_order = { 1, 0 }, }; static const u16 *dispc_common_regmap; struct dss_vp_data { u32 *gamma_table; }; struct dispc_device { struct tidss_device *tidss; struct device *dev; void __iomem *base_common; void __iomem *base_vid[TIDSS_MAX_PLANES]; void __iomem *base_ovr[TIDSS_MAX_PORTS]; void __iomem *base_vp[TIDSS_MAX_PORTS]; struct regmap *oldi_io_ctrl; struct clk *vp_clk[TIDSS_MAX_PORTS]; const struct dispc_features *feat; struct clk *fclk; bool is_enabled; struct dss_vp_data vp_data[TIDSS_MAX_PORTS]; u32 *fourccs; u32 num_fourccs; u32 memory_bandwidth_limit; struct dispc_errata errata; }; static void dispc_write(struct dispc_device *dispc, u16 reg, u32 val) { iowrite32(val, dispc->base_common + reg); } static u32 dispc_read(struct dispc_device *dispc, u16 reg) { return ioread32(dispc->base_common + reg); } static void dispc_vid_write(struct dispc_device *dispc, u32 hw_plane, u16 reg, u32 val) { void __iomem *base = dispc->base_vid[hw_plane]; iowrite32(val, base + reg); } static u32 dispc_vid_read(struct dispc_device *dispc, u32 hw_plane, u16 reg) { void __iomem *base = dispc->base_vid[hw_plane]; return ioread32(base + reg); } static void dispc_ovr_write(struct dispc_device *dispc, u32 hw_videoport, u16 reg, u32 val) { void __iomem *base = dispc->base_ovr[hw_videoport]; iowrite32(val, base + reg); } static u32 dispc_ovr_read(struct dispc_device *dispc, u32 hw_videoport, u16 reg) { void __iomem *base = dispc->base_ovr[hw_videoport]; return ioread32(base + reg); } static void 
dispc_vp_write(struct dispc_device *dispc, u32 hw_videoport, u16 reg, u32 val) { void __iomem *base = dispc->base_vp[hw_videoport]; iowrite32(val, base + reg); } static u32 dispc_vp_read(struct dispc_device *dispc, u32 hw_videoport, u16 reg) { void __iomem *base = dispc->base_vp[hw_videoport]; return ioread32(base + reg); } /* * TRM gives bitfields as start:end, where start is the higher bit * number. For example 7:0 */ static u32 FLD_MASK(u32 start, u32 end) { return ((1 << (start - end + 1)) - 1) << end; } static u32 FLD_VAL(u32 val, u32 start, u32 end) { return (val << end) & FLD_MASK(start, end); } static u32 FLD_GET(u32 val, u32 start, u32 end) { return (val & FLD_MASK(start, end)) >> end; } static u32 FLD_MOD(u32 orig, u32 val, u32 start, u32 end) { return (orig & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end); } static u32 REG_GET(struct dispc_device *dispc, u32 idx, u32 start, u32 end) { return FLD_GET(dispc_read(dispc, idx), start, end); } static void REG_FLD_MOD(struct dispc_device *dispc, u32 idx, u32 val, u32 start, u32 end) { dispc_write(dispc, idx, FLD_MOD(dispc_read(dispc, idx), val, start, end)); } static u32 VID_REG_GET(struct dispc_device *dispc, u32 hw_plane, u32 idx, u32 start, u32 end) { return FLD_GET(dispc_vid_read(dispc, hw_plane, idx), start, end); } static void VID_REG_FLD_MOD(struct dispc_device *dispc, u32 hw_plane, u32 idx, u32 val, u32 start, u32 end) { dispc_vid_write(dispc, hw_plane, idx, FLD_MOD(dispc_vid_read(dispc, hw_plane, idx), val, start, end)); } static u32 VP_REG_GET(struct dispc_device *dispc, u32 vp, u32 idx, u32 start, u32 end) { return FLD_GET(dispc_vp_read(dispc, vp, idx), start, end); } static void VP_REG_FLD_MOD(struct dispc_device *dispc, u32 vp, u32 idx, u32 val, u32 start, u32 end) { dispc_vp_write(dispc, vp, idx, FLD_MOD(dispc_vp_read(dispc, vp, idx), val, start, end)); } __maybe_unused static u32 OVR_REG_GET(struct dispc_device *dispc, u32 ovr, u32 idx, u32 start, u32 end) { return 
FLD_GET(dispc_ovr_read(dispc, ovr, idx), start, end); } static void OVR_REG_FLD_MOD(struct dispc_device *dispc, u32 ovr, u32 idx, u32 val, u32 start, u32 end) { dispc_ovr_write(dispc, ovr, idx, FLD_MOD(dispc_ovr_read(dispc, ovr, idx), val, start, end)); } static dispc_irq_t dispc_vp_irq_from_raw(u32 stat, u32 hw_videoport) { dispc_irq_t vp_stat = 0; if (stat & BIT(0)) vp_stat |= DSS_IRQ_VP_FRAME_DONE(hw_videoport); if (stat & BIT(1)) vp_stat |= DSS_IRQ_VP_VSYNC_EVEN(hw_videoport); if (stat & BIT(2)) vp_stat |= DSS_IRQ_VP_VSYNC_ODD(hw_videoport); if (stat & BIT(4)) vp_stat |= DSS_IRQ_VP_SYNC_LOST(hw_videoport); return vp_stat; } static u32 dispc_vp_irq_to_raw(dispc_irq_t vpstat, u32 hw_videoport) { u32 stat = 0; if (vpstat & DSS_IRQ_VP_FRAME_DONE(hw_videoport)) stat |= BIT(0); if (vpstat & DSS_IRQ_VP_VSYNC_EVEN(hw_videoport)) stat |= BIT(1); if (vpstat & DSS_IRQ_VP_VSYNC_ODD(hw_videoport)) stat |= BIT(2); if (vpstat & DSS_IRQ_VP_SYNC_LOST(hw_videoport)) stat |= BIT(4); return stat; } static dispc_irq_t dispc_vid_irq_from_raw(u32 stat, u32 hw_plane) { dispc_irq_t vid_stat = 0; if (stat & BIT(0)) vid_stat |= DSS_IRQ_PLANE_FIFO_UNDERFLOW(hw_plane); return vid_stat; } static u32 dispc_vid_irq_to_raw(dispc_irq_t vidstat, u32 hw_plane) { u32 stat = 0; if (vidstat & DSS_IRQ_PLANE_FIFO_UNDERFLOW(hw_plane)) stat |= BIT(0); return stat; } static dispc_irq_t dispc_k2g_vp_read_irqstatus(struct dispc_device *dispc, u32 hw_videoport) { u32 stat = dispc_vp_read(dispc, hw_videoport, DISPC_VP_K2G_IRQSTATUS); return dispc_vp_irq_from_raw(stat, hw_videoport); } static void dispc_k2g_vp_write_irqstatus(struct dispc_device *dispc, u32 hw_videoport, dispc_irq_t vpstat) { u32 stat = dispc_vp_irq_to_raw(vpstat, hw_videoport); dispc_vp_write(dispc, hw_videoport, DISPC_VP_K2G_IRQSTATUS, stat); } static dispc_irq_t dispc_k2g_vid_read_irqstatus(struct dispc_device *dispc, u32 hw_plane) { u32 stat = dispc_vid_read(dispc, hw_plane, DISPC_VID_K2G_IRQSTATUS); return dispc_vid_irq_from_raw(stat, 
hw_plane); } static void dispc_k2g_vid_write_irqstatus(struct dispc_device *dispc, u32 hw_plane, dispc_irq_t vidstat) { u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane); dispc_vid_write(dispc, hw_plane, DISPC_VID_K2G_IRQSTATUS, stat); } static dispc_irq_t dispc_k2g_vp_read_irqenable(struct dispc_device *dispc, u32 hw_videoport) { u32 stat = dispc_vp_read(dispc, hw_videoport, DISPC_VP_K2G_IRQENABLE); return dispc_vp_irq_from_raw(stat, hw_videoport); } static void dispc_k2g_vp_set_irqenable(struct dispc_device *dispc, u32 hw_videoport, dispc_irq_t vpstat) { u32 stat = dispc_vp_irq_to_raw(vpstat, hw_videoport); dispc_vp_write(dispc, hw_videoport, DISPC_VP_K2G_IRQENABLE, stat); } static dispc_irq_t dispc_k2g_vid_read_irqenable(struct dispc_device *dispc, u32 hw_plane) { u32 stat = dispc_vid_read(dispc, hw_plane, DISPC_VID_K2G_IRQENABLE); return dispc_vid_irq_from_raw(stat, hw_plane); } static void dispc_k2g_vid_set_irqenable(struct dispc_device *dispc, u32 hw_plane, dispc_irq_t vidstat) { u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane); dispc_vid_write(dispc, hw_plane, DISPC_VID_K2G_IRQENABLE, stat); } static void dispc_k2g_clear_irqstatus(struct dispc_device *dispc, dispc_irq_t mask) { dispc_k2g_vp_write_irqstatus(dispc, 0, mask); dispc_k2g_vid_write_irqstatus(dispc, 0, mask); } static dispc_irq_t dispc_k2g_read_and_clear_irqstatus(struct dispc_device *dispc) { dispc_irq_t stat = 0; /* always clear the top level irqstatus */ dispc_write(dispc, DISPC_IRQSTATUS, dispc_read(dispc, DISPC_IRQSTATUS)); stat |= dispc_k2g_vp_read_irqstatus(dispc, 0); stat |= dispc_k2g_vid_read_irqstatus(dispc, 0); dispc_k2g_clear_irqstatus(dispc, stat); return stat; } static dispc_irq_t dispc_k2g_read_irqenable(struct dispc_device *dispc) { dispc_irq_t stat = 0; stat |= dispc_k2g_vp_read_irqenable(dispc, 0); stat |= dispc_k2g_vid_read_irqenable(dispc, 0); return stat; } static void dispc_k2g_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask) { dispc_irq_t old_mask = 
dispc_k2g_read_irqenable(dispc);

	/* clear the irqstatus for newly enabled irqs */
	dispc_k2g_clear_irqstatus(dispc, (mask ^ old_mask) & mask);

	dispc_k2g_vp_set_irqenable(dispc, 0, mask);
	dispc_k2g_vid_set_irqenable(dispc, 0, mask);

	/*
	 * NOTE(review): bits 0 and 7 are set unconditionally here --
	 * presumably the top-level lines for the single VP and single VID
	 * plane of K2G; confirm against the TRM.
	 */
	dispc_write(dispc, DISPC_IRQENABLE_SET, (1 << 0) | (1 << 7));

	/* flush posted write */
	dispc_k2g_read_irqenable(dispc);
}

/*
 * K3-family interrupt helpers: unlike K2G, each video port and each video
 * plane has its own IRQSTATUS/IRQENABLE register in the common register
 * space, accessed via dispc_read()/dispc_write().
 */

/* Read the raw irqstatus of one video port, decoded to dispc_irq_t. */
static dispc_irq_t
dispc_k3_vp_read_irqstatus(struct dispc_device *dispc,
			   u32 hw_videoport)
{
	u32 stat = dispc_read(dispc, DISPC_VP_IRQSTATUS(hw_videoport));

	return dispc_vp_irq_from_raw(stat, hw_videoport);
}

/* Write the raw irqstatus bits of one video port (used to clear status). */
static void
dispc_k3_vp_write_irqstatus(struct dispc_device *dispc,
			    u32 hw_videoport, dispc_irq_t vpstat)
{
	u32 stat = dispc_vp_irq_to_raw(vpstat, hw_videoport);

	dispc_write(dispc, DISPC_VP_IRQSTATUS(hw_videoport), stat);
}

/* Read the raw irqstatus of one video plane, decoded to dispc_irq_t. */
static dispc_irq_t
dispc_k3_vid_read_irqstatus(struct dispc_device *dispc,
			    u32 hw_plane)
{
	u32 stat = dispc_read(dispc, DISPC_VID_IRQSTATUS(hw_plane));

	return dispc_vid_irq_from_raw(stat, hw_plane);
}

/* Write the raw irqstatus bits of one video plane (used to clear status). */
static void
dispc_k3_vid_write_irqstatus(struct dispc_device *dispc,
			     u32 hw_plane, dispc_irq_t vidstat)
{
	u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane);

	dispc_write(dispc, DISPC_VID_IRQSTATUS(hw_plane), stat);
}

/* Read the enabled irq mask of one video port, decoded to dispc_irq_t. */
static dispc_irq_t
dispc_k3_vp_read_irqenable(struct dispc_device *dispc,
			   u32 hw_videoport)
{
	u32 stat = dispc_read(dispc, DISPC_VP_IRQENABLE(hw_videoport));

	return dispc_vp_irq_from_raw(stat, hw_videoport);
}

/* Program the enabled irq mask of one video port. */
static void
dispc_k3_vp_set_irqenable(struct dispc_device *dispc,
			  u32 hw_videoport, dispc_irq_t vpstat)
{
	u32 stat = dispc_vp_irq_to_raw(vpstat, hw_videoport);

	dispc_write(dispc, DISPC_VP_IRQENABLE(hw_videoport), stat);
}

/* Read the enabled irq mask of one video plane, decoded to dispc_irq_t. */
static dispc_irq_t
dispc_k3_vid_read_irqenable(struct dispc_device *dispc,
			    u32 hw_plane)
{
	u32 stat = dispc_read(dispc, DISPC_VID_IRQENABLE(hw_plane));

	return dispc_vid_irq_from_raw(stat, hw_plane);
}

/* Program the enabled irq mask of one video plane. */
static void
dispc_k3_vid_set_irqenable(struct dispc_device *dispc,
			   u32 hw_plane, dispc_irq_t vidstat)
{
	u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane);
dispc_write(dispc, DISPC_VID_IRQENABLE(hw_plane), stat); } static void dispc_k3_clear_irqstatus(struct dispc_device *dispc, dispc_irq_t clearmask) { unsigned int i; u32 top_clear = 0; for (i = 0; i < dispc->feat->num_vps; ++i) { if (clearmask & DSS_IRQ_VP_MASK(i)) { dispc_k3_vp_write_irqstatus(dispc, i, clearmask); top_clear |= BIT(i); } } for (i = 0; i < dispc->feat->num_planes; ++i) { if (clearmask & DSS_IRQ_PLANE_MASK(i)) { dispc_k3_vid_write_irqstatus(dispc, i, clearmask); top_clear |= BIT(4 + i); } } if (dispc->feat->subrev == DISPC_K2G) return; dispc_write(dispc, DISPC_IRQSTATUS, top_clear); /* Flush posted writes */ dispc_read(dispc, DISPC_IRQSTATUS); } static dispc_irq_t dispc_k3_read_and_clear_irqstatus(struct dispc_device *dispc) { dispc_irq_t status = 0; unsigned int i; for (i = 0; i < dispc->feat->num_vps; ++i) status |= dispc_k3_vp_read_irqstatus(dispc, i); for (i = 0; i < dispc->feat->num_planes; ++i) status |= dispc_k3_vid_read_irqstatus(dispc, i); dispc_k3_clear_irqstatus(dispc, status); return status; } static dispc_irq_t dispc_k3_read_irqenable(struct dispc_device *dispc) { dispc_irq_t enable = 0; unsigned int i; for (i = 0; i < dispc->feat->num_vps; ++i) enable |= dispc_k3_vp_read_irqenable(dispc, i); for (i = 0; i < dispc->feat->num_planes; ++i) enable |= dispc_k3_vid_read_irqenable(dispc, i); return enable; } static void dispc_k3_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask) { unsigned int i; u32 main_enable = 0, main_disable = 0; dispc_irq_t old_mask; old_mask = dispc_k3_read_irqenable(dispc); /* clear the irqstatus for newly enabled irqs */ dispc_k3_clear_irqstatus(dispc, (old_mask ^ mask) & mask); for (i = 0; i < dispc->feat->num_vps; ++i) { dispc_k3_vp_set_irqenable(dispc, i, mask); if (mask & DSS_IRQ_VP_MASK(i)) main_enable |= BIT(i); /* VP IRQ */ else main_disable |= BIT(i); /* VP IRQ */ } for (i = 0; i < dispc->feat->num_planes; ++i) { dispc_k3_vid_set_irqenable(dispc, i, mask); if (mask & DSS_IRQ_PLANE_MASK(i)) main_enable 
|= BIT(i + 4); /* VID IRQ */ else main_disable |= BIT(i + 4); /* VID IRQ */ } if (main_enable) dispc_write(dispc, DISPC_IRQENABLE_SET, main_enable); if (main_disable) dispc_write(dispc, DISPC_IRQENABLE_CLR, main_disable); /* Flush posted writes */ dispc_read(dispc, DISPC_IRQENABLE_SET); } dispc_irq_t dispc_read_and_clear_irqstatus(struct dispc_device *dispc) { switch (dispc->feat->subrev) { case DISPC_K2G: return dispc_k2g_read_and_clear_irqstatus(dispc); case DISPC_AM625: case DISPC_AM65X: case DISPC_J721E: return dispc_k3_read_and_clear_irqstatus(dispc); default: WARN_ON(1); return 0; } } void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask) { switch (dispc->feat->subrev) { case DISPC_K2G: dispc_k2g_set_irqenable(dispc, mask); break; case DISPC_AM625: case DISPC_AM65X: case DISPC_J721E: dispc_k3_set_irqenable(dispc, mask); break; default: WARN_ON(1); break; } } enum dispc_oldi_mode_reg_val { SPWG_18 = 0, JEIDA_24 = 1, SPWG_24 = 2 }; struct dispc_bus_format { u32 bus_fmt; u32 data_width; bool is_oldi_fmt; enum dispc_oldi_mode_reg_val oldi_mode_reg_val; }; static const struct dispc_bus_format dispc_bus_formats[] = { { MEDIA_BUS_FMT_RGB444_1X12, 12, false, 0 }, { MEDIA_BUS_FMT_RGB565_1X16, 16, false, 0 }, { MEDIA_BUS_FMT_RGB666_1X18, 18, false, 0 }, { MEDIA_BUS_FMT_RGB888_1X24, 24, false, 0 }, { MEDIA_BUS_FMT_RGB101010_1X30, 30, false, 0 }, { MEDIA_BUS_FMT_RGB121212_1X36, 36, false, 0 }, { MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, 18, true, SPWG_18 }, { MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, 24, true, SPWG_24 }, { MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA, 24, true, JEIDA_24 }, }; static const struct dispc_bus_format *dispc_vp_find_bus_fmt(struct dispc_device *dispc, u32 hw_videoport, u32 bus_fmt, u32 bus_flags) { unsigned int i; for (i = 0; i < ARRAY_SIZE(dispc_bus_formats); ++i) { if (dispc_bus_formats[i].bus_fmt == bus_fmt) return &dispc_bus_formats[i]; } return NULL; } int dispc_vp_bus_check(struct dispc_device *dispc, u32 hw_videoport, const struct drm_crtc_state 
*state) {
	const struct tidss_crtc_state *tstate = to_tidss_crtc_state(state);
	const struct dispc_bus_format *fmt;

	/* The requested bus format must be one the driver knows about. */
	fmt = dispc_vp_find_bus_fmt(dispc, hw_videoport, tstate->bus_format,
				    tstate->bus_flags);
	if (!fmt) {
		dev_dbg(dispc->dev, "%s: Unsupported bus format: %u\n",
			__func__, tstate->bus_format);
		return -EINVAL;
	}

	/* OLDI-only bus formats are rejected on non-OLDI video ports. */
	if (dispc->feat->vp_bus_type[hw_videoport] != DISPC_VP_OLDI &&
	    fmt->is_oldi_fmt) {
		dev_dbg(dispc->dev, "%s: %s is not OLDI-port\n",
			__func__, dispc->feat->vp_name[hw_videoport]);
		return -EINVAL;
	}

	return 0;
}

/*
 * Power the OLDI TX up or down by toggling the OLDI_PWRDN_TX bit in the
 * four data-lane and one clock-lane IO control registers.
 */
static void dispc_oldi_tx_power(struct dispc_device *dispc, bool power)
{
	u32 val = power ? 0 : OLDI_PWRDN_TX;

	/* Requires the oldi_io_ctrl regmap to have been set up. */
	if (WARN_ON(!dispc->oldi_io_ctrl))
		return;

	regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT0_IO_CTRL,
			   OLDI_PWRDN_TX, val);
	regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT1_IO_CTRL,
			   OLDI_PWRDN_TX, val);
	regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT2_IO_CTRL,
			   OLDI_PWRDN_TX, val);
	regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT3_IO_CTRL,
			   OLDI_PWRDN_TX, val);
	regmap_update_bits(dispc->oldi_io_ctrl, OLDI_CLK_IO_CTRL,
			   OLDI_PWRDN_TX, val);
}

/*
 * Program the number of output data lines into bits 10:8 of
 * DISPC_VP_CONTROL. An unsupported width triggers a WARN and falls back
 * to the 24-line encoding (v = 3).
 */
static void dispc_set_num_datalines(struct dispc_device *dispc,
				    u32 hw_videoport, int num_lines)
{
	int v;

	switch (num_lines) {
	case 12:
		v = 0;
		break;
	case 16:
		v = 1;
		break;
	case 18:
		v = 2;
		break;
	case 24:
		v = 3;
		break;
	case 30:
		v = 4;
		break;
	case 36:
		v = 5;
		break;
	default:
		WARN_ON(1);
		v = 3;
	}

	VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, v, 10, 8);
}

/* Configure and enable the OLDI output of a video port. */
static void dispc_enable_oldi(struct dispc_device *dispc, u32 hw_videoport,
			      const struct dispc_bus_format *fmt)
{
	u32 oldi_cfg = 0;
	u32 oldi_reset_bit = BIT(5 + hw_videoport);
	int count = 0;

	/*
	 * For the moment DUALMODESYNC, MASTERSLAVE, MODE, and SRC
	 * bits of DISPC_VP_DSS_OLDI_CFG are set statically to 0.
*/ if (fmt->data_width == 24) oldi_cfg |= BIT(8); /* MSB */ else if (fmt->data_width != 18) dev_warn(dispc->dev, "%s: %d port width not supported\n", __func__, fmt->data_width); oldi_cfg |= BIT(7); /* DEPOL */ oldi_cfg = FLD_MOD(oldi_cfg, fmt->oldi_mode_reg_val, 3, 1); oldi_cfg |= BIT(12); /* SOFTRST */ oldi_cfg |= BIT(0); /* ENABLE */ dispc_vp_write(dispc, hw_videoport, DISPC_VP_DSS_OLDI_CFG, oldi_cfg); while (!(oldi_reset_bit & dispc_read(dispc, DSS_SYSSTATUS)) && count < 10000) count++; if (!(oldi_reset_bit & dispc_read(dispc, DSS_SYSSTATUS))) dev_warn(dispc->dev, "%s: timeout waiting OLDI reset done\n", __func__); } void dispc_vp_prepare(struct dispc_device *dispc, u32 hw_videoport, const struct drm_crtc_state *state) { const struct tidss_crtc_state *tstate = to_tidss_crtc_state(state); const struct dispc_bus_format *fmt; fmt = dispc_vp_find_bus_fmt(dispc, hw_videoport, tstate->bus_format, tstate->bus_flags); if (WARN_ON(!fmt)) return; if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI) { dispc_oldi_tx_power(dispc, true); dispc_enable_oldi(dispc, hw_videoport, fmt); } } void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport, const struct drm_crtc_state *state) { const struct drm_display_mode *mode = &state->adjusted_mode; const struct tidss_crtc_state *tstate = to_tidss_crtc_state(state); bool align, onoff, rf, ieo, ipc, ihs, ivs; const struct dispc_bus_format *fmt; u32 hsw, hfp, hbp, vsw, vfp, vbp; fmt = dispc_vp_find_bus_fmt(dispc, hw_videoport, tstate->bus_format, tstate->bus_flags); if (WARN_ON(!fmt)) return; dispc_set_num_datalines(dispc, hw_videoport, fmt->data_width); hfp = mode->hsync_start - mode->hdisplay; hsw = mode->hsync_end - mode->hsync_start; hbp = mode->htotal - mode->hsync_end; vfp = mode->vsync_start - mode->vdisplay; vsw = mode->vsync_end - mode->vsync_start; vbp = mode->vtotal - mode->vsync_end; dispc_vp_write(dispc, hw_videoport, DISPC_VP_TIMING_H, FLD_VAL(hsw - 1, 7, 0) | FLD_VAL(hfp - 1, 19, 8) | FLD_VAL(hbp - 1, 31, 
20)); dispc_vp_write(dispc, hw_videoport, DISPC_VP_TIMING_V, FLD_VAL(vsw - 1, 7, 0) | FLD_VAL(vfp, 19, 8) | FLD_VAL(vbp, 31, 20)); ivs = !!(mode->flags & DRM_MODE_FLAG_NVSYNC); ihs = !!(mode->flags & DRM_MODE_FLAG_NHSYNC); ieo = !!(tstate->bus_flags & DRM_BUS_FLAG_DE_LOW); ipc = !!(tstate->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE); /* always use the 'rf' setting */ onoff = true; rf = !!(tstate->bus_flags & DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE); /* always use aligned syncs */ align = true; /* always use DE_HIGH for OLDI */ if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI) ieo = false; dispc_vp_write(dispc, hw_videoport, DISPC_VP_POL_FREQ, FLD_VAL(align, 18, 18) | FLD_VAL(onoff, 17, 17) | FLD_VAL(rf, 16, 16) | FLD_VAL(ieo, 15, 15) | FLD_VAL(ipc, 14, 14) | FLD_VAL(ihs, 13, 13) | FLD_VAL(ivs, 12, 12)); dispc_vp_write(dispc, hw_videoport, DISPC_VP_SIZE_SCREEN, FLD_VAL(mode->hdisplay - 1, 11, 0) | FLD_VAL(mode->vdisplay - 1, 27, 16)); VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1, 0, 0); } void dispc_vp_disable(struct dispc_device *dispc, u32 hw_videoport) { VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 0, 0, 0); } void dispc_vp_unprepare(struct dispc_device *dispc, u32 hw_videoport) { if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI) { dispc_vp_write(dispc, hw_videoport, DISPC_VP_DSS_OLDI_CFG, 0); dispc_oldi_tx_power(dispc, false); } } bool dispc_vp_go_busy(struct dispc_device *dispc, u32 hw_videoport) { return VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL, 5, 5); } void dispc_vp_go(struct dispc_device *dispc, u32 hw_videoport) { WARN_ON(VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL, 5, 5)); VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1, 5, 5); } enum c8_to_c12_mode { C8_TO_C12_REPLICATE, C8_TO_C12_MAX, C8_TO_C12_MIN }; static u16 c8_to_c12(u8 c8, enum c8_to_c12_mode mode) { u16 c12; c12 = c8 << 4; switch (mode) { case C8_TO_C12_REPLICATE: /* Copy c8 4 MSB to 4 LSB for full scale c12 */ c12 |= c8 >> 4; break; case 
C8_TO_C12_MAX:
		/* Fill the 4 new LSBs with ones: maximal 12-bit expansion. */
		c12 |= 0xF;
		break;

	default:
	case C8_TO_C12_MIN:
		/* Leave the 4 new LSBs zero: minimal 12-bit expansion. */
		break;
	}

	return c12;
}

/*
 * Expand a 32-bit ARGB8888 value to 48-bit ARGB12121212, converting each
 * 8-bit component to 12 bits with c8_to_c12() using the given mode.
 */
static u64 argb8888_to_argb12121212(u32 argb8888, enum c8_to_c12_mode m)
{
	u8 a, r, g, b;
	u64 v;

	a = (argb8888 >> 24) & 0xff;
	r = (argb8888 >> 16) & 0xff;
	g = (argb8888 >> 8) & 0xff;
	b = (argb8888 >> 0) & 0xff;

	v = ((u64)c8_to_c12(a, m) << 36) | ((u64)c8_to_c12(r, m) << 24) |
	    ((u64)c8_to_c12(g, m) << 12) | (u64)c8_to_c12(b, m);

	return v;
}

/*
 * Program the default (background) color of a video port's overlay. The
 * ARGB8888 color is expanded to ARGB12121212 and split across the two
 * DEFAULT_COLOR registers (low 32 bits / high 16 bits).
 */
static void dispc_vp_set_default_color(struct dispc_device *dispc,
				       u32 hw_videoport, u32 default_color)
{
	u64 v;

	v = argb8888_to_argb12121212(default_color, C8_TO_C12_REPLICATE);

	dispc_ovr_write(dispc, hw_videoport,
			DISPC_OVR_DEFAULT_COLOR, v & 0xffffffff);
	dispc_ovr_write(dispc, hw_videoport,
			DISPC_OVR_DEFAULT_COLOR2, (v >> 32) & 0xffff);
}

/*
 * Validate a display mode against the video port's limits: pixel clock
 * range for the port's bus type, maximum 4096x4096 resolution, no
 * interlace, and (further down) blanking ranges and memory bandwidth.
 */
enum drm_mode_status dispc_vp_mode_valid(struct dispc_device *dispc,
					 u32 hw_videoport,
					 const struct drm_display_mode *mode)
{
	u32 hsw, hfp, hbp, vsw, vfp, vbp;
	enum dispc_vp_bus_type bus_type;
	int max_pclk;

	bus_type = dispc->feat->vp_bus_type[hw_videoport];
	max_pclk = dispc->feat->max_pclk_khz[bus_type];

	/* Missing pixel clock limit in the feature data is unexpected. */
	if (WARN_ON(max_pclk == 0))
		return MODE_BAD;

	if (mode->clock < dispc->feat->min_pclk_khz)
		return MODE_CLOCK_LOW;

	if (mode->clock > max_pclk)
		return MODE_CLOCK_HIGH;

	if (mode->hdisplay > 4096)
		return MODE_BAD;

	if (mode->vdisplay > 4096)
		return MODE_BAD;

	/* TODO: add interlace support */
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		return MODE_NO_INTERLACE;

	/*
	 * Enforce the output width is divisible by 2. Actually this
	 * is only needed in following cases:
	 * - YUV output selected (BT656, BT1120)
	 * - Dithering enabled
	 * - TDM with TDMCycleFormat == 3
	 * But for simplicity we enforce that always.
*/ if ((mode->hdisplay % 2) != 0) return MODE_BAD_HVALUE; hfp = mode->hsync_start - mode->hdisplay; hsw = mode->hsync_end - mode->hsync_start; hbp = mode->htotal - mode->hsync_end; vfp = mode->vsync_start - mode->vdisplay; vsw = mode->vsync_end - mode->vsync_start; vbp = mode->vtotal - mode->vsync_end; if (hsw < 1 || hsw > 256 || hfp < 1 || hfp > 4096 || hbp < 1 || hbp > 4096) return MODE_BAD_HVALUE; if (vsw < 1 || vsw > 256 || vfp > 4095 || vbp > 4095) return MODE_BAD_VVALUE; if (dispc->memory_bandwidth_limit) { const unsigned int bpp = 4; u64 bandwidth; bandwidth = 1000 * mode->clock; bandwidth = bandwidth * mode->hdisplay * mode->vdisplay * bpp; bandwidth = div_u64(bandwidth, mode->htotal * mode->vtotal); if (dispc->memory_bandwidth_limit < bandwidth) return MODE_BAD; } return MODE_OK; } int dispc_vp_enable_clk(struct dispc_device *dispc, u32 hw_videoport) { int ret = clk_prepare_enable(dispc->vp_clk[hw_videoport]); if (ret) dev_err(dispc->dev, "%s: enabling clk failed: %d\n", __func__, ret); return ret; } void dispc_vp_disable_clk(struct dispc_device *dispc, u32 hw_videoport) { clk_disable_unprepare(dispc->vp_clk[hw_videoport]); } /* * Calculate the percentage difference between the requested pixel clock rate * and the effective rate resulting from calculating the clock divider value. 
*/ static unsigned int dispc_pclk_diff(unsigned long rate, unsigned long real_rate) { int r = rate / 100, rr = real_rate / 100; return (unsigned int)(abs(((rr - r) * 100) / r)); } int dispc_vp_set_clk_rate(struct dispc_device *dispc, u32 hw_videoport, unsigned long rate) { int r; unsigned long new_rate; r = clk_set_rate(dispc->vp_clk[hw_videoport], rate); if (r) { dev_err(dispc->dev, "vp%d: failed to set clk rate to %lu\n", hw_videoport, rate); return r; } new_rate = clk_get_rate(dispc->vp_clk[hw_videoport]); if (dispc_pclk_diff(rate, new_rate) > 5) dev_warn(dispc->dev, "vp%d: Clock rate %lu differs over 5%% from requested %lu\n", hw_videoport, new_rate, rate); dev_dbg(dispc->dev, "vp%d: new rate %lu Hz (requested %lu Hz)\n", hw_videoport, clk_get_rate(dispc->vp_clk[hw_videoport]), rate); return 0; } /* OVR */ static void dispc_k2g_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane, u32 hw_videoport, u32 x, u32 y, u32 layer) { /* On k2g there is only one plane and no need for ovr */ dispc_vid_write(dispc, hw_plane, DISPC_VID_K2G_POSITION, x | (y << 16)); } static void dispc_am65x_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane, u32 hw_videoport, u32 x, u32 y, u32 layer) { OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer), hw_plane, 4, 1); OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer), x, 17, 6); OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer), y, 30, 19); } static void dispc_j721e_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane, u32 hw_videoport, u32 x, u32 y, u32 layer) { OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer), hw_plane, 4, 1); OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer), x, 13, 0); OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer), y, 29, 16); } void dispc_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane, u32 hw_videoport, u32 x, u32 y, u32 layer) { switch (dispc->feat->subrev) { case DISPC_K2G: dispc_k2g_ovr_set_plane(dispc, 
hw_plane, hw_videoport, x, y, layer); break; case DISPC_AM625: case DISPC_AM65X: dispc_am65x_ovr_set_plane(dispc, hw_plane, hw_videoport, x, y, layer); break; case DISPC_J721E: dispc_j721e_ovr_set_plane(dispc, hw_plane, hw_videoport, x, y, layer); break; default: WARN_ON(1); break; } } void dispc_ovr_enable_layer(struct dispc_device *dispc, u32 hw_videoport, u32 layer, bool enable) { if (dispc->feat->subrev == DISPC_K2G) return; OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer), !!enable, 0, 0); } /* CSC */ enum csc_ctm { CSC_RR, CSC_RG, CSC_RB, CSC_GR, CSC_GG, CSC_GB, CSC_BR, CSC_BG, CSC_BB, }; enum csc_yuv2rgb { CSC_RY, CSC_RCB, CSC_RCR, CSC_GY, CSC_GCB, CSC_GCR, CSC_BY, CSC_BCB, CSC_BCR, }; enum csc_rgb2yuv { CSC_YR, CSC_YG, CSC_YB, CSC_CBR, CSC_CBG, CSC_CBB, CSC_CRR, CSC_CRG, CSC_CRB, }; struct dispc_csc_coef { void (*to_regval)(const struct dispc_csc_coef *csc, u32 *regval); int m[9]; int preoffset[3]; int postoffset[3]; enum { CLIP_LIMITED_RANGE = 0, CLIP_FULL_RANGE = 1, } cliping; const char *name; }; #define DISPC_CSC_REGVAL_LEN 8 static void dispc_csc_offset_regval(const struct dispc_csc_coef *csc, u32 *regval) { #define OVAL(x, y) (FLD_VAL(x, 15, 3) | FLD_VAL(y, 31, 19)) regval[5] = OVAL(csc->preoffset[0], csc->preoffset[1]); regval[6] = OVAL(csc->preoffset[2], csc->postoffset[0]); regval[7] = OVAL(csc->postoffset[1], csc->postoffset[2]); #undef OVAL } #define CVAL(x, y) (FLD_VAL(x, 10, 0) | FLD_VAL(y, 26, 16)) static void dispc_csc_yuv2rgb_regval(const struct dispc_csc_coef *csc, u32 *regval) { regval[0] = CVAL(csc->m[CSC_RY], csc->m[CSC_RCR]); regval[1] = CVAL(csc->m[CSC_RCB], csc->m[CSC_GY]); regval[2] = CVAL(csc->m[CSC_GCR], csc->m[CSC_GCB]); regval[3] = CVAL(csc->m[CSC_BY], csc->m[CSC_BCR]); regval[4] = CVAL(csc->m[CSC_BCB], 0); dispc_csc_offset_regval(csc, regval); } __maybe_unused static void dispc_csc_rgb2yuv_regval(const struct dispc_csc_coef *csc, u32 *regval) { regval[0] = CVAL(csc->m[CSC_YR], csc->m[CSC_YG]); regval[1] = 
CVAL(csc->m[CSC_YB], csc->m[CSC_CRR]); regval[2] = CVAL(csc->m[CSC_CRG], csc->m[CSC_CRB]); regval[3] = CVAL(csc->m[CSC_CBR], csc->m[CSC_CBG]); regval[4] = CVAL(csc->m[CSC_CBB], 0); dispc_csc_offset_regval(csc, regval); } static void dispc_csc_cpr_regval(const struct dispc_csc_coef *csc, u32 *regval) { regval[0] = CVAL(csc->m[CSC_RR], csc->m[CSC_RG]); regval[1] = CVAL(csc->m[CSC_RB], csc->m[CSC_GR]); regval[2] = CVAL(csc->m[CSC_GG], csc->m[CSC_GB]); regval[3] = CVAL(csc->m[CSC_BR], csc->m[CSC_BG]); regval[4] = CVAL(csc->m[CSC_BB], 0); dispc_csc_offset_regval(csc, regval); } #undef CVAL static void dispc_k2g_vid_write_csc(struct dispc_device *dispc, u32 hw_plane, const struct dispc_csc_coef *csc) { static const u16 dispc_vid_csc_coef_reg[] = { DISPC_VID_CSC_COEF(0), DISPC_VID_CSC_COEF(1), DISPC_VID_CSC_COEF(2), DISPC_VID_CSC_COEF(3), DISPC_VID_CSC_COEF(4), DISPC_VID_CSC_COEF(5), DISPC_VID_CSC_COEF(6), /* K2G has no post offset support */ }; u32 regval[DISPC_CSC_REGVAL_LEN]; unsigned int i; csc->to_regval(csc, regval); if (regval[7] != 0) dev_warn(dispc->dev, "%s: No post offset support for %s\n", __func__, csc->name); for (i = 0; i < ARRAY_SIZE(dispc_vid_csc_coef_reg); i++) dispc_vid_write(dispc, hw_plane, dispc_vid_csc_coef_reg[i], regval[i]); } static void dispc_k3_vid_write_csc(struct dispc_device *dispc, u32 hw_plane, const struct dispc_csc_coef *csc) { static const u16 dispc_vid_csc_coef_reg[DISPC_CSC_REGVAL_LEN] = { DISPC_VID_CSC_COEF(0), DISPC_VID_CSC_COEF(1), DISPC_VID_CSC_COEF(2), DISPC_VID_CSC_COEF(3), DISPC_VID_CSC_COEF(4), DISPC_VID_CSC_COEF(5), DISPC_VID_CSC_COEF(6), DISPC_VID_CSC_COEF7, }; u32 regval[DISPC_CSC_REGVAL_LEN]; unsigned int i; csc->to_regval(csc, regval); for (i = 0; i < ARRAY_SIZE(dispc_vid_csc_coef_reg); i++) dispc_vid_write(dispc, hw_plane, dispc_vid_csc_coef_reg[i], regval[i]); } /* YUV -> RGB, ITU-R BT.601, full range */ static const struct dispc_csc_coef csc_yuv2rgb_bt601_full = { dispc_csc_yuv2rgb_regval, { 256, 0, 358, /* ry, rcb, 
rcr |1.000 0.000 1.402|*/ 256, -88, -182, /* gy, gcb, gcr |1.000 -0.344 -0.714|*/ 256, 452, 0, }, /* by, bcb, bcr |1.000 1.772 0.000|*/ { 0, -2048, -2048, }, /* full range */ { 0, 0, 0, }, CLIP_FULL_RANGE, "BT.601 Full", }; /* YUV -> RGB, ITU-R BT.601, limited range */ static const struct dispc_csc_coef csc_yuv2rgb_bt601_lim = { dispc_csc_yuv2rgb_regval, { 298, 0, 409, /* ry, rcb, rcr |1.164 0.000 1.596|*/ 298, -100, -208, /* gy, gcb, gcr |1.164 -0.392 -0.813|*/ 298, 516, 0, }, /* by, bcb, bcr |1.164 2.017 0.000|*/ { -256, -2048, -2048, }, /* limited range */ { 0, 0, 0, }, CLIP_FULL_RANGE, "BT.601 Limited", }; /* YUV -> RGB, ITU-R BT.709, full range */ static const struct dispc_csc_coef csc_yuv2rgb_bt709_full = { dispc_csc_yuv2rgb_regval, { 256, 0, 402, /* ry, rcb, rcr |1.000 0.000 1.570|*/ 256, -48, -120, /* gy, gcb, gcr |1.000 -0.187 -0.467|*/ 256, 475, 0, }, /* by, bcb, bcr |1.000 1.856 0.000|*/ { 0, -2048, -2048, }, /* full range */ { 0, 0, 0, }, CLIP_FULL_RANGE, "BT.709 Full", }; /* YUV -> RGB, ITU-R BT.709, limited range */ static const struct dispc_csc_coef csc_yuv2rgb_bt709_lim = { dispc_csc_yuv2rgb_regval, { 298, 0, 459, /* ry, rcb, rcr |1.164 0.000 1.793|*/ 298, -55, -136, /* gy, gcb, gcr |1.164 -0.213 -0.533|*/ 298, 541, 0, }, /* by, bcb, bcr |1.164 2.112 0.000|*/ { -256, -2048, -2048, }, /* limited range */ { 0, 0, 0, }, CLIP_FULL_RANGE, "BT.709 Limited", }; static const struct { enum drm_color_encoding encoding; enum drm_color_range range; const struct dispc_csc_coef *csc; } dispc_csc_table[] = { { DRM_COLOR_YCBCR_BT601, DRM_COLOR_YCBCR_FULL_RANGE, &csc_yuv2rgb_bt601_full, }, { DRM_COLOR_YCBCR_BT601, DRM_COLOR_YCBCR_LIMITED_RANGE, &csc_yuv2rgb_bt601_lim, }, { DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_FULL_RANGE, &csc_yuv2rgb_bt709_full, }, { DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE, &csc_yuv2rgb_bt709_lim, }, }; static const struct dispc_csc_coef *dispc_find_csc(enum drm_color_encoding encoding, enum drm_color_range range) { unsigned int i; 
for (i = 0; i < ARRAY_SIZE(dispc_csc_table); i++) {
		if (dispc_csc_table[i].encoding == encoding &&
		    dispc_csc_table[i].range == range) {
			return dispc_csc_table[i].csc;
		}
	}

	/* No coefficient set for this encoding/range combination. */
	return NULL;
}

/*
 * Look up and program the YUV->RGB CSC coefficients matching the plane
 * state's color encoding and range. K2G uses the older register layout
 * (no post offset support), later revisions use the K3 layout.
 */
static void dispc_vid_csc_setup(struct dispc_device *dispc, u32 hw_plane,
				const struct drm_plane_state *state)
{
	const struct dispc_csc_coef *coef;

	coef = dispc_find_csc(state->color_encoding, state->color_range);
	if (!coef) {
		dev_err(dispc->dev, "%s: CSC (%u,%u) not found\n",
			__func__, state->color_encoding, state->color_range);
		return;
	}

	if (dispc->feat->subrev == DISPC_K2G)
		dispc_k2g_vid_write_csc(dispc, hw_plane, coef);
	else
		dispc_k3_vid_write_csc(dispc, hw_plane, coef);
}

/* Enable or disable color space conversion (DISPC_VID_ATTRIBUTES bit 9). */
static void dispc_vid_csc_enable(struct dispc_device *dispc, u32 hw_plane,
				 bool enable)
{
	VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable, 9, 9);
}

/* SCALER */

/*
 * Compute the FIR increment for scaling 'in' pixels to 'out' pixels as a
 * fixed point value with a 21-bit fractional part (0x200000 represents 1.0).
 */
static u32 dispc_calc_fir_inc(u32 in, u32 out)
{
	return (u32)div_u64(0x200000ull * in, out);
}

/* Horizontal/vertical FIR coefficient sets and their UV (chroma) variants. */
enum dispc_vid_fir_coef_set {
	DISPC_VID_FIR_COEF_HORIZ,
	DISPC_VID_FIR_COEF_HORIZ_UV,
	DISPC_VID_FIR_COEF_VERT,
	DISPC_VID_FIR_COEF_VERT_UV,
};

/*
 * Write one set of FIR scaler coefficients: 9 phases of c0 into the
 * COEFS_*0 registers and 16 phases of c1/c2 packed together into the
 * COEFS_*12 registers.
 */
static void dispc_vid_write_fir_coefs(struct dispc_device *dispc,
				      u32 hw_plane,
				      enum dispc_vid_fir_coef_set coef_set,
				      const struct tidss_scale_coefs *coefs)
{
	static const u16 c0_regs[] = {
		[DISPC_VID_FIR_COEF_HORIZ] = DISPC_VID_FIR_COEFS_H0,
		[DISPC_VID_FIR_COEF_HORIZ_UV] = DISPC_VID_FIR_COEFS_H0_C,
		[DISPC_VID_FIR_COEF_VERT] = DISPC_VID_FIR_COEFS_V0,
		[DISPC_VID_FIR_COEF_VERT_UV] = DISPC_VID_FIR_COEFS_V0_C,
	};

	static const u16 c12_regs[] = {
		[DISPC_VID_FIR_COEF_HORIZ] = DISPC_VID_FIR_COEFS_H12,
		[DISPC_VID_FIR_COEF_HORIZ_UV] = DISPC_VID_FIR_COEFS_H12_C,
		[DISPC_VID_FIR_COEF_VERT] = DISPC_VID_FIR_COEFS_V12,
		[DISPC_VID_FIR_COEF_VERT_UV] = DISPC_VID_FIR_COEFS_V12_C,
	};

	const u16 c0_base = c0_regs[coef_set];
	const u16 c12_base = c12_regs[coef_set];
	int phase;

	if (!coefs) {
		dev_err(dispc->dev, "%s: No coefficients given.\n", __func__);
		return;
	}

	for (phase = 0; phase <= 8; ++phase) {
		u16 reg = c0_base + phase *
4; u16 c0 = coefs->c0[phase]; dispc_vid_write(dispc, hw_plane, reg, c0); } for (phase = 0; phase <= 15; ++phase) { u16 reg = c12_base + phase * 4; s16 c1, c2; u32 c12; c1 = coefs->c1[phase]; c2 = coefs->c2[phase]; c12 = FLD_VAL(c1, 19, 10) | FLD_VAL(c2, 29, 20); dispc_vid_write(dispc, hw_plane, reg, c12); } } static bool dispc_fourcc_is_yuv(u32 fourcc) { switch (fourcc) { case DRM_FORMAT_YUYV: case DRM_FORMAT_UYVY: case DRM_FORMAT_NV12: return true; default: return false; } } struct dispc_scaling_params { int xinc, yinc; u32 in_w, in_h, in_w_uv, in_h_uv; u32 fir_xinc, fir_yinc, fir_xinc_uv, fir_yinc_uv; bool scale_x, scale_y; const struct tidss_scale_coefs *xcoef, *ycoef, *xcoef_uv, *ycoef_uv; bool five_taps; }; static int dispc_vid_calc_scaling(struct dispc_device *dispc, const struct drm_plane_state *state, struct dispc_scaling_params *sp, bool lite_plane) { const struct dispc_features_scaling *f = &dispc->feat->scaling; u32 fourcc = state->fb->format->format; u32 in_width_max_5tap = f->in_width_max_5tap_rgb; u32 in_width_max_3tap = f->in_width_max_3tap_rgb; u32 downscale_limit; u32 in_width_max; memset(sp, 0, sizeof(*sp)); sp->xinc = 1; sp->yinc = 1; sp->in_w = state->src_w >> 16; sp->in_w_uv = sp->in_w; sp->in_h = state->src_h >> 16; sp->in_h_uv = sp->in_h; sp->scale_x = sp->in_w != state->crtc_w; sp->scale_y = sp->in_h != state->crtc_h; if (dispc_fourcc_is_yuv(fourcc)) { in_width_max_5tap = f->in_width_max_5tap_yuv; in_width_max_3tap = f->in_width_max_3tap_yuv; sp->in_w_uv >>= 1; sp->scale_x = true; if (fourcc == DRM_FORMAT_NV12) { sp->in_h_uv >>= 1; sp->scale_y = true; } } /* Skip the rest if no scaling is used */ if ((!sp->scale_x && !sp->scale_y) || lite_plane) return 0; if (sp->in_w > in_width_max_5tap) { sp->five_taps = false; in_width_max = in_width_max_3tap; downscale_limit = f->downscale_limit_3tap; } else { sp->five_taps = true; in_width_max = in_width_max_5tap; downscale_limit = f->downscale_limit_5tap; } if (sp->scale_x) { sp->fir_xinc = 
dispc_calc_fir_inc(sp->in_w, state->crtc_w); if (sp->fir_xinc < dispc_calc_fir_inc(1, f->upscale_limit)) { dev_dbg(dispc->dev, "%s: X-scaling factor %u/%u > %u\n", __func__, state->crtc_w, state->src_w >> 16, f->upscale_limit); return -EINVAL; } if (sp->fir_xinc >= dispc_calc_fir_inc(downscale_limit, 1)) { sp->xinc = DIV_ROUND_UP(DIV_ROUND_UP(sp->in_w, state->crtc_w), downscale_limit); if (sp->xinc > f->xinc_max) { dev_dbg(dispc->dev, "%s: X-scaling factor %u/%u < 1/%u\n", __func__, state->crtc_w, state->src_w >> 16, downscale_limit * f->xinc_max); return -EINVAL; } sp->in_w = (state->src_w >> 16) / sp->xinc; } while (sp->in_w > in_width_max) { sp->xinc++; sp->in_w = (state->src_w >> 16) / sp->xinc; } if (sp->xinc > f->xinc_max) { dev_dbg(dispc->dev, "%s: Too wide input buffer %u > %u\n", __func__, state->src_w >> 16, in_width_max * f->xinc_max); return -EINVAL; } /* * We need even line length for YUV formats. Decimation * can lead to odd length, so we need to make it even * again. */ if (dispc_fourcc_is_yuv(fourcc)) sp->in_w &= ~1; sp->fir_xinc = dispc_calc_fir_inc(sp->in_w, state->crtc_w); } if (sp->scale_y) { sp->fir_yinc = dispc_calc_fir_inc(sp->in_h, state->crtc_h); if (sp->fir_yinc < dispc_calc_fir_inc(1, f->upscale_limit)) { dev_dbg(dispc->dev, "%s: Y-scaling factor %u/%u > %u\n", __func__, state->crtc_h, state->src_h >> 16, f->upscale_limit); return -EINVAL; } if (sp->fir_yinc >= dispc_calc_fir_inc(downscale_limit, 1)) { sp->yinc = DIV_ROUND_UP(DIV_ROUND_UP(sp->in_h, state->crtc_h), downscale_limit); sp->in_h /= sp->yinc; sp->fir_yinc = dispc_calc_fir_inc(sp->in_h, state->crtc_h); } } dev_dbg(dispc->dev, "%s: %ux%u decim %ux%u -> %ux%u firinc %u.%03ux%u.%03u taps %u -> %ux%u\n", __func__, state->src_w >> 16, state->src_h >> 16, sp->xinc, sp->yinc, sp->in_w, sp->in_h, sp->fir_xinc / 0x200000u, ((sp->fir_xinc & 0x1FFFFFu) * 999u) / 0x1FFFFFu, sp->fir_yinc / 0x200000u, ((sp->fir_yinc & 0x1FFFFFu) * 999u) / 0x1FFFFFu, sp->five_taps ? 
5 : 3, state->crtc_w, state->crtc_h); if (dispc_fourcc_is_yuv(fourcc)) { if (sp->scale_x) { sp->in_w_uv /= sp->xinc; sp->fir_xinc_uv = dispc_calc_fir_inc(sp->in_w_uv, state->crtc_w); sp->xcoef_uv = tidss_get_scale_coefs(dispc->dev, sp->fir_xinc_uv, true); } if (sp->scale_y) { sp->in_h_uv /= sp->yinc; sp->fir_yinc_uv = dispc_calc_fir_inc(sp->in_h_uv, state->crtc_h); sp->ycoef_uv = tidss_get_scale_coefs(dispc->dev, sp->fir_yinc_uv, sp->five_taps); } } if (sp->scale_x) sp->xcoef = tidss_get_scale_coefs(dispc->dev, sp->fir_xinc, true); if (sp->scale_y) sp->ycoef = tidss_get_scale_coefs(dispc->dev, sp->fir_yinc, sp->five_taps); return 0; } static void dispc_vid_set_scaling(struct dispc_device *dispc, u32 hw_plane, struct dispc_scaling_params *sp, u32 fourcc) { /* HORIZONTAL RESIZE ENABLE */ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, sp->scale_x, 7, 7); /* VERTICAL RESIZE ENABLE */ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, sp->scale_y, 8, 8); /* Skip the rest if no scaling is used */ if (!sp->scale_x && !sp->scale_y) return; /* VERTICAL 5-TAPS */ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, sp->five_taps, 21, 21); if (dispc_fourcc_is_yuv(fourcc)) { if (sp->scale_x) { dispc_vid_write(dispc, hw_plane, DISPC_VID_FIRH2, sp->fir_xinc_uv); dispc_vid_write_fir_coefs(dispc, hw_plane, DISPC_VID_FIR_COEF_HORIZ_UV, sp->xcoef_uv); } if (sp->scale_y) { dispc_vid_write(dispc, hw_plane, DISPC_VID_FIRV2, sp->fir_yinc_uv); dispc_vid_write_fir_coefs(dispc, hw_plane, DISPC_VID_FIR_COEF_VERT_UV, sp->ycoef_uv); } } if (sp->scale_x) { dispc_vid_write(dispc, hw_plane, DISPC_VID_FIRH, sp->fir_xinc); dispc_vid_write_fir_coefs(dispc, hw_plane, DISPC_VID_FIR_COEF_HORIZ, sp->xcoef); } if (sp->scale_y) { dispc_vid_write(dispc, hw_plane, DISPC_VID_FIRV, sp->fir_yinc); dispc_vid_write_fir_coefs(dispc, hw_plane, DISPC_VID_FIR_COEF_VERT, sp->ycoef); } } /* OTHER */ static const struct { u32 fourcc; u8 dss_code; } dispc_color_formats[] = { { DRM_FORMAT_ARGB4444, 0x0, }, 
{ DRM_FORMAT_ABGR4444, 0x1, }, { DRM_FORMAT_RGBA4444, 0x2, }, { DRM_FORMAT_RGB565, 0x3, }, { DRM_FORMAT_BGR565, 0x4, }, { DRM_FORMAT_ARGB1555, 0x5, }, { DRM_FORMAT_ABGR1555, 0x6, }, { DRM_FORMAT_ARGB8888, 0x7, }, { DRM_FORMAT_ABGR8888, 0x8, }, { DRM_FORMAT_RGBA8888, 0x9, }, { DRM_FORMAT_BGRA8888, 0xa, }, { DRM_FORMAT_RGB888, 0xb, }, { DRM_FORMAT_BGR888, 0xc, }, { DRM_FORMAT_ARGB2101010, 0xe, }, { DRM_FORMAT_ABGR2101010, 0xf, }, { DRM_FORMAT_XRGB4444, 0x20, }, { DRM_FORMAT_XBGR4444, 0x21, }, { DRM_FORMAT_RGBX4444, 0x22, }, { DRM_FORMAT_XRGB1555, 0x25, }, { DRM_FORMAT_XBGR1555, 0x26, }, { DRM_FORMAT_XRGB8888, 0x27, }, { DRM_FORMAT_XBGR8888, 0x28, }, { DRM_FORMAT_RGBX8888, 0x29, }, { DRM_FORMAT_BGRX8888, 0x2a, }, { DRM_FORMAT_XRGB2101010, 0x2e, }, { DRM_FORMAT_XBGR2101010, 0x2f, }, { DRM_FORMAT_YUYV, 0x3e, }, { DRM_FORMAT_UYVY, 0x3f, }, { DRM_FORMAT_NV12, 0x3d, }, }; static void dispc_plane_set_pixel_format(struct dispc_device *dispc, u32 hw_plane, u32 fourcc) { unsigned int i; for (i = 0; i < ARRAY_SIZE(dispc_color_formats); ++i) { if (dispc_color_formats[i].fourcc == fourcc) { VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, dispc_color_formats[i].dss_code, 6, 1); return; } } WARN_ON(1); } const u32 *dispc_plane_formats(struct dispc_device *dispc, unsigned int *len) { WARN_ON(!dispc->fourccs); *len = dispc->num_fourccs; return dispc->fourccs; } static s32 pixinc(int pixels, u8 ps) { if (pixels == 1) return 1; else if (pixels > 1) return 1 + (pixels - 1) * ps; else if (pixels < 0) return 1 - (-pixels + 1) * ps; WARN_ON(1); return 0; } int dispc_plane_check(struct dispc_device *dispc, u32 hw_plane, const struct drm_plane_state *state, u32 hw_videoport) { bool lite = dispc->feat->vid_lite[hw_plane]; u32 fourcc = state->fb->format->format; bool need_scaling = state->src_w >> 16 != state->crtc_w || state->src_h >> 16 != state->crtc_h; struct dispc_scaling_params scaling; int ret; if (dispc_fourcc_is_yuv(fourcc)) { if (!dispc_find_csc(state->color_encoding, 
state->color_range)) { dev_dbg(dispc->dev, "%s: Unsupported CSC (%u,%u) for HW plane %u\n", __func__, state->color_encoding, state->color_range, hw_plane); return -EINVAL; } } if (need_scaling) { if (lite) { dev_dbg(dispc->dev, "%s: Lite plane %u can't scale %ux%u!=%ux%u\n", __func__, hw_plane, state->src_w >> 16, state->src_h >> 16, state->crtc_w, state->crtc_h); return -EINVAL; } ret = dispc_vid_calc_scaling(dispc, state, &scaling, false); if (ret) return ret; } return 0; } static dma_addr_t dispc_plane_state_dma_addr(const struct drm_plane_state *state) { struct drm_framebuffer *fb = state->fb; struct drm_gem_dma_object *gem; u32 x = state->src_x >> 16; u32 y = state->src_y >> 16; gem = drm_fb_dma_get_gem_obj(state->fb, 0); return gem->dma_addr + fb->offsets[0] + x * fb->format->cpp[0] + y * fb->pitches[0]; } static dma_addr_t dispc_plane_state_p_uv_addr(const struct drm_plane_state *state) { struct drm_framebuffer *fb = state->fb; struct drm_gem_dma_object *gem; u32 x = state->src_x >> 16; u32 y = state->src_y >> 16; if (WARN_ON(state->fb->format->num_planes != 2)) return 0; gem = drm_fb_dma_get_gem_obj(fb, 1); return gem->dma_addr + fb->offsets[1] + (x * fb->format->cpp[1] / fb->format->hsub) + (y * fb->pitches[1] / fb->format->vsub); } void dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane, const struct drm_plane_state *state, u32 hw_videoport) { bool lite = dispc->feat->vid_lite[hw_plane]; u32 fourcc = state->fb->format->format; u16 cpp = state->fb->format->cpp[0]; u32 fb_width = state->fb->pitches[0] / cpp; dma_addr_t dma_addr = dispc_plane_state_dma_addr(state); struct dispc_scaling_params scale; dispc_vid_calc_scaling(dispc, state, &scale, lite); dispc_plane_set_pixel_format(dispc, hw_plane, fourcc); dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_0, dma_addr & 0xffffffff); dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_EXT_0, (u64)dma_addr >> 32); dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_1, dma_addr & 0xffffffff); dispc_vid_write(dispc, 
hw_plane, DISPC_VID_BA_EXT_1, (u64)dma_addr >> 32); dispc_vid_write(dispc, hw_plane, DISPC_VID_PICTURE_SIZE, (scale.in_w - 1) | ((scale.in_h - 1) << 16)); /* For YUV422 format we use the macropixel size for pixel inc */ if (fourcc == DRM_FORMAT_YUYV || fourcc == DRM_FORMAT_UYVY) dispc_vid_write(dispc, hw_plane, DISPC_VID_PIXEL_INC, pixinc(scale.xinc, cpp * 2)); else dispc_vid_write(dispc, hw_plane, DISPC_VID_PIXEL_INC, pixinc(scale.xinc, cpp)); dispc_vid_write(dispc, hw_plane, DISPC_VID_ROW_INC, pixinc(1 + (scale.yinc * fb_width - scale.xinc * scale.in_w), cpp)); if (state->fb->format->num_planes == 2) { u16 cpp_uv = state->fb->format->cpp[1]; u32 fb_width_uv = state->fb->pitches[1] / cpp_uv; dma_addr_t p_uv_addr = dispc_plane_state_p_uv_addr(state); dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_UV_0, p_uv_addr & 0xffffffff); dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_UV_EXT_0, (u64)p_uv_addr >> 32); dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_UV_1, p_uv_addr & 0xffffffff); dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_UV_EXT_1, (u64)p_uv_addr >> 32); dispc_vid_write(dispc, hw_plane, DISPC_VID_ROW_INC_UV, pixinc(1 + (scale.yinc * fb_width_uv - scale.xinc * scale.in_w_uv), cpp_uv)); } if (!lite) { dispc_vid_write(dispc, hw_plane, DISPC_VID_SIZE, (state->crtc_w - 1) | ((state->crtc_h - 1) << 16)); dispc_vid_set_scaling(dispc, hw_plane, &scale, fourcc); } /* enable YUV->RGB color conversion */ if (dispc_fourcc_is_yuv(fourcc)) { dispc_vid_csc_setup(dispc, hw_plane, state); dispc_vid_csc_enable(dispc, hw_plane, true); } else { dispc_vid_csc_enable(dispc, hw_plane, false); } dispc_vid_write(dispc, hw_plane, DISPC_VID_GLOBAL_ALPHA, 0xFF & (state->alpha >> 8)); if (state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 1, 28, 28); else VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 0, 28, 28); } void dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable) { VID_REG_FLD_MOD(dispc, hw_plane, 
DISPC_VID_ATTRIBUTES, !!enable, 0, 0); } static u32 dispc_vid_get_fifo_size(struct dispc_device *dispc, u32 hw_plane) { return VID_REG_GET(dispc, hw_plane, DISPC_VID_BUF_SIZE_STATUS, 15, 0); } static void dispc_vid_set_mflag_threshold(struct dispc_device *dispc, u32 hw_plane, u32 low, u32 high) { dispc_vid_write(dispc, hw_plane, DISPC_VID_MFLAG_THRESHOLD, FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0)); } static void dispc_vid_set_buf_threshold(struct dispc_device *dispc, u32 hw_plane, u32 low, u32 high) { dispc_vid_write(dispc, hw_plane, DISPC_VID_BUF_THRESHOLD, FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0)); } static void dispc_k2g_plane_init(struct dispc_device *dispc) { unsigned int hw_plane; dev_dbg(dispc->dev, "%s()\n", __func__); /* MFLAG_CTRL = ENABLED */ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, 1, 0); /* MFLAG_START = MFLAGNORMALSTARTMODE */ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6); for (hw_plane = 0; hw_plane < dispc->feat->num_planes; hw_plane++) { u32 size = dispc_vid_get_fifo_size(dispc, hw_plane); u32 thr_low, thr_high; u32 mflag_low, mflag_high; u32 preload; thr_high = size - 1; thr_low = size / 2; mflag_high = size * 2 / 3; mflag_low = size / 3; preload = thr_low; dev_dbg(dispc->dev, "%s: bufsize %u, buf_threshold %u/%u, mflag threshold %u/%u preload %u\n", dispc->feat->vid_name[hw_plane], size, thr_high, thr_low, mflag_high, mflag_low, preload); dispc_vid_set_buf_threshold(dispc, hw_plane, thr_low, thr_high); dispc_vid_set_mflag_threshold(dispc, hw_plane, mflag_low, mflag_high); dispc_vid_write(dispc, hw_plane, DISPC_VID_PRELOAD, preload); /* * Prefetch up to fifo high-threshold value to minimize the * possibility of underflows. Note that this means the PRELOAD * register is ignored. 
*/ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 1, 19, 19); } } static void dispc_k3_plane_init(struct dispc_device *dispc) { unsigned int hw_plane; u32 cba_lo_pri = 1; u32 cba_hi_pri = 0; dev_dbg(dispc->dev, "%s()\n", __func__); REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_lo_pri, 2, 0); REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_hi_pri, 5, 3); /* MFLAG_CTRL = ENABLED */ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, 1, 0); /* MFLAG_START = MFLAGNORMALSTARTMODE */ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6); for (hw_plane = 0; hw_plane < dispc->feat->num_planes; hw_plane++) { u32 size = dispc_vid_get_fifo_size(dispc, hw_plane); u32 thr_low, thr_high; u32 mflag_low, mflag_high; u32 preload; thr_high = size - 1; thr_low = size / 2; mflag_high = size * 2 / 3; mflag_low = size / 3; preload = thr_low; dev_dbg(dispc->dev, "%s: bufsize %u, buf_threshold %u/%u, mflag threshold %u/%u preload %u\n", dispc->feat->vid_name[hw_plane], size, thr_high, thr_low, mflag_high, mflag_low, preload); dispc_vid_set_buf_threshold(dispc, hw_plane, thr_low, thr_high); dispc_vid_set_mflag_threshold(dispc, hw_plane, mflag_low, mflag_high); dispc_vid_write(dispc, hw_plane, DISPC_VID_PRELOAD, preload); /* Prefech up to PRELOAD value */ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 0, 19, 19); } } static void dispc_plane_init(struct dispc_device *dispc) { switch (dispc->feat->subrev) { case DISPC_K2G: dispc_k2g_plane_init(dispc); break; case DISPC_AM625: case DISPC_AM65X: case DISPC_J721E: dispc_k3_plane_init(dispc); break; default: WARN_ON(1); } } static void dispc_vp_init(struct dispc_device *dispc) { unsigned int i; dev_dbg(dispc->dev, "%s()\n", __func__); /* Enable the gamma Shadow bit-field for all VPs*/ for (i = 0; i < dispc->feat->num_vps; i++) VP_REG_FLD_MOD(dispc, i, DISPC_VP_CONFIG, 1, 2, 2); } static void dispc_initial_config(struct dispc_device *dispc) { dispc_plane_init(dispc); dispc_vp_init(dispc); /* Note: Hardcoded DPI routing on J721E for now */ if 
(dispc->feat->subrev == DISPC_J721E) { dispc_write(dispc, DISPC_CONNECTIONS, FLD_VAL(2, 3, 0) | /* VP1 to DPI0 */ FLD_VAL(8, 7, 4) /* VP3 to DPI1 */ ); } } static void dispc_k2g_vp_write_gamma_table(struct dispc_device *dispc, u32 hw_videoport) { u32 *table = dispc->vp_data[hw_videoport].gamma_table; u32 hwlen = dispc->feat->vp_feat.color.gamma_size; unsigned int i; dev_dbg(dispc->dev, "%s: hw_videoport %d\n", __func__, hw_videoport); if (WARN_ON(dispc->feat->vp_feat.color.gamma_type != TIDSS_GAMMA_8BIT)) return; for (i = 0; i < hwlen; ++i) { u32 v = table[i]; v |= i << 24; dispc_vp_write(dispc, hw_videoport, DISPC_VP_K2G_GAMMA_TABLE, v); } } static void dispc_am65x_vp_write_gamma_table(struct dispc_device *dispc, u32 hw_videoport) { u32 *table = dispc->vp_data[hw_videoport].gamma_table; u32 hwlen = dispc->feat->vp_feat.color.gamma_size; unsigned int i; dev_dbg(dispc->dev, "%s: hw_videoport %d\n", __func__, hw_videoport); if (WARN_ON(dispc->feat->vp_feat.color.gamma_type != TIDSS_GAMMA_8BIT)) return; for (i = 0; i < hwlen; ++i) { u32 v = table[i]; v |= i << 24; dispc_vp_write(dispc, hw_videoport, DISPC_VP_GAMMA_TABLE, v); } } static void dispc_j721e_vp_write_gamma_table(struct dispc_device *dispc, u32 hw_videoport) { u32 *table = dispc->vp_data[hw_videoport].gamma_table; u32 hwlen = dispc->feat->vp_feat.color.gamma_size; unsigned int i; dev_dbg(dispc->dev, "%s: hw_videoport %d\n", __func__, hw_videoport); if (WARN_ON(dispc->feat->vp_feat.color.gamma_type != TIDSS_GAMMA_10BIT)) return; for (i = 0; i < hwlen; ++i) { u32 v = table[i]; if (i == 0) v |= 1 << 31; dispc_vp_write(dispc, hw_videoport, DISPC_VP_GAMMA_TABLE, v); } } static void dispc_vp_write_gamma_table(struct dispc_device *dispc, u32 hw_videoport) { switch (dispc->feat->subrev) { case DISPC_K2G: dispc_k2g_vp_write_gamma_table(dispc, hw_videoport); break; case DISPC_AM625: case DISPC_AM65X: dispc_am65x_vp_write_gamma_table(dispc, hw_videoport); break; case DISPC_J721E: dispc_j721e_vp_write_gamma_table(dispc, 
hw_videoport); break; default: WARN_ON(1); break; } } static const struct drm_color_lut dispc_vp_gamma_default_lut[] = { { .red = 0, .green = 0, .blue = 0, }, { .red = U16_MAX, .green = U16_MAX, .blue = U16_MAX, }, }; static void dispc_vp_set_gamma(struct dispc_device *dispc, u32 hw_videoport, const struct drm_color_lut *lut, unsigned int length) { u32 *table = dispc->vp_data[hw_videoport].gamma_table; u32 hwlen = dispc->feat->vp_feat.color.gamma_size; u32 hwbits; unsigned int i; dev_dbg(dispc->dev, "%s: hw_videoport %d, lut len %u, hw len %u\n", __func__, hw_videoport, length, hwlen); if (dispc->feat->vp_feat.color.gamma_type == TIDSS_GAMMA_10BIT) hwbits = 10; else hwbits = 8; if (!lut || length < 2) { lut = dispc_vp_gamma_default_lut; length = ARRAY_SIZE(dispc_vp_gamma_default_lut); } for (i = 0; i < length - 1; ++i) { unsigned int first = i * (hwlen - 1) / (length - 1); unsigned int last = (i + 1) * (hwlen - 1) / (length - 1); unsigned int w = last - first; u16 r, g, b; unsigned int j; if (w == 0) continue; for (j = 0; j <= w; j++) { r = (lut[i].red * (w - j) + lut[i + 1].red * j) / w; g = (lut[i].green * (w - j) + lut[i + 1].green * j) / w; b = (lut[i].blue * (w - j) + lut[i + 1].blue * j) / w; r >>= 16 - hwbits; g >>= 16 - hwbits; b >>= 16 - hwbits; table[first + j] = (r << (hwbits * 2)) | (g << hwbits) | b; } } dispc_vp_write_gamma_table(dispc, hw_videoport); } static s16 dispc_S31_32_to_s2_8(s64 coef) { u64 sign_bit = 1ULL << 63; u64 cbits = (u64)coef; s16 ret; if (cbits & sign_bit) ret = -clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x200); else ret = clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x1FF); return ret; } static void dispc_k2g_cpr_from_ctm(const struct drm_color_ctm *ctm, struct dispc_csc_coef *cpr) { memset(cpr, 0, sizeof(*cpr)); cpr->to_regval = dispc_csc_cpr_regval; cpr->m[CSC_RR] = dispc_S31_32_to_s2_8(ctm->matrix[0]); cpr->m[CSC_RG] = dispc_S31_32_to_s2_8(ctm->matrix[1]); cpr->m[CSC_RB] = dispc_S31_32_to_s2_8(ctm->matrix[2]); cpr->m[CSC_GR] = 
dispc_S31_32_to_s2_8(ctm->matrix[3]); cpr->m[CSC_GG] = dispc_S31_32_to_s2_8(ctm->matrix[4]); cpr->m[CSC_GB] = dispc_S31_32_to_s2_8(ctm->matrix[5]); cpr->m[CSC_BR] = dispc_S31_32_to_s2_8(ctm->matrix[6]); cpr->m[CSC_BG] = dispc_S31_32_to_s2_8(ctm->matrix[7]); cpr->m[CSC_BB] = dispc_S31_32_to_s2_8(ctm->matrix[8]); } #define CVAL(xR, xG, xB) (FLD_VAL(xR, 9, 0) | FLD_VAL(xG, 20, 11) | \ FLD_VAL(xB, 31, 22)) static void dispc_k2g_vp_csc_cpr_regval(const struct dispc_csc_coef *csc, u32 *regval) { regval[0] = CVAL(csc->m[CSC_BB], csc->m[CSC_BG], csc->m[CSC_BR]); regval[1] = CVAL(csc->m[CSC_GB], csc->m[CSC_GG], csc->m[CSC_GR]); regval[2] = CVAL(csc->m[CSC_RB], csc->m[CSC_RG], csc->m[CSC_RR]); } #undef CVAL static void dispc_k2g_vp_write_csc(struct dispc_device *dispc, u32 hw_videoport, const struct dispc_csc_coef *csc) { static const u16 dispc_vp_cpr_coef_reg[] = { DISPC_VP_CSC_COEF0, DISPC_VP_CSC_COEF1, DISPC_VP_CSC_COEF2, /* K2G CPR is packed to three registers. */ }; u32 regval[DISPC_CSC_REGVAL_LEN]; unsigned int i; dispc_k2g_vp_csc_cpr_regval(csc, regval); for (i = 0; i < ARRAY_SIZE(dispc_vp_cpr_coef_reg); i++) dispc_vp_write(dispc, hw_videoport, dispc_vp_cpr_coef_reg[i], regval[i]); } static void dispc_k2g_vp_set_ctm(struct dispc_device *dispc, u32 hw_videoport, struct drm_color_ctm *ctm) { u32 cprenable = 0; if (ctm) { struct dispc_csc_coef cpr; dispc_k2g_cpr_from_ctm(ctm, &cpr); dispc_k2g_vp_write_csc(dispc, hw_videoport, &cpr); cprenable = 1; } VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG, cprenable, 15, 15); } static s16 dispc_S31_32_to_s3_8(s64 coef) { u64 sign_bit = 1ULL << 63; u64 cbits = (u64)coef; s16 ret; if (cbits & sign_bit) ret = -clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x400); else ret = clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x3FF); return ret; } static void dispc_csc_from_ctm(const struct drm_color_ctm *ctm, struct dispc_csc_coef *cpr) { memset(cpr, 0, sizeof(*cpr)); cpr->to_regval = dispc_csc_cpr_regval; cpr->m[CSC_RR] = 
dispc_S31_32_to_s3_8(ctm->matrix[0]); cpr->m[CSC_RG] = dispc_S31_32_to_s3_8(ctm->matrix[1]); cpr->m[CSC_RB] = dispc_S31_32_to_s3_8(ctm->matrix[2]); cpr->m[CSC_GR] = dispc_S31_32_to_s3_8(ctm->matrix[3]); cpr->m[CSC_GG] = dispc_S31_32_to_s3_8(ctm->matrix[4]); cpr->m[CSC_GB] = dispc_S31_32_to_s3_8(ctm->matrix[5]); cpr->m[CSC_BR] = dispc_S31_32_to_s3_8(ctm->matrix[6]); cpr->m[CSC_BG] = dispc_S31_32_to_s3_8(ctm->matrix[7]); cpr->m[CSC_BB] = dispc_S31_32_to_s3_8(ctm->matrix[8]); } static void dispc_k3_vp_write_csc(struct dispc_device *dispc, u32 hw_videoport, const struct dispc_csc_coef *csc) { static const u16 dispc_vp_csc_coef_reg[DISPC_CSC_REGVAL_LEN] = { DISPC_VP_CSC_COEF0, DISPC_VP_CSC_COEF1, DISPC_VP_CSC_COEF2, DISPC_VP_CSC_COEF3, DISPC_VP_CSC_COEF4, DISPC_VP_CSC_COEF5, DISPC_VP_CSC_COEF6, DISPC_VP_CSC_COEF7, }; u32 regval[DISPC_CSC_REGVAL_LEN]; unsigned int i; csc->to_regval(csc, regval); for (i = 0; i < ARRAY_SIZE(regval); i++) dispc_vp_write(dispc, hw_videoport, dispc_vp_csc_coef_reg[i], regval[i]); } static void dispc_k3_vp_set_ctm(struct dispc_device *dispc, u32 hw_videoport, struct drm_color_ctm *ctm) { u32 colorconvenable = 0; if (ctm) { struct dispc_csc_coef csc; dispc_csc_from_ctm(ctm, &csc); dispc_k3_vp_write_csc(dispc, hw_videoport, &csc); colorconvenable = 1; } VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG, colorconvenable, 24, 24); } static void dispc_vp_set_color_mgmt(struct dispc_device *dispc, u32 hw_videoport, const struct drm_crtc_state *state, bool newmodeset) { struct drm_color_lut *lut = NULL; struct drm_color_ctm *ctm = NULL; unsigned int length = 0; if (!(state->color_mgmt_changed || newmodeset)) return; if (state->gamma_lut) { lut = (struct drm_color_lut *)state->gamma_lut->data; length = state->gamma_lut->length / sizeof(*lut); } dispc_vp_set_gamma(dispc, hw_videoport, lut, length); if (state->ctm) ctm = (struct drm_color_ctm *)state->ctm->data; if (dispc->feat->subrev == DISPC_K2G) dispc_k2g_vp_set_ctm(dispc, hw_videoport, ctm); else 
dispc_k3_vp_set_ctm(dispc, hw_videoport, ctm); } void dispc_vp_setup(struct dispc_device *dispc, u32 hw_videoport, const struct drm_crtc_state *state, bool newmodeset) { dispc_vp_set_default_color(dispc, hw_videoport, 0); dispc_vp_set_color_mgmt(dispc, hw_videoport, state, newmodeset); } int dispc_runtime_suspend(struct dispc_device *dispc) { dev_dbg(dispc->dev, "suspend\n"); dispc->is_enabled = false; clk_disable_unprepare(dispc->fclk); return 0; } int dispc_runtime_resume(struct dispc_device *dispc) { dev_dbg(dispc->dev, "resume\n"); clk_prepare_enable(dispc->fclk); if (REG_GET(dispc, DSS_SYSSTATUS, 0, 0) == 0) dev_warn(dispc->dev, "DSS FUNC RESET not done!\n"); dev_dbg(dispc->dev, "OMAP DSS7 rev 0x%x\n", dispc_read(dispc, DSS_REVISION)); dev_dbg(dispc->dev, "VP RESETDONE %d,%d,%d\n", REG_GET(dispc, DSS_SYSSTATUS, 1, 1), REG_GET(dispc, DSS_SYSSTATUS, 2, 2), REG_GET(dispc, DSS_SYSSTATUS, 3, 3)); if (dispc->feat->subrev == DISPC_AM625 || dispc->feat->subrev == DISPC_AM65X) dev_dbg(dispc->dev, "OLDI RESETDONE %d,%d,%d\n", REG_GET(dispc, DSS_SYSSTATUS, 5, 5), REG_GET(dispc, DSS_SYSSTATUS, 6, 6), REG_GET(dispc, DSS_SYSSTATUS, 7, 7)); dev_dbg(dispc->dev, "DISPC IDLE %d\n", REG_GET(dispc, DSS_SYSSTATUS, 9, 9)); dispc_initial_config(dispc); dispc->is_enabled = true; tidss_irq_resume(dispc->tidss); return 0; } void dispc_remove(struct tidss_device *tidss) { dev_dbg(tidss->dev, "%s\n", __func__); tidss->dispc = NULL; } static int dispc_iomap_resource(struct platform_device *pdev, const char *name, void __iomem **base) { void __iomem *b; b = devm_platform_ioremap_resource_byname(pdev, name); if (IS_ERR(b)) { dev_err(&pdev->dev, "cannot ioremap resource '%s'\n", name); return PTR_ERR(b); } *base = b; return 0; } static int dispc_init_am65x_oldi_io_ctrl(struct device *dev, struct dispc_device *dispc) { dispc->oldi_io_ctrl = syscon_regmap_lookup_by_phandle(dev->of_node, "ti,am65x-oldi-io-ctrl"); if (PTR_ERR(dispc->oldi_io_ctrl) == -ENODEV) { dispc->oldi_io_ctrl = NULL; } else 
if (IS_ERR(dispc->oldi_io_ctrl)) { dev_err(dev, "%s: syscon_regmap_lookup_by_phandle failed %ld\n", __func__, PTR_ERR(dispc->oldi_io_ctrl)); return PTR_ERR(dispc->oldi_io_ctrl); } return 0; } static void dispc_init_errata(struct dispc_device *dispc) { static const struct soc_device_attribute am65x_sr10_soc_devices[] = { { .family = "AM65X", .revision = "SR1.0" }, { /* sentinel */ } }; if (soc_device_match(am65x_sr10_soc_devices)) { dispc->errata.i2000 = true; dev_info(dispc->dev, "WA for erratum i2000: YUV formats disabled\n"); } } static void dispc_softreset(struct dispc_device *dispc) { u32 val; int ret = 0; /* Soft reset */ REG_FLD_MOD(dispc, DSS_SYSCONFIG, 1, 1, 1); /* Wait for reset to complete */ ret = readl_poll_timeout(dispc->base_common + DSS_SYSSTATUS, val, val & 1, 100, 5000); if (ret) dev_warn(dispc->dev, "failed to reset dispc\n"); } int dispc_init(struct tidss_device *tidss) { struct device *dev = tidss->dev; struct platform_device *pdev = to_platform_device(dev); struct dispc_device *dispc; const struct dispc_features *feat; unsigned int i, num_fourccs; int r = 0; dev_dbg(dev, "%s\n", __func__); feat = tidss->feat; if (feat->subrev != DISPC_K2G) { r = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); if (r) dev_warn(dev, "cannot set DMA masks to 48-bit\n"); } dma_set_max_seg_size(dev, UINT_MAX); dispc = devm_kzalloc(dev, sizeof(*dispc), GFP_KERNEL); if (!dispc) return -ENOMEM; dispc->tidss = tidss; dispc->dev = dev; dispc->feat = feat; dispc_init_errata(dispc); dispc->fourccs = devm_kcalloc(dev, ARRAY_SIZE(dispc_color_formats), sizeof(*dispc->fourccs), GFP_KERNEL); if (!dispc->fourccs) return -ENOMEM; num_fourccs = 0; for (i = 0; i < ARRAY_SIZE(dispc_color_formats); ++i) { if (dispc->errata.i2000 && dispc_fourcc_is_yuv(dispc_color_formats[i].fourcc)) { continue; } dispc->fourccs[num_fourccs++] = dispc_color_formats[i].fourcc; } dispc->num_fourccs = num_fourccs; dispc_common_regmap = dispc->feat->common_regs; r = dispc_iomap_resource(pdev, 
dispc->feat->common, &dispc->base_common); if (r) return r; for (i = 0; i < dispc->feat->num_planes; i++) { r = dispc_iomap_resource(pdev, dispc->feat->vid_name[i], &dispc->base_vid[i]); if (r) return r; } /* K2G display controller does not support soft reset */ if (feat->subrev != DISPC_K2G) dispc_softreset(dispc); for (i = 0; i < dispc->feat->num_vps; i++) { u32 gamma_size = dispc->feat->vp_feat.color.gamma_size; u32 *gamma_table; struct clk *clk; r = dispc_iomap_resource(pdev, dispc->feat->ovr_name[i], &dispc->base_ovr[i]); if (r) return r; r = dispc_iomap_resource(pdev, dispc->feat->vp_name[i], &dispc->base_vp[i]); if (r) return r; clk = devm_clk_get(dev, dispc->feat->vpclk_name[i]); if (IS_ERR(clk)) { dev_err(dev, "%s: Failed to get clk %s:%ld\n", __func__, dispc->feat->vpclk_name[i], PTR_ERR(clk)); return PTR_ERR(clk); } dispc->vp_clk[i] = clk; gamma_table = devm_kmalloc_array(dev, gamma_size, sizeof(*gamma_table), GFP_KERNEL); if (!gamma_table) return -ENOMEM; dispc->vp_data[i].gamma_table = gamma_table; } if (feat->subrev == DISPC_AM65X) { r = dispc_init_am65x_oldi_io_ctrl(dev, dispc); if (r) return r; } dispc->fclk = devm_clk_get(dev, "fck"); if (IS_ERR(dispc->fclk)) { dev_err(dev, "%s: Failed to get fclk: %ld\n", __func__, PTR_ERR(dispc->fclk)); return PTR_ERR(dispc->fclk); } dev_dbg(dev, "DSS fclk %lu Hz\n", clk_get_rate(dispc->fclk)); of_property_read_u32(dispc->dev->of_node, "max-memory-bandwidth", &dispc->memory_bandwidth_limit); tidss->dispc = dispc; return 0; }
linux-master
drivers/gpu/drm/tidss/tidss_dispc.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com/ * Author: Tomi Valkeinen <[email protected]> */ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_vblank.h> #include "tidss_crtc.h" #include "tidss_dispc.h" #include "tidss_drv.h" #include "tidss_irq.h" #include "tidss_plane.h" /* Page flip and frame done IRQs */ static void tidss_crtc_finish_page_flip(struct tidss_crtc *tcrtc) { struct drm_device *ddev = tcrtc->crtc.dev; struct tidss_device *tidss = to_tidss(ddev); struct drm_pending_vblank_event *event; unsigned long flags; bool busy; spin_lock_irqsave(&ddev->event_lock, flags); /* * New settings are taken into use at VFP, and GO bit is cleared at * the same time. This happens before the vertical blank interrupt. * So there is a small change that the driver sets GO bit after VFP, but * before vblank, and we have to check for that case here. 
*/ busy = dispc_vp_go_busy(tidss->dispc, tcrtc->hw_videoport); if (busy) { spin_unlock_irqrestore(&ddev->event_lock, flags); return; } event = tcrtc->event; tcrtc->event = NULL; if (!event) { spin_unlock_irqrestore(&ddev->event_lock, flags); return; } drm_crtc_send_vblank_event(&tcrtc->crtc, event); spin_unlock_irqrestore(&ddev->event_lock, flags); drm_crtc_vblank_put(&tcrtc->crtc); } void tidss_crtc_vblank_irq(struct drm_crtc *crtc) { struct tidss_crtc *tcrtc = to_tidss_crtc(crtc); drm_crtc_handle_vblank(crtc); tidss_crtc_finish_page_flip(tcrtc); } void tidss_crtc_framedone_irq(struct drm_crtc *crtc) { struct tidss_crtc *tcrtc = to_tidss_crtc(crtc); complete(&tcrtc->framedone_completion); } void tidss_crtc_error_irq(struct drm_crtc *crtc, u64 irqstatus) { struct tidss_crtc *tcrtc = to_tidss_crtc(crtc); dev_err_ratelimited(crtc->dev->dev, "CRTC%u SYNC LOST: (irq %llx)\n", tcrtc->hw_videoport, irqstatus); } /* drm_crtc_helper_funcs */ static int tidss_crtc_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); struct drm_device *ddev = crtc->dev; struct tidss_device *tidss = to_tidss(ddev); struct dispc_device *dispc = tidss->dispc; struct tidss_crtc *tcrtc = to_tidss_crtc(crtc); u32 hw_videoport = tcrtc->hw_videoport; const struct drm_display_mode *mode; enum drm_mode_status ok; dev_dbg(ddev->dev, "%s\n", __func__); if (!crtc_state->enable) return 0; mode = &crtc_state->adjusted_mode; ok = dispc_vp_mode_valid(dispc, hw_videoport, mode); if (ok != MODE_OK) { dev_dbg(ddev->dev, "%s: bad mode: %ux%u pclk %u kHz\n", __func__, mode->hdisplay, mode->vdisplay, mode->clock); return -EINVAL; } return dispc_vp_bus_check(dispc, hw_videoport, crtc_state); } /* * This needs all affected planes to be present in the atomic * state. The untouched planes are added to the state in * tidss_atomic_check(). 
*/ static void tidss_crtc_position_planes(struct tidss_device *tidss, struct drm_crtc *crtc, struct drm_crtc_state *old_state, bool newmodeset) { struct drm_atomic_state *ostate = old_state->state; struct tidss_crtc *tcrtc = to_tidss_crtc(crtc); struct drm_crtc_state *cstate = crtc->state; int layer; if (!newmodeset && !cstate->zpos_changed && !to_tidss_crtc_state(cstate)->plane_pos_changed) return; for (layer = 0; layer < tidss->feat->num_planes; layer++) { struct drm_plane_state *pstate; struct drm_plane *plane; bool layer_active = false; int i; for_each_new_plane_in_state(ostate, plane, pstate, i) { if (pstate->crtc != crtc || !pstate->visible) continue; if (pstate->normalized_zpos == layer) { layer_active = true; break; } } if (layer_active) { struct tidss_plane *tplane = to_tidss_plane(plane); dispc_ovr_set_plane(tidss->dispc, tplane->hw_plane_id, tcrtc->hw_videoport, pstate->crtc_x, pstate->crtc_y, layer); } dispc_ovr_enable_layer(tidss->dispc, tcrtc->hw_videoport, layer, layer_active); } } static void tidss_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc); struct tidss_crtc *tcrtc = to_tidss_crtc(crtc); struct drm_device *ddev = crtc->dev; struct tidss_device *tidss = to_tidss(ddev); unsigned long flags; dev_dbg(ddev->dev, "%s: %s enabled %d, needs modeset %d, event %p\n", __func__, crtc->name, drm_atomic_crtc_needs_modeset(crtc->state), crtc->state->enable, crtc->state->event); /* There is nothing to do if CRTC is not going to be enabled. */ if (!crtc->state->enable) return; /* * Flush CRTC changes with go bit only if new modeset is not * coming, so CRTC is enabled trough out the commit. */ if (drm_atomic_crtc_needs_modeset(crtc->state)) return; /* If the GO bit is stuck we better quit here. */ if (WARN_ON(dispc_vp_go_busy(tidss->dispc, tcrtc->hw_videoport))) return; /* We should have event if CRTC is enabled through out this commit. 
*/ if (WARN_ON(!crtc->state->event)) return; /* Write vp properties to HW if needed. */ dispc_vp_setup(tidss->dispc, tcrtc->hw_videoport, crtc->state, false); /* Update plane positions if needed. */ tidss_crtc_position_planes(tidss, crtc, old_crtc_state, false); WARN_ON(drm_crtc_vblank_get(crtc) != 0); spin_lock_irqsave(&ddev->event_lock, flags); dispc_vp_go(tidss->dispc, tcrtc->hw_videoport); WARN_ON(tcrtc->event); tcrtc->event = crtc->state->event; crtc->state->event = NULL; spin_unlock_irqrestore(&ddev->event_lock, flags); } static void tidss_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, crtc); struct tidss_crtc *tcrtc = to_tidss_crtc(crtc); struct drm_device *ddev = crtc->dev; struct tidss_device *tidss = to_tidss(ddev); const struct drm_display_mode *mode = &crtc->state->adjusted_mode; unsigned long flags; int r; dev_dbg(ddev->dev, "%s, event %p\n", __func__, crtc->state->event); tidss_runtime_get(tidss); r = dispc_vp_set_clk_rate(tidss->dispc, tcrtc->hw_videoport, mode->clock * 1000); if (r != 0) return; r = dispc_vp_enable_clk(tidss->dispc, tcrtc->hw_videoport); if (r != 0) return; dispc_vp_setup(tidss->dispc, tcrtc->hw_videoport, crtc->state, true); tidss_crtc_position_planes(tidss, crtc, old_state, true); /* Turn vertical blanking interrupt reporting on. 
*/ drm_crtc_vblank_on(crtc); dispc_vp_prepare(tidss->dispc, tcrtc->hw_videoport, crtc->state); dispc_vp_enable(tidss->dispc, tcrtc->hw_videoport, crtc->state); spin_lock_irqsave(&ddev->event_lock, flags); if (crtc->state->event) { drm_crtc_send_vblank_event(crtc, crtc->state->event); crtc->state->event = NULL; } spin_unlock_irqrestore(&ddev->event_lock, flags); } static void tidss_crtc_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct tidss_crtc *tcrtc = to_tidss_crtc(crtc); struct drm_device *ddev = crtc->dev; struct tidss_device *tidss = to_tidss(ddev); unsigned long flags; dev_dbg(ddev->dev, "%s, event %p\n", __func__, crtc->state->event); reinit_completion(&tcrtc->framedone_completion); dispc_vp_disable(tidss->dispc, tcrtc->hw_videoport); if (!wait_for_completion_timeout(&tcrtc->framedone_completion, msecs_to_jiffies(500))) dev_err(tidss->dev, "Timeout waiting for framedone on crtc %d", tcrtc->hw_videoport); dispc_vp_unprepare(tidss->dispc, tcrtc->hw_videoport); spin_lock_irqsave(&ddev->event_lock, flags); if (crtc->state->event) { drm_crtc_send_vblank_event(crtc, crtc->state->event); crtc->state->event = NULL; } spin_unlock_irqrestore(&ddev->event_lock, flags); drm_crtc_vblank_off(crtc); dispc_vp_disable_clk(tidss->dispc, tcrtc->hw_videoport); tidss_runtime_put(tidss); } static enum drm_mode_status tidss_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode) { struct tidss_crtc *tcrtc = to_tidss_crtc(crtc); struct drm_device *ddev = crtc->dev; struct tidss_device *tidss = to_tidss(ddev); return dispc_vp_mode_valid(tidss->dispc, tcrtc->hw_videoport, mode); } static const struct drm_crtc_helper_funcs tidss_crtc_helper_funcs = { .atomic_check = tidss_crtc_atomic_check, .atomic_flush = tidss_crtc_atomic_flush, .atomic_enable = tidss_crtc_atomic_enable, .atomic_disable = tidss_crtc_atomic_disable, .mode_valid = tidss_crtc_mode_valid, }; /* drm_crtc_funcs */ static int tidss_crtc_enable_vblank(struct drm_crtc *crtc) 
{
	struct drm_device *ddev = crtc->dev;
	struct tidss_device *tidss = to_tidss(ddev);

	dev_dbg(ddev->dev, "%s\n", __func__);

	/* Keep the device powered while vblank interrupts are in use. */
	tidss_runtime_get(tidss);

	tidss_irq_enable_vblank(crtc);

	return 0;
}

/* Disable the vblank interrupt for this CRTC and drop the runtime PM ref. */
static void tidss_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *ddev = crtc->dev;
	struct tidss_device *tidss = to_tidss(ddev);

	dev_dbg(ddev->dev, "%s\n", __func__);

	tidss_irq_disable_vblank(crtc);

	tidss_runtime_put(tidss);
}

/*
 * Reset the CRTC software state: destroy and free any current state, then
 * install a freshly zeroed tidss_crtc_state as the new base state.
 */
static void tidss_crtc_reset(struct drm_crtc *crtc)
{
	struct tidss_crtc_state *tcrtc;

	if (crtc->state)
		__drm_atomic_helper_crtc_destroy_state(crtc->state);

	kfree(crtc->state);

	tcrtc = kzalloc(sizeof(*tcrtc), GFP_KERNEL);
	if (!tcrtc) {
		/* Allocation failed: leave the CRTC without a state. */
		crtc->state = NULL;
		return;
	}

	__drm_atomic_helper_crtc_reset(crtc, &tcrtc->base);
}

/*
 * Duplicate the current CRTC state for an atomic commit, carrying over the
 * driver-private bus format/flags and clearing the per-commit
 * plane_pos_changed flag.
 */
static struct drm_crtc_state *tidss_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct tidss_crtc_state *state, *current_state;

	if (WARN_ON(!crtc->state))
		return NULL;

	current_state = to_tidss_crtc_state(crtc->state);

	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	/* New commit: no plane position change recorded yet. */
	state->plane_pos_changed = false;

	state->bus_format = current_state->bus_format;
	state->bus_flags = current_state->bus_flags;

	return &state->base;
}

/* Release the CRTC resources and the containing tidss_crtc allocation. */
static void tidss_crtc_destroy(struct drm_crtc *crtc)
{
	struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);

	drm_crtc_cleanup(crtc);
	kfree(tcrtc);
}

static const struct drm_crtc_funcs tidss_crtc_funcs = {
	.reset = tidss_crtc_reset,
	.destroy = tidss_crtc_destroy,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = tidss_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	.enable_vblank = tidss_crtc_enable_vblank,
	.disable_vblank = tidss_crtc_disable_vblank,
};

/*
 * tidss_crtc_create - allocate and register a CRTC for one hardware videoport
 * @tidss: the tidss device
 * @hw_videoport: index of the DISPC videoport this CRTC drives
 * @primary: primary plane to bind to the CRTC
 *
 * Returns the new tidss_crtc on success, or an ERR_PTR on failure.
 */
struct tidss_crtc *tidss_crtc_create(struct tidss_device *tidss,
				     u32 hw_videoport,
				     struct drm_plane *primary)
{
	struct tidss_crtc *tcrtc;
	struct drm_crtc *crtc;
	unsigned int gamma_lut_size = 0;
	bool has_ctm = tidss->feat->vp_feat.color.has_ctm;
	int ret;

	tcrtc = kzalloc(sizeof(*tcrtc), GFP_KERNEL);
	if (!tcrtc)
		return ERR_PTR(-ENOMEM);

	tcrtc->hw_videoport = hw_videoport;
	init_completion(&tcrtc->framedone_completion);

	crtc = &tcrtc->crtc;

	ret = drm_crtc_init_with_planes(&tidss->ddev, crtc, primary,
					NULL, &tidss_crtc_funcs, NULL);
	if (ret < 0) {
		kfree(tcrtc);
		return ERR_PTR(ret);
	}

	drm_crtc_helper_add(crtc, &tidss_crtc_helper_funcs);

	/*
	 * The dispc gamma functions adapt to what ever size we ask
	 * from it no matter what HW supports. X-server assumes 256
	 * element gamma tables so lets use that.
	 */
	if (tidss->feat->vp_feat.color.gamma_size)
		gamma_lut_size = 256;

	drm_crtc_enable_color_mgmt(crtc, 0, has_ctm, gamma_lut_size);
	if (gamma_lut_size)
		drm_mode_crtc_set_gamma_size(crtc, gamma_lut_size);

	return tcrtc;
}
linux-master
drivers/gpu/drm/tidss/tidss_crtc.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com/
 * Author: Tomi Valkeinen <[email protected]>
 */

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>

#include "tidss_crtc.h"
#include "tidss_dispc.h"
#include "tidss_drv.h"
#include "tidss_plane.h"

/* drm_plane_helper_funcs */

/*
 * Validate a plane update: check the plane state against the CRTC, enforce
 * the hardware's subpixel alignment constraints, and let dispc verify the
 * configuration for the target videoport.
 */
static int tidss_plane_atomic_check(struct drm_plane *plane,
				    struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct drm_device *ddev = plane->dev;
	struct tidss_device *tidss = to_tidss(ddev);
	struct tidss_plane *tplane = to_tidss_plane(plane);
	const struct drm_format_info *finfo;
	struct drm_crtc_state *crtc_state;
	u32 hw_plane = tplane->hw_plane_id;
	u32 hw_videoport;
	int ret;

	dev_dbg(ddev->dev, "%s\n", __func__);

	if (!new_plane_state->crtc) {
		/*
		 * The visible field is not reset by the DRM core but only
		 * updated by drm_atomic_helper_check_plane_state(), set it
		 * manually.
		 */
		new_plane_state->visible = false;
		return 0;
	}

	crtc_state = drm_atomic_get_crtc_state(state, new_plane_state->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
						  0,
						  INT_MAX, true, true);
	if (ret < 0)
		return ret;

	/*
	 * The HW is only able to start drawing at subpixel boundary
	 * (the two first checks below). At the end of a row the HW
	 * can only jump integer number of subpixels forward to the
	 * beginning of the next row. So we can only show picture with
	 * integer subpixel width (the third check). However, after
	 * reaching the end of the drawn picture the drawing starts
	 * again at the absolute memory address where top left corner
	 * position of the drawn picture is (so there is no need to
	 * check for odd height).
	 */

	finfo = drm_format_info(new_plane_state->fb->format->format);

	if ((new_plane_state->src_x >> 16) % finfo->hsub != 0) {
		dev_dbg(ddev->dev,
			"%s: x-position %u not divisible subpixel size %u\n",
			__func__, (new_plane_state->src_x >> 16), finfo->hsub);
		return -EINVAL;
	}

	if ((new_plane_state->src_y >> 16) % finfo->vsub != 0) {
		dev_dbg(ddev->dev,
			"%s: y-position %u not divisible subpixel size %u\n",
			__func__, (new_plane_state->src_y >> 16), finfo->vsub);
		return -EINVAL;
	}

	if ((new_plane_state->src_w >> 16) % finfo->hsub != 0) {
		dev_dbg(ddev->dev,
			"%s: src width %u not divisible by subpixel size %u\n",
			__func__, (new_plane_state->src_w >> 16),
			finfo->hsub);
		return -EINVAL;
	}

	if (!new_plane_state->visible)
		return 0;

	hw_videoport = to_tidss_crtc(new_plane_state->crtc)->hw_videoport;

	ret = dispc_plane_check(tidss->dispc, hw_plane, new_plane_state,
				hw_videoport);
	if (ret)
		return ret;

	return 0;
}

/*
 * Apply a validated plane state to the hardware; an invisible plane is
 * simply disabled in dispc.
 */
static void tidss_plane_atomic_update(struct drm_plane *plane,
				      struct drm_atomic_state *state)
{
	struct drm_device *ddev = plane->dev;
	struct tidss_device *tidss = to_tidss(ddev);
	struct tidss_plane *tplane = to_tidss_plane(plane);
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	u32 hw_videoport;

	dev_dbg(ddev->dev, "%s\n", __func__);

	if (!new_state->visible) {
		dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false);
		return;
	}

	hw_videoport = to_tidss_crtc(new_state->crtc)->hw_videoport;

	dispc_plane_setup(tidss->dispc, tplane->hw_plane_id,
			  new_state, hw_videoport);
}

/* Turn the hardware plane on. */
static void tidss_plane_atomic_enable(struct drm_plane *plane,
				      struct drm_atomic_state *state)
{
	struct drm_device *ddev = plane->dev;
	struct tidss_device *tidss = to_tidss(ddev);
	struct tidss_plane *tplane = to_tidss_plane(plane);

	dev_dbg(ddev->dev, "%s\n", __func__);

	dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, true);
}

/* Turn the hardware plane off. */
static void tidss_plane_atomic_disable(struct drm_plane *plane,
				       struct drm_atomic_state *state)
{
	struct drm_device *ddev = plane->dev;
	struct tidss_device *tidss = to_tidss(ddev);
	struct tidss_plane *tplane = to_tidss_plane(plane);

	dev_dbg(ddev->dev, "%s\n", __func__);

	dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false);
}

/* Release the DRM plane and the containing tidss_plane allocation. */
static void drm_plane_destroy(struct drm_plane *plane)
{
	struct tidss_plane *tplane = to_tidss_plane(plane);

	drm_plane_cleanup(plane);
	kfree(tplane);
}

static const struct drm_plane_helper_funcs tidss_plane_helper_funcs = {
	.atomic_check = tidss_plane_atomic_check,
	.atomic_update = tidss_plane_atomic_update,
	.atomic_enable = tidss_plane_atomic_enable,
	.atomic_disable = tidss_plane_atomic_disable,
};

static const struct drm_plane_funcs tidss_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.reset = drm_atomic_helper_plane_reset,
	.destroy = drm_plane_destroy,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};

/*
 * tidss_plane_create - allocate and register one DRM plane for a HW plane
 * @tidss: the tidss device
 * @hw_plane_id: dispc hardware plane index (also used as default zpos)
 * @plane_type: DRM plane type (primary/overlay)
 * @crtc_mask: mask of CRTCs this plane may be bound to
 * @formats: supported fourcc formats
 * @num_formats: number of entries in @formats
 *
 * Registers zpos, YCbCr color encoding/range, alpha and blend-mode
 * properties on the plane. Returns the new tidss_plane or an ERR_PTR.
 */
struct tidss_plane *tidss_plane_create(struct tidss_device *tidss,
				       u32 hw_plane_id, u32 plane_type,
				       u32 crtc_mask, const u32 *formats,
				       u32 num_formats)
{
	struct tidss_plane *tplane;
	enum drm_plane_type type;
	u32 possible_crtcs;
	u32 num_planes = tidss->feat->num_planes;
	u32 color_encodings = (BIT(DRM_COLOR_YCBCR_BT601) |
			       BIT(DRM_COLOR_YCBCR_BT709));
	u32 color_ranges = (BIT(DRM_COLOR_YCBCR_FULL_RANGE) |
			    BIT(DRM_COLOR_YCBCR_LIMITED_RANGE));
	u32 default_encoding = DRM_COLOR_YCBCR_BT601;
	u32 default_range = DRM_COLOR_YCBCR_FULL_RANGE;
	u32 blend_modes = (BIT(DRM_MODE_BLEND_PREMULTI) |
			   BIT(DRM_MODE_BLEND_COVERAGE));
	int ret;

	tplane = kzalloc(sizeof(*tplane), GFP_KERNEL);
	if (!tplane)
		return ERR_PTR(-ENOMEM);

	tplane->hw_plane_id = hw_plane_id;

	possible_crtcs = crtc_mask;
	type = plane_type;

	ret = drm_universal_plane_init(&tidss->ddev, &tplane->plane,
				       possible_crtcs,
				       &tidss_plane_funcs,
				       formats, num_formats,
				       NULL, type, NULL);
	if (ret < 0)
		goto err;

	drm_plane_helper_add(&tplane->plane, &tidss_plane_helper_funcs);

	drm_plane_create_zpos_property(&tplane->plane, hw_plane_id, 0,
				       num_planes - 1);

	ret = drm_plane_create_color_properties(&tplane->plane,
						color_encodings,
						color_ranges,
						default_encoding,
						default_range);
	if (ret)
		goto err;

	ret = drm_plane_create_alpha_property(&tplane->plane);
	if (ret)
		goto err;

	ret = drm_plane_create_blend_mode_property(&tplane->plane, blend_modes);
	if (ret)
		goto err;

	return tplane;

err:
	kfree(tplane);
	return ERR_PTR(ret);
}
linux-master
drivers/gpu/drm/tidss/tidss_plane.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com/
 * Author: Tomi Valkeinen <[email protected]>
 */

#include <linux/export.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_panel.h>
#include <drm/drm_of.h>
#include <drm/drm_simple_kms_helper.h>

#include "tidss_crtc.h"
#include "tidss_drv.h"
#include "tidss_encoder.h"

/*
 * The encoder is modelled as an internal bridge at the head of the bridge
 * chain, so that the bus format/flags negotiated by the chain can be
 * propagated into the CRTC state in atomic_check.
 */
struct tidss_encoder {
	struct drm_bridge bridge;
	struct drm_encoder encoder;
	struct drm_connector *connector;
	struct drm_bridge *next_bridge;
	struct tidss_device *tidss;
};

static inline struct tidss_encoder
*bridge_to_tidss_encoder(struct drm_bridge *b)
{
	return container_of(b, struct tidss_encoder, bridge);
}

/* Chain our internal bridge to the next (external) bridge, if any. */
static int tidss_bridge_attach(struct drm_bridge *bridge,
			       enum drm_bridge_attach_flags flags)
{
	struct tidss_encoder *t_enc = bridge_to_tidss_encoder(bridge);

	return drm_bridge_attach(bridge->encoder, t_enc->next_bridge,
				 bridge, flags);
}

/*
 * Copy the negotiated bus format and flags into the tidss CRTC state,
 * preferring the next bridge's input bus config and falling back to the
 * connector's display_info. Fails if neither source provides a format.
 */
static int tidss_bridge_atomic_check(struct drm_bridge *bridge,
				     struct drm_bridge_state *bridge_state,
				     struct drm_crtc_state *crtc_state,
				     struct drm_connector_state *conn_state)
{
	struct tidss_encoder *t_enc = bridge_to_tidss_encoder(bridge);
	struct tidss_device *tidss = t_enc->tidss;
	struct tidss_crtc_state *tcrtc_state = to_tidss_crtc_state(crtc_state);
	struct drm_display_info *di = &conn_state->connector->display_info;
	struct drm_bridge_state *next_bridge_state = NULL;

	if (t_enc->next_bridge)
		next_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
								    t_enc->next_bridge);

	if (next_bridge_state) {
		tcrtc_state->bus_flags = next_bridge_state->input_bus_cfg.flags;
		tcrtc_state->bus_format = next_bridge_state->input_bus_cfg.format;
	} else if (di->num_bus_formats) {
		tcrtc_state->bus_format = di->bus_formats[0];
		tcrtc_state->bus_flags = di->bus_flags;
	} else {
		dev_err(tidss->dev, "%s: No bus_formats in connected display\n",
			__func__);
		return -EINVAL;
	}

	return 0;
}

static const struct drm_bridge_funcs tidss_bridge_funcs = {
	.attach				= tidss_bridge_attach,
	.atomic_check			= tidss_bridge_atomic_check,
	.atomic_reset			= drm_atomic_helper_bridge_reset,
	.atomic_duplicate_state		= drm_atomic_helper_bridge_duplicate_state,
	.atomic_destroy_state		= drm_atomic_helper_bridge_destroy_state,
};

/*
 * tidss_encoder_create - create an encoder, attach the bridge chain and
 * initialize the bridge connector at the end of the chain.
 * @tidss: the tidss device
 * @next_bridge: first external bridge to attach after our internal one
 * @encoder_type: DRM encoder type
 * @possible_crtcs: mask of CRTCs the encoder can be driven by
 *
 * The encoder is drmm-managed; returns 0 on success or a negative errno.
 */
int tidss_encoder_create(struct tidss_device *tidss,
			 struct drm_bridge *next_bridge,
			 u32 encoder_type, u32 possible_crtcs)
{
	struct tidss_encoder *t_enc;
	struct drm_encoder *enc;
	struct drm_connector *connector;
	int ret;

	t_enc = drmm_simple_encoder_alloc(&tidss->ddev, struct tidss_encoder,
					  encoder, encoder_type);
	if (IS_ERR(t_enc))
		return PTR_ERR(t_enc);

	t_enc->tidss = tidss;
	t_enc->next_bridge = next_bridge;
	t_enc->bridge.funcs = &tidss_bridge_funcs;

	enc = &t_enc->encoder;
	enc->possible_crtcs = possible_crtcs;

	/* Attaching first bridge to the encoder */
	ret = drm_bridge_attach(enc, &t_enc->bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret) {
		dev_err(tidss->dev, "bridge attach failed: %d\n", ret);
		return ret;
	}

	/* Initializing the connector at the end of bridge-chain */
	connector = drm_bridge_connector_init(&tidss->ddev, enc);
	if (IS_ERR(connector)) {
		dev_err(tidss->dev, "bridge_connector create failed\n");
		return PTR_ERR(connector);
	}

	ret = drm_connector_attach_encoder(connector, enc);
	if (ret) {
		dev_err(tidss->dev, "attaching encoder to connector failed\n");
		return ret;
	}

	t_enc->connector = connector;

	dev_dbg(tidss->dev, "Encoder create done\n");

	return ret;
}
linux-master
drivers/gpu/drm/tidss/tidss_encoder.c
// SPDX-License-Identifier: GPL-2.0-only /* * (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved. * * Parts of this file were based on sources as follows: * * Copyright (c) 2006-2008 Intel Corporation * Copyright (c) 2007 Dave Airlie <[email protected]> * Copyright (C) 2011 Texas Instruments */ /** * DOC: ARM PrimeCell PL110 and PL111 CLCD Driver * * The PL110/PL111 is a simple LCD controller that can support TFT * and STN displays. This driver exposes a standard KMS interface * for them. * * The driver currently doesn't expose the cursor. The DRM API for * cursors requires support for 64x64 ARGB8888 cursor images, while * the hardware can only support 64x64 monochrome with masking * cursors. While one could imagine trying to hack something together * to look at the ARGB8888 and program reasonable in monochrome, we * just don't expose the cursor at all instead, and leave cursor * support to the application software cursor layer. * * TODO: * * - Fix race between setting plane base address and getting IRQ for * vsync firing the pageflip completion. * * - Read back hardware state at boot to skip reprogramming the * hardware when doing a no-op modeset. * * - Use the CLKSEL bit to support switching between the two external * clock parents. 
*/ #include <linux/amba/bus.h> #include <linux/dma-buf.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_graph.h> #include <linux/of_reserved_mem.h> #include <linux/shmem_fs.h> #include <linux/slab.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_fourcc.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> #include "pl111_drm.h" #include "pl111_versatile.h" #include "pl111_nomadik.h" #define DRIVER_DESC "DRM module for PL111" static const struct drm_mode_config_funcs mode_config_funcs = { .fb_create = drm_gem_fb_create, .atomic_check = drm_atomic_helper_check, .atomic_commit = drm_atomic_helper_commit, }; static int pl111_modeset_init(struct drm_device *dev) { struct drm_mode_config *mode_config; struct pl111_drm_dev_private *priv = dev->dev_private; struct device_node *np = dev->dev->of_node; struct device_node *remote; struct drm_panel *panel = NULL; struct drm_bridge *bridge = NULL; bool defer = false; int ret; int i; ret = drmm_mode_config_init(dev); if (ret) return ret; mode_config = &dev->mode_config; mode_config->funcs = &mode_config_funcs; mode_config->min_width = 1; mode_config->max_width = 1024; mode_config->min_height = 1; mode_config->max_height = 768; i = 0; for_each_endpoint_of_node(np, remote) { struct drm_panel *tmp_panel; struct drm_bridge *tmp_bridge; dev_dbg(dev->dev, "checking endpoint %d\n", i); ret = drm_of_find_panel_or_bridge(dev->dev->of_node, 0, i, &tmp_panel, &tmp_bridge); if (ret) { if (ret == -EPROBE_DEFER) { /* * Something deferred, but that is often just * another way of saying -ENODEV, but let's * cast a vote for later deferral. 
*/ defer = true; } else if (ret != -ENODEV) { /* Continue, maybe something else is working */ dev_err(dev->dev, "endpoint %d returns %d\n", i, ret); } } if (tmp_panel) { dev_info(dev->dev, "found panel on endpoint %d\n", i); panel = tmp_panel; } if (tmp_bridge) { dev_info(dev->dev, "found bridge on endpoint %d\n", i); bridge = tmp_bridge; } i++; } /* * If we can't find neither panel nor bridge on any of the * endpoints, and any of them retured -EPROBE_DEFER, then * let's defer this driver too. */ if ((!panel && !bridge) && defer) return -EPROBE_DEFER; if (panel) { bridge = drm_panel_bridge_add_typed(panel, DRM_MODE_CONNECTOR_Unknown); if (IS_ERR(bridge)) { ret = PTR_ERR(bridge); goto finish; } } else if (bridge) { dev_info(dev->dev, "Using non-panel bridge\n"); } else { dev_err(dev->dev, "No bridge, exiting\n"); return -ENODEV; } priv->bridge = bridge; if (panel) { priv->panel = panel; priv->connector = drm_panel_bridge_connector(bridge); } ret = pl111_display_init(dev); if (ret != 0) { dev_err(dev->dev, "Failed to init display\n"); goto out_bridge; } ret = drm_simple_display_pipe_attach_bridge(&priv->pipe, bridge); if (ret) return ret; if (!priv->variant->broken_vblank) { ret = drm_vblank_init(dev, 1); if (ret != 0) { dev_err(dev->dev, "Failed to init vblank\n"); goto out_bridge; } } drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); goto finish; out_bridge: if (panel) drm_panel_bridge_remove(bridge); finish: return ret; } static struct drm_gem_object * pl111_gem_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sgt) { struct pl111_drm_dev_private *priv = dev->dev_private; /* * When using device-specific reserved memory we can't import * DMA buffers: those are passed by reference in any global * memory and we can only handle a specific range of memory. 
*/ if (priv->use_device_memory) return ERR_PTR(-EINVAL); return drm_gem_dma_prime_import_sg_table(dev, attach, sgt); } DEFINE_DRM_GEM_DMA_FOPS(drm_fops); static const struct drm_driver pl111_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .ioctls = NULL, .fops = &drm_fops, .name = "pl111", .desc = DRIVER_DESC, .date = "20170317", .major = 1, .minor = 0, .patchlevel = 0, .dumb_create = drm_gem_dma_dumb_create, .gem_prime_import_sg_table = pl111_gem_import_sg_table, #if defined(CONFIG_DEBUG_FS) .debugfs_init = pl111_debugfs_init, #endif }; static int pl111_amba_probe(struct amba_device *amba_dev, const struct amba_id *id) { struct device *dev = &amba_dev->dev; struct pl111_drm_dev_private *priv; const struct pl111_variant_data *variant = id->data; struct drm_device *drm; int ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; drm = drm_dev_alloc(&pl111_drm_driver, dev); if (IS_ERR(drm)) return PTR_ERR(drm); amba_set_drvdata(amba_dev, drm); priv->drm = drm; drm->dev_private = priv; priv->variant = variant; ret = of_reserved_mem_device_init(dev); if (!ret) { dev_info(dev, "using device-specific reserved memory\n"); priv->use_device_memory = true; } if (of_property_read_u32(dev->of_node, "max-memory-bandwidth", &priv->memory_bw)) { dev_info(dev, "no max memory bandwidth specified, assume unlimited\n"); priv->memory_bw = 0; } /* The two main variants swap this register */ if (variant->is_pl110 || variant->is_lcdc) { priv->ienb = CLCD_PL110_IENB; priv->ctrl = CLCD_PL110_CNTL; } else { priv->ienb = CLCD_PL111_IENB; priv->ctrl = CLCD_PL111_CNTL; } priv->regs = devm_ioremap_resource(dev, &amba_dev->res); if (IS_ERR(priv->regs)) { dev_err(dev, "%s failed mmio\n", __func__); ret = PTR_ERR(priv->regs); goto dev_put; } /* This may override some variant settings */ ret = pl111_versatile_init(dev, priv); if (ret) goto dev_put; pl111_nomadik_init(dev); /* turn off interrupts before requesting the irq */ writel(0, 
priv->regs + priv->ienb); ret = devm_request_irq(dev, amba_dev->irq[0], pl111_irq, 0, variant->name, priv); if (ret != 0) { dev_err(dev, "%s failed irq %d\n", __func__, ret); return ret; } ret = pl111_modeset_init(drm); if (ret != 0) goto dev_put; ret = drm_dev_register(drm, 0); if (ret < 0) goto dev_put; drm_fbdev_dma_setup(drm, priv->variant->fb_depth); return 0; dev_put: drm_dev_put(drm); of_reserved_mem_device_release(dev); return ret; } static void pl111_amba_remove(struct amba_device *amba_dev) { struct device *dev = &amba_dev->dev; struct drm_device *drm = amba_get_drvdata(amba_dev); struct pl111_drm_dev_private *priv = drm->dev_private; drm_dev_unregister(drm); if (priv->panel) drm_panel_bridge_remove(priv->bridge); drm_dev_put(drm); of_reserved_mem_device_release(dev); } /* * This early variant lacks the 565 and 444 pixel formats. */ static const u32 pl110_pixel_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_XBGR8888, DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB8888, DRM_FORMAT_ABGR1555, DRM_FORMAT_XBGR1555, DRM_FORMAT_ARGB1555, DRM_FORMAT_XRGB1555, }; static const struct pl111_variant_data pl110_variant = { .name = "PL110", .is_pl110 = true, .formats = pl110_pixel_formats, .nformats = ARRAY_SIZE(pl110_pixel_formats), .fb_depth = 16, }; /* RealView, Versatile Express etc use this modern variant */ static const u32 pl111_pixel_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_XBGR8888, DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB8888, DRM_FORMAT_BGR565, DRM_FORMAT_RGB565, DRM_FORMAT_ABGR1555, DRM_FORMAT_XBGR1555, DRM_FORMAT_ARGB1555, DRM_FORMAT_XRGB1555, DRM_FORMAT_ABGR4444, DRM_FORMAT_XBGR4444, DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB4444, }; static const struct pl111_variant_data pl111_variant = { .name = "PL111", .formats = pl111_pixel_formats, .nformats = ARRAY_SIZE(pl111_pixel_formats), .fb_depth = 32, }; static const u32 pl110_nomadik_pixel_formats[] = { DRM_FORMAT_RGB888, DRM_FORMAT_BGR888, DRM_FORMAT_ABGR8888, DRM_FORMAT_XBGR8888, DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB8888, 
DRM_FORMAT_BGR565, DRM_FORMAT_RGB565, DRM_FORMAT_ABGR1555, DRM_FORMAT_XBGR1555, DRM_FORMAT_ARGB1555, DRM_FORMAT_XRGB1555, DRM_FORMAT_ABGR4444, DRM_FORMAT_XBGR4444, DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB4444, }; static const struct pl111_variant_data pl110_nomadik_variant = { .name = "LCDC (PL110 Nomadik)", .formats = pl110_nomadik_pixel_formats, .nformats = ARRAY_SIZE(pl110_nomadik_pixel_formats), .is_lcdc = true, .st_bitmux_control = true, .broken_vblank = true, .fb_depth = 16, }; static const struct amba_id pl111_id_table[] = { { .id = 0x00041110, .mask = 0x000fffff, .data = (void *)&pl110_variant, }, { .id = 0x00180110, .mask = 0x00fffffe, .data = (void *)&pl110_nomadik_variant, }, { .id = 0x00041111, .mask = 0x000fffff, .data = (void *)&pl111_variant, }, {0, 0}, }; MODULE_DEVICE_TABLE(amba, pl111_id_table); static struct amba_driver pl111_amba_driver __maybe_unused = { .drv = { .name = "drm-clcd-pl111", }, .probe = pl111_amba_probe, .remove = pl111_amba_remove, .id_table = pl111_id_table, }; #ifdef CONFIG_ARM_AMBA module_amba_driver(pl111_amba_driver); #endif MODULE_DESCRIPTION(DRIVER_DESC); MODULE_AUTHOR("ARM Ltd."); MODULE_LICENSE("GPL");
linux-master
drivers/gpu/drm/pl111/pl111_drv.c