python_code
stringlengths
0
1.8M
repo_name
stringclasses
7 values
file_path
stringlengths
5
99
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright © 2015 Broadcom */ /** * DOC: VC4 GEM BO management support * * The VC4 GPU architecture (both scanout and rendering) has direct * access to system memory with no MMU in between. To support it, we * use the GEM DMA helper functions to allocate contiguous ranges of * physical memory for our BOs. * * Since the DMA allocator is very slow, we keep a cache of recently * freed BOs around so that the kernel's allocation of objects for 3D * rendering can return quickly. */ #include <linux/dma-buf.h> #include <drm/drm_fourcc.h> #include "vc4_drv.h" #include "uapi/drm/vc4_drm.h" static const struct drm_gem_object_funcs vc4_gem_object_funcs; static const char * const bo_type_names[] = { "kernel", "V3D", "V3D shader", "dumb", "binner", "RCL", "BCL", "kernel BO cache", }; static bool is_user_label(int label) { return label >= VC4_BO_TYPE_COUNT; } static void vc4_bo_stats_print(struct drm_printer *p, struct vc4_dev *vc4) { int i; for (i = 0; i < vc4->num_labels; i++) { if (!vc4->bo_labels[i].num_allocated) continue; drm_printf(p, "%30s: %6dkb BOs (%d)\n", vc4->bo_labels[i].name, vc4->bo_labels[i].size_allocated / 1024, vc4->bo_labels[i].num_allocated); } mutex_lock(&vc4->purgeable.lock); if (vc4->purgeable.num) drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "userspace BO cache", vc4->purgeable.size / 1024, vc4->purgeable.num); if (vc4->purgeable.purged_num) drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "total purged BO", vc4->purgeable.purged_size / 1024, vc4->purgeable.purged_num); mutex_unlock(&vc4->purgeable.lock); } static int vc4_bo_stats_debugfs(struct seq_file *m, void *unused) { struct drm_debugfs_entry *entry = m->private; struct drm_device *dev = entry->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_printer p = drm_seq_file_printer(m); vc4_bo_stats_print(&p, vc4); return 0; } /* Takes ownership of *name and returns the appropriate slot for it in * the bo_labels[] array, extending it as necessary. 
* * This is inefficient and could use a hash table instead of walking * an array and strcmp()ing. However, the assumption is that user * labeling will be infrequent (scanout buffers and other long-lived * objects, or debug driver builds), so we can live with it for now. */ static int vc4_get_user_label(struct vc4_dev *vc4, const char *name) { int i; int free_slot = -1; for (i = 0; i < vc4->num_labels; i++) { if (!vc4->bo_labels[i].name) { free_slot = i; } else if (strcmp(vc4->bo_labels[i].name, name) == 0) { kfree(name); return i; } } if (free_slot != -1) { WARN_ON(vc4->bo_labels[free_slot].num_allocated != 0); vc4->bo_labels[free_slot].name = name; return free_slot; } else { u32 new_label_count = vc4->num_labels + 1; struct vc4_label *new_labels = krealloc(vc4->bo_labels, new_label_count * sizeof(*new_labels), GFP_KERNEL); if (!new_labels) { kfree(name); return -1; } free_slot = vc4->num_labels; vc4->bo_labels = new_labels; vc4->num_labels = new_label_count; vc4->bo_labels[free_slot].name = name; vc4->bo_labels[free_slot].num_allocated = 0; vc4->bo_labels[free_slot].size_allocated = 0; return free_slot; } } static void vc4_bo_set_label(struct drm_gem_object *gem_obj, int label) { struct vc4_bo *bo = to_vc4_bo(gem_obj); struct vc4_dev *vc4 = to_vc4_dev(gem_obj->dev); lockdep_assert_held(&vc4->bo_lock); if (label != -1) { vc4->bo_labels[label].num_allocated++; vc4->bo_labels[label].size_allocated += gem_obj->size; } vc4->bo_labels[bo->label].num_allocated--; vc4->bo_labels[bo->label].size_allocated -= gem_obj->size; if (vc4->bo_labels[bo->label].num_allocated == 0 && is_user_label(bo->label)) { /* Free user BO label slots on last unreference. * Slots are just where we track the stats for a given * name, and once a name is unused we can reuse that * slot. 
*/ kfree(vc4->bo_labels[bo->label].name); vc4->bo_labels[bo->label].name = NULL; } bo->label = label; } static uint32_t bo_page_index(size_t size) { return (size / PAGE_SIZE) - 1; } static void vc4_bo_destroy(struct vc4_bo *bo) { struct drm_gem_object *obj = &bo->base.base; struct vc4_dev *vc4 = to_vc4_dev(obj->dev); lockdep_assert_held(&vc4->bo_lock); vc4_bo_set_label(obj, -1); if (bo->validated_shader) { kfree(bo->validated_shader->uniform_addr_offsets); kfree(bo->validated_shader->texture_samples); kfree(bo->validated_shader); bo->validated_shader = NULL; } mutex_destroy(&bo->madv_lock); drm_gem_dma_free(&bo->base); } static void vc4_bo_remove_from_cache(struct vc4_bo *bo) { struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); lockdep_assert_held(&vc4->bo_lock); list_del(&bo->unref_head); list_del(&bo->size_head); } static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev, size_t size) { struct vc4_dev *vc4 = to_vc4_dev(dev); uint32_t page_index = bo_page_index(size); if (vc4->bo_cache.size_list_size <= page_index) { uint32_t new_size = max(vc4->bo_cache.size_list_size * 2, page_index + 1); struct list_head *new_list; uint32_t i; new_list = kmalloc_array(new_size, sizeof(struct list_head), GFP_KERNEL); if (!new_list) return NULL; /* Rebase the old cached BO lists to their new list * head locations. */ for (i = 0; i < vc4->bo_cache.size_list_size; i++) { struct list_head *old_list = &vc4->bo_cache.size_list[i]; if (list_empty(old_list)) INIT_LIST_HEAD(&new_list[i]); else list_replace(old_list, &new_list[i]); } /* And initialize the brand new BO list heads. 
*/ for (i = vc4->bo_cache.size_list_size; i < new_size; i++) INIT_LIST_HEAD(&new_list[i]); kfree(vc4->bo_cache.size_list); vc4->bo_cache.size_list = new_list; vc4->bo_cache.size_list_size = new_size; } return &vc4->bo_cache.size_list[page_index]; } static void vc4_bo_cache_purge(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); mutex_lock(&vc4->bo_lock); while (!list_empty(&vc4->bo_cache.time_list)) { struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list, struct vc4_bo, unref_head); vc4_bo_remove_from_cache(bo); vc4_bo_destroy(bo); } mutex_unlock(&vc4->bo_lock); } void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo) { struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); if (WARN_ON_ONCE(vc4->is_vc5)) return; mutex_lock(&vc4->purgeable.lock); list_add_tail(&bo->size_head, &vc4->purgeable.list); vc4->purgeable.num++; vc4->purgeable.size += bo->base.base.size; mutex_unlock(&vc4->purgeable.lock); } static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo) { struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); if (WARN_ON_ONCE(vc4->is_vc5)) return; /* list_del_init() is used here because the caller might release * the purgeable lock in order to acquire the madv one and update the * madv status. * During this short period of time a user might decide to mark * the BO as unpurgeable, and if bo->madv is set to * VC4_MADV_DONTNEED it will try to remove the BO from the * purgeable list which will fail if the ->next/prev fields * are set to LIST_POISON1/LIST_POISON2 (which is what * list_del() does). * Re-initializing the list element guarantees that list_del() * will work correctly even if it's a NOP. 
*/ list_del_init(&bo->size_head); vc4->purgeable.num--; vc4->purgeable.size -= bo->base.base.size; } void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo) { struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); mutex_lock(&vc4->purgeable.lock); vc4_bo_remove_from_purgeable_pool_locked(bo); mutex_unlock(&vc4->purgeable.lock); } static void vc4_bo_purge(struct drm_gem_object *obj) { struct vc4_bo *bo = to_vc4_bo(obj); struct drm_device *dev = obj->dev; WARN_ON(!mutex_is_locked(&bo->madv_lock)); WARN_ON(bo->madv != VC4_MADV_DONTNEED); drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping); dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.dma_addr); bo->base.vaddr = NULL; bo->madv = __VC4_MADV_PURGED; } static void vc4_bo_userspace_cache_purge(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); mutex_lock(&vc4->purgeable.lock); while (!list_empty(&vc4->purgeable.list)) { struct vc4_bo *bo = list_first_entry(&vc4->purgeable.list, struct vc4_bo, size_head); struct drm_gem_object *obj = &bo->base.base; size_t purged_size = 0; vc4_bo_remove_from_purgeable_pool_locked(bo); /* Release the purgeable lock while we're purging the BO so * that other people can continue inserting things in the * purgeable pool without having to wait for all BOs to be * purged. */ mutex_unlock(&vc4->purgeable.lock); mutex_lock(&bo->madv_lock); /* Since we released the purgeable pool lock before acquiring * the BO madv one, the user may have marked the BO as WILLNEED * and re-used it in the meantime. * Before purging the BO we need to make sure * - it is still marked as DONTNEED * - it has not been re-inserted in the purgeable list * - it is not used by HW blocks * If one of these conditions is not met, just skip the entry. 
*/ if (bo->madv == VC4_MADV_DONTNEED && list_empty(&bo->size_head) && !refcount_read(&bo->usecnt)) { purged_size = bo->base.base.size; vc4_bo_purge(obj); } mutex_unlock(&bo->madv_lock); mutex_lock(&vc4->purgeable.lock); if (purged_size) { vc4->purgeable.purged_size += purged_size; vc4->purgeable.purged_num++; } } mutex_unlock(&vc4->purgeable.lock); } static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev, uint32_t size, enum vc4_kernel_bo_type type) { struct vc4_dev *vc4 = to_vc4_dev(dev); uint32_t page_index = bo_page_index(size); struct vc4_bo *bo = NULL; mutex_lock(&vc4->bo_lock); if (page_index >= vc4->bo_cache.size_list_size) goto out; if (list_empty(&vc4->bo_cache.size_list[page_index])) goto out; bo = list_first_entry(&vc4->bo_cache.size_list[page_index], struct vc4_bo, size_head); vc4_bo_remove_from_cache(bo); kref_init(&bo->base.base.refcount); out: if (bo) vc4_bo_set_label(&bo->base.base, type); mutex_unlock(&vc4->bo_lock); return bo; } /** * vc4_create_object - Implementation of driver->gem_create_object. * @dev: DRM device * @size: Size in bytes of the memory the object will reference * * This lets the DMA helpers allocate object structs for us, and keep * our BO stats correct. 
*/ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size) { struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_bo *bo; if (WARN_ON_ONCE(vc4->is_vc5)) return ERR_PTR(-ENODEV); bo = kzalloc(sizeof(*bo), GFP_KERNEL); if (!bo) return ERR_PTR(-ENOMEM); bo->madv = VC4_MADV_WILLNEED; refcount_set(&bo->usecnt, 0); mutex_init(&bo->madv_lock); mutex_lock(&vc4->bo_lock); bo->label = VC4_BO_TYPE_KERNEL; vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++; vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size; mutex_unlock(&vc4->bo_lock); bo->base.base.funcs = &vc4_gem_object_funcs; return &bo->base.base; } struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size, bool allow_unzeroed, enum vc4_kernel_bo_type type) { size_t size = roundup(unaligned_size, PAGE_SIZE); struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_gem_dma_object *dma_obj; struct vc4_bo *bo; if (WARN_ON_ONCE(vc4->is_vc5)) return ERR_PTR(-ENODEV); if (size == 0) return ERR_PTR(-EINVAL); /* First, try to get a vc4_bo from the kernel BO cache. */ bo = vc4_bo_get_from_cache(dev, size, type); if (bo) { if (!allow_unzeroed) memset(bo->base.vaddr, 0, bo->base.base.size); return bo; } dma_obj = drm_gem_dma_create(dev, size); if (IS_ERR(dma_obj)) { /* * If we've run out of DMA memory, kill the cache of * DMA allocations we've got laying around and try again. */ vc4_bo_cache_purge(dev); dma_obj = drm_gem_dma_create(dev, size); } if (IS_ERR(dma_obj)) { /* * Still not enough DMA memory, purge the userspace BO * cache and retry. * This is sub-optimal since we purge the whole userspace * BO cache which forces user that want to re-use the BO to * restore its initial content. * Ideally, we should purge entries one by one and retry * after each to see if DMA allocation succeeds. Or even * better, try to find an entry with at least the same * size. 
*/ vc4_bo_userspace_cache_purge(dev); dma_obj = drm_gem_dma_create(dev, size); } if (IS_ERR(dma_obj)) { struct drm_printer p = drm_info_printer(vc4->base.dev); DRM_ERROR("Failed to allocate from GEM DMA helper:\n"); vc4_bo_stats_print(&p, vc4); return ERR_PTR(-ENOMEM); } bo = to_vc4_bo(&dma_obj->base); /* By default, BOs do not support the MADV ioctl. This will be enabled * only on BOs that are exposed to userspace (V3D, V3D_SHADER and DUMB * BOs). */ bo->madv = __VC4_MADV_NOTSUPP; mutex_lock(&vc4->bo_lock); vc4_bo_set_label(&dma_obj->base, type); mutex_unlock(&vc4->bo_lock); return bo; } int vc4_bo_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args) { struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_bo *bo = NULL; int ret; if (WARN_ON_ONCE(vc4->is_vc5)) return -ENODEV; ret = vc4_dumb_fixup_args(args); if (ret) return ret; bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_DUMB); if (IS_ERR(bo)) return PTR_ERR(bo); bo->madv = VC4_MADV_WILLNEED; ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); drm_gem_object_put(&bo->base.base); return ret; } static void vc4_bo_cache_free_old(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); unsigned long expire_time = jiffies - msecs_to_jiffies(1000); lockdep_assert_held(&vc4->bo_lock); while (!list_empty(&vc4->bo_cache.time_list)) { struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list, struct vc4_bo, unref_head); if (time_before(expire_time, bo->free_time)) { mod_timer(&vc4->bo_cache.time_timer, round_jiffies_up(jiffies + msecs_to_jiffies(1000))); return; } vc4_bo_remove_from_cache(bo); vc4_bo_destroy(bo); } } /* Called on the last userspace/kernel unreference of the BO. Returns * it to the BO cache if possible, otherwise frees it. 
*/ static void vc4_free_object(struct drm_gem_object *gem_bo) { struct drm_device *dev = gem_bo->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_bo *bo = to_vc4_bo(gem_bo); struct list_head *cache_list; /* Remove the BO from the purgeable list. */ mutex_lock(&bo->madv_lock); if (bo->madv == VC4_MADV_DONTNEED && !refcount_read(&bo->usecnt)) vc4_bo_remove_from_purgeable_pool(bo); mutex_unlock(&bo->madv_lock); mutex_lock(&vc4->bo_lock); /* If the object references someone else's memory, we can't cache it. */ if (gem_bo->import_attach) { vc4_bo_destroy(bo); goto out; } /* Don't cache if it was publicly named. */ if (gem_bo->name) { vc4_bo_destroy(bo); goto out; } /* If this object was partially constructed but DMA allocation * had failed, just free it. Can also happen when the BO has been * purged. */ if (!bo->base.vaddr) { vc4_bo_destroy(bo); goto out; } cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size); if (!cache_list) { vc4_bo_destroy(bo); goto out; } if (bo->validated_shader) { kfree(bo->validated_shader->uniform_addr_offsets); kfree(bo->validated_shader->texture_samples); kfree(bo->validated_shader); bo->validated_shader = NULL; } /* Reset madv and usecnt before adding the BO to the cache. 
*/ bo->madv = __VC4_MADV_NOTSUPP; refcount_set(&bo->usecnt, 0); bo->t_format = false; bo->free_time = jiffies; list_add(&bo->size_head, cache_list); list_add(&bo->unref_head, &vc4->bo_cache.time_list); vc4_bo_set_label(&bo->base.base, VC4_BO_TYPE_KERNEL_CACHE); vc4_bo_cache_free_old(dev); out: mutex_unlock(&vc4->bo_lock); } static void vc4_bo_cache_time_work(struct work_struct *work) { struct vc4_dev *vc4 = container_of(work, struct vc4_dev, bo_cache.time_work); struct drm_device *dev = &vc4->base; mutex_lock(&vc4->bo_lock); vc4_bo_cache_free_old(dev); mutex_unlock(&vc4->bo_lock); } int vc4_bo_inc_usecnt(struct vc4_bo *bo) { struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); int ret; if (WARN_ON_ONCE(vc4->is_vc5)) return -ENODEV; /* Fast path: if the BO is already retained by someone, no need to * check the madv status. */ if (refcount_inc_not_zero(&bo->usecnt)) return 0; mutex_lock(&bo->madv_lock); switch (bo->madv) { case VC4_MADV_WILLNEED: if (!refcount_inc_not_zero(&bo->usecnt)) refcount_set(&bo->usecnt, 1); ret = 0; break; case VC4_MADV_DONTNEED: /* We shouldn't use a BO marked as purgeable if at least * someone else retained its content by incrementing usecnt. * Luckily the BO hasn't been purged yet, but something wrong * is happening here. Just throw an error instead of * authorizing this use case. */ case __VC4_MADV_PURGED: /* We can't use a purged BO. */ default: /* Invalid madv value. */ ret = -EINVAL; break; } mutex_unlock(&bo->madv_lock); return ret; } void vc4_bo_dec_usecnt(struct vc4_bo *bo) { struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); if (WARN_ON_ONCE(vc4->is_vc5)) return; /* Fast path: if the BO is still retained by someone, no need to test * the madv value. 
*/ if (refcount_dec_not_one(&bo->usecnt)) return; mutex_lock(&bo->madv_lock); if (refcount_dec_and_test(&bo->usecnt) && bo->madv == VC4_MADV_DONTNEED) vc4_bo_add_to_purgeable_pool(bo); mutex_unlock(&bo->madv_lock); } static void vc4_bo_cache_time_timer(struct timer_list *t) { struct vc4_dev *vc4 = from_timer(vc4, t, bo_cache.time_timer); schedule_work(&vc4->bo_cache.time_work); } static struct dma_buf *vc4_prime_export(struct drm_gem_object *obj, int flags) { struct vc4_bo *bo = to_vc4_bo(obj); struct dma_buf *dmabuf; int ret; if (bo->validated_shader) { DRM_DEBUG("Attempting to export shader BO\n"); return ERR_PTR(-EINVAL); } /* Note: as soon as the BO is exported it becomes unpurgeable, because * noone ever decrements the usecnt even if the reference held by the * exported BO is released. This shouldn't be a problem since we don't * expect exported BOs to be marked as purgeable. */ ret = vc4_bo_inc_usecnt(bo); if (ret) { DRM_ERROR("Failed to increment BO usecnt\n"); return ERR_PTR(ret); } dmabuf = drm_gem_prime_export(obj, flags); if (IS_ERR(dmabuf)) vc4_bo_dec_usecnt(bo); return dmabuf; } static vm_fault_t vc4_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct drm_gem_object *obj = vma->vm_private_data; struct vc4_bo *bo = to_vc4_bo(obj); /* The only reason we would end up here is when user-space accesses * BO's memory after it's been purged. */ mutex_lock(&bo->madv_lock); WARN_ON(bo->madv != __VC4_MADV_PURGED); mutex_unlock(&bo->madv_lock); return VM_FAULT_SIGBUS; } static int vc4_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) { struct vc4_bo *bo = to_vc4_bo(obj); if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) { DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n"); return -EINVAL; } if (bo->madv != VC4_MADV_WILLNEED) { DRM_DEBUG("mmapping of %s BO not allowed\n", bo->madv == VC4_MADV_DONTNEED ? 
"purgeable" : "purged"); return -EINVAL; } return drm_gem_dma_mmap(&bo->base, vma); } static const struct vm_operations_struct vc4_vm_ops = { .fault = vc4_fault, .open = drm_gem_vm_open, .close = drm_gem_vm_close, }; static const struct drm_gem_object_funcs vc4_gem_object_funcs = { .free = vc4_free_object, .export = vc4_prime_export, .get_sg_table = drm_gem_dma_object_get_sg_table, .vmap = drm_gem_dma_object_vmap, .mmap = vc4_gem_object_mmap, .vm_ops = &vc4_vm_ops, }; static int vc4_grab_bin_bo(struct vc4_dev *vc4, struct vc4_file *vc4file) { if (!vc4->v3d) return -ENODEV; if (vc4file->bin_bo_used) return 0; return vc4_v3d_bin_bo_get(vc4, &vc4file->bin_bo_used); } int vc4_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_vc4_create_bo *args = data; struct vc4_file *vc4file = file_priv->driver_priv; struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_bo *bo = NULL; int ret; if (WARN_ON_ONCE(vc4->is_vc5)) return -ENODEV; ret = vc4_grab_bin_bo(vc4, vc4file); if (ret) return ret; /* * We can't allocate from the BO cache, because the BOs don't * get zeroed, and that might leak data between users. */ bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_V3D); if (IS_ERR(bo)) return PTR_ERR(bo); bo->madv = VC4_MADV_WILLNEED; ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); drm_gem_object_put(&bo->base.base); return ret; } int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_vc4_mmap_bo *args = data; struct drm_gem_object *gem_obj; if (WARN_ON_ONCE(vc4->is_vc5)) return -ENODEV; gem_obj = drm_gem_object_lookup(file_priv, args->handle); if (!gem_obj) { DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); return -EINVAL; } /* The mmap offset was set up at BO allocation time. 
*/ args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node); drm_gem_object_put(gem_obj); return 0; } int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_vc4_create_shader_bo *args = data; struct vc4_file *vc4file = file_priv->driver_priv; struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_bo *bo = NULL; int ret; if (WARN_ON_ONCE(vc4->is_vc5)) return -ENODEV; if (args->size == 0) return -EINVAL; if (args->size % sizeof(u64) != 0) return -EINVAL; if (args->flags != 0) { DRM_INFO("Unknown flags set: 0x%08x\n", args->flags); return -EINVAL; } if (args->pad != 0) { DRM_INFO("Pad set: 0x%08x\n", args->pad); return -EINVAL; } ret = vc4_grab_bin_bo(vc4, vc4file); if (ret) return ret; bo = vc4_bo_create(dev, args->size, true, VC4_BO_TYPE_V3D_SHADER); if (IS_ERR(bo)) return PTR_ERR(bo); bo->madv = VC4_MADV_WILLNEED; if (copy_from_user(bo->base.vaddr, (void __user *)(uintptr_t)args->data, args->size)) { ret = -EFAULT; goto fail; } /* Clear the rest of the memory from allocating from the BO * cache. */ memset(bo->base.vaddr + args->size, 0, bo->base.base.size - args->size); bo->validated_shader = vc4_validate_shader(&bo->base); if (!bo->validated_shader) { ret = -EINVAL; goto fail; } /* We have to create the handle after validation, to avoid * races for users to do doing things like mmap the shader BO. */ ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); fail: drm_gem_object_put(&bo->base.base); return ret; } /** * vc4_set_tiling_ioctl() - Sets the tiling modifier for a BO. * @dev: DRM device * @data: ioctl argument * @file_priv: DRM file for this fd * * The tiling state of the BO decides the default modifier of an fb if * no specific modifier was set by userspace, and the return value of * vc4_get_tiling_ioctl() (so that userspace can treat a BO it * received from dmabuf as the same tiling format as the producer * used). 
*/ int vc4_set_tiling_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_vc4_set_tiling *args = data; struct drm_gem_object *gem_obj; struct vc4_bo *bo; bool t_format; if (WARN_ON_ONCE(vc4->is_vc5)) return -ENODEV; if (args->flags != 0) return -EINVAL; switch (args->modifier) { case DRM_FORMAT_MOD_NONE: t_format = false; break; case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED: t_format = true; break; default: return -EINVAL; } gem_obj = drm_gem_object_lookup(file_priv, args->handle); if (!gem_obj) { DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); return -ENOENT; } bo = to_vc4_bo(gem_obj); bo->t_format = t_format; drm_gem_object_put(gem_obj); return 0; } /** * vc4_get_tiling_ioctl() - Gets the tiling modifier for a BO. * @dev: DRM device * @data: ioctl argument * @file_priv: DRM file for this fd * * Returns the tiling modifier for a BO as set by vc4_set_tiling_ioctl(). */ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_vc4_get_tiling *args = data; struct drm_gem_object *gem_obj; struct vc4_bo *bo; if (WARN_ON_ONCE(vc4->is_vc5)) return -ENODEV; if (args->flags != 0 || args->modifier != 0) return -EINVAL; gem_obj = drm_gem_object_lookup(file_priv, args->handle); if (!gem_obj) { DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); return -ENOENT; } bo = to_vc4_bo(gem_obj); if (bo->t_format) args->modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED; else args->modifier = DRM_FORMAT_MOD_NONE; drm_gem_object_put(gem_obj); return 0; } int vc4_bo_debugfs_init(struct drm_minor *minor) { struct drm_device *drm = minor->dev; struct vc4_dev *vc4 = to_vc4_dev(drm); if (!vc4->v3d) return -ENODEV; drm_debugfs_add_file(drm, "bo_stats", vc4_bo_stats_debugfs, NULL); return 0; } static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused); int vc4_bo_cache_init(struct drm_device *dev) { struct vc4_dev *vc4 = 
to_vc4_dev(dev); int ret; int i; if (WARN_ON_ONCE(vc4->is_vc5)) return -ENODEV; /* Create the initial set of BO labels that the kernel will * use. This lets us avoid a bunch of string reallocation in * the kernel's draw and BO allocation paths. */ vc4->bo_labels = kcalloc(VC4_BO_TYPE_COUNT, sizeof(*vc4->bo_labels), GFP_KERNEL); if (!vc4->bo_labels) return -ENOMEM; vc4->num_labels = VC4_BO_TYPE_COUNT; BUILD_BUG_ON(ARRAY_SIZE(bo_type_names) != VC4_BO_TYPE_COUNT); for (i = 0; i < VC4_BO_TYPE_COUNT; i++) vc4->bo_labels[i].name = bo_type_names[i]; ret = drmm_mutex_init(dev, &vc4->bo_lock); if (ret) { kfree(vc4->bo_labels); return ret; } INIT_LIST_HEAD(&vc4->bo_cache.time_list); INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work); timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0); return drmm_add_action_or_reset(dev, vc4_bo_cache_destroy, NULL); } static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused) { struct vc4_dev *vc4 = to_vc4_dev(dev); int i; del_timer(&vc4->bo_cache.time_timer); cancel_work_sync(&vc4->bo_cache.time_work); vc4_bo_cache_purge(dev); for (i = 0; i < vc4->num_labels; i++) { if (vc4->bo_labels[i].num_allocated) { DRM_ERROR("Destroying BO cache with %d %s " "BOs still allocated\n", vc4->bo_labels[i].num_allocated, vc4->bo_labels[i].name); } if (is_user_label(i)) kfree(vc4->bo_labels[i].name); } kfree(vc4->bo_labels); } int vc4_label_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_vc4_label_bo *args = data; char *name; struct drm_gem_object *gem_obj; int ret = 0, label; if (WARN_ON_ONCE(vc4->is_vc5)) return -ENODEV; if (!args->len) return -EINVAL; name = strndup_user(u64_to_user_ptr(args->name), args->len + 1); if (IS_ERR(name)) return PTR_ERR(name); gem_obj = drm_gem_object_lookup(file_priv, args->handle); if (!gem_obj) { DRM_ERROR("Failed to look up GEM BO %d\n", args->handle); kfree(name); return -ENOENT; } mutex_lock(&vc4->bo_lock); 
label = vc4_get_user_label(vc4, name); if (label != -1) vc4_bo_set_label(gem_obj, label); else ret = -ENOMEM; mutex_unlock(&vc4->bo_lock); drm_gem_object_put(gem_obj); return ret; }
linux-master
drivers/gpu/drm/vc4/vc4_bo.c
/* * Copyright © 2014-2015 Broadcom * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ /** * DOC: Render command list generation * * In the V3D hardware, render command lists are what load and store * tiles of a framebuffer and optionally call out to binner-generated * command lists to do the 3D drawing for that tile. * * In the VC4 driver, render command list generation is performed by the * kernel instead of userspace. We do this because validating a * user-submitted command list is hard to get right and has high CPU overhead, * while the number of valid configurations for render command lists is * actually fairly low. 
*/ #include "uapi/drm/vc4_drm.h" #include "vc4_drv.h" #include "vc4_packet.h" struct vc4_rcl_setup { struct drm_gem_dma_object *color_read; struct drm_gem_dma_object *color_write; struct drm_gem_dma_object *zs_read; struct drm_gem_dma_object *zs_write; struct drm_gem_dma_object *msaa_color_write; struct drm_gem_dma_object *msaa_zs_write; struct drm_gem_dma_object *rcl; u32 next_offset; u32 next_write_bo_index; }; static inline void rcl_u8(struct vc4_rcl_setup *setup, u8 val) { *(u8 *)(setup->rcl->vaddr + setup->next_offset) = val; setup->next_offset += 1; } static inline void rcl_u16(struct vc4_rcl_setup *setup, u16 val) { *(u16 *)(setup->rcl->vaddr + setup->next_offset) = val; setup->next_offset += 2; } static inline void rcl_u32(struct vc4_rcl_setup *setup, u32 val) { *(u32 *)(setup->rcl->vaddr + setup->next_offset) = val; setup->next_offset += 4; } /* * Emits a no-op STORE_TILE_BUFFER_GENERAL. * * If we emit a PACKET_TILE_COORDINATES, it must be followed by a store of * some sort before another load is triggered. */ static void vc4_store_before_load(struct vc4_rcl_setup *setup) { rcl_u8(setup, VC4_PACKET_STORE_TILE_BUFFER_GENERAL); rcl_u16(setup, VC4_SET_FIELD(VC4_LOADSTORE_TILE_BUFFER_NONE, VC4_LOADSTORE_TILE_BUFFER_BUFFER) | VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR | VC4_STORE_TILE_BUFFER_DISABLE_ZS_CLEAR | VC4_STORE_TILE_BUFFER_DISABLE_VG_MASK_CLEAR); rcl_u32(setup, 0); /* no address, since we're in None mode */ } /* * Calculates the physical address of the start of a tile in a RCL surface. * * Unlike the other load/store packets, * VC4_PACKET_LOAD/STORE_FULL_RES_TILE_BUFFER don't look at the tile * coordinates packet, and instead just store to the address given. 
*/
/* Byte offset of tile (x, y) within a full-resolution (MSAA) surface:
 * tiles are VC4_TILE_BUFFER_SIZE each, laid out row-major with a stride
 * of DIV_ROUND_UP(width, 32) tiles per row.
 */
static uint32_t vc4_full_res_offset(struct vc4_exec_info *exec,
				    struct drm_gem_dma_object *bo,
				    struct drm_vc4_submit_rcl_surface *surf,
				    uint8_t x, uint8_t y)
{
	return bo->dma_addr + surf->offset +
		VC4_TILE_BUFFER_SIZE * (DIV_ROUND_UP(exec->args->width, 32) *
					y + x);
}

/*
 * Emits a PACKET_TILE_COORDINATES if one isn't already pending.
 *
 * The tile coordinates packet triggers a pending load if there is one, are
 * used for clipping during rendering, and determine where loads/stores happen
 * relative to their base address.
 */
static void vc4_tile_coordinates(struct vc4_rcl_setup *setup,
				 uint32_t x, uint32_t y)
{
	rcl_u8(setup, VC4_PACKET_TILE_COORDINATES);
	rcl_u8(setup, x);
	rcl_u8(setup, y);
}

/* Emits the render-list packets for a single tile: surface loads, tile
 * coordinates, the branch into the binner's per-tile sub-list (if a bin CL
 * was submitted), and the stores.  Every packet emitted here must have been
 * accounted for in vc4_create_rcl_bo()'s loop_body_size computation.
 */
static void emit_tile(struct vc4_exec_info *exec,
		      struct vc4_rcl_setup *setup,
		      uint8_t x, uint8_t y, bool first, bool last)
{
	struct drm_vc4_submit_cl *args = exec->args;
	bool has_bin = args->bin_cl_size != 0;

	/* Note that the load doesn't actually occur until the
	 * tile coords packet is processed, and only one load
	 * may be outstanding at a time.
	 */
	if (setup->color_read) {
		if (args->color_read.flags &
		    VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
			rcl_u8(setup, VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER);
			rcl_u32(setup,
				vc4_full_res_offset(exec, setup->color_read,
						    &args->color_read, x, y) |
				VC4_LOADSTORE_FULL_RES_DISABLE_ZS);
		} else {
			rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
			rcl_u16(setup, args->color_read.bits);
			rcl_u32(setup, setup->color_read->dma_addr +
				args->color_read.offset);
		}
	}

	if (setup->zs_read) {
		if (setup->color_read) {
			/* Exec previous load. */
			vc4_tile_coordinates(setup, x, y);
			vc4_store_before_load(setup);
		}

		if (args->zs_read.flags &
		    VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
			rcl_u8(setup, VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER);
			rcl_u32(setup,
				vc4_full_res_offset(exec, setup->zs_read,
						    &args->zs_read, x, y) |
				VC4_LOADSTORE_FULL_RES_DISABLE_COLOR);
		} else {
			rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
			rcl_u16(setup, args->zs_read.bits);
			rcl_u32(setup, setup->zs_read->dma_addr +
				args->zs_read.offset);
		}
	}

	/* Clipping depends on tile coordinates having been
	 * emitted, so we always need one here.
	 */
	vc4_tile_coordinates(setup, x, y);

	/* Wait for the binner before jumping to the first
	 * tile's lists.
	 */
	if (first && has_bin)
		rcl_u8(setup, VC4_PACKET_WAIT_ON_SEMAPHORE);

	if (has_bin) {
		rcl_u8(setup, VC4_PACKET_BRANCH_TO_SUB_LIST);
		/* 32 bytes of tile allocation memory per tile, indexed by
		 * the tile's position in the bin grid.
		 */
		rcl_u32(setup, (exec->tile_alloc_offset +
				(y * exec->bin_tiles_x + x) * 32));
	}

	/* The four store paths below track whether any later store follows
	 * (last_tile_write): only the final store of the final tile carries
	 * the EOF flag, and intermediate stores suppress the tile-buffer
	 * clear so it happens just once.
	 */
	if (setup->msaa_color_write) {
		bool last_tile_write = (!setup->msaa_zs_write &&
					!setup->zs_write &&
					!setup->color_write);
		uint32_t bits = VC4_LOADSTORE_FULL_RES_DISABLE_ZS;

		if (!last_tile_write)
			bits |= VC4_LOADSTORE_FULL_RES_DISABLE_CLEAR_ALL;
		else if (last)
			bits |= VC4_LOADSTORE_FULL_RES_EOF;

		rcl_u8(setup, VC4_PACKET_STORE_FULL_RES_TILE_BUFFER);
		rcl_u32(setup,
			vc4_full_res_offset(exec, setup->msaa_color_write,
					    &args->msaa_color_write, x, y) |
			bits);
	}

	if (setup->msaa_zs_write) {
		bool last_tile_write = (!setup->zs_write &&
					!setup->color_write);
		uint32_t bits = VC4_LOADSTORE_FULL_RES_DISABLE_COLOR;

		/* A store was already emitted above; re-emit the tile
		 * coordinates between consecutive stores.
		 */
		if (setup->msaa_color_write)
			vc4_tile_coordinates(setup, x, y);
		if (!last_tile_write)
			bits |= VC4_LOADSTORE_FULL_RES_DISABLE_CLEAR_ALL;
		else if (last)
			bits |= VC4_LOADSTORE_FULL_RES_EOF;
		rcl_u8(setup, VC4_PACKET_STORE_FULL_RES_TILE_BUFFER);
		rcl_u32(setup,
			vc4_full_res_offset(exec, setup->msaa_zs_write,
					    &args->msaa_zs_write, x, y) |
			bits);
	}

	if (setup->zs_write) {
		bool last_tile_write = !setup->color_write;

		if (setup->msaa_color_write || setup->msaa_zs_write)
			vc4_tile_coordinates(setup, x, y);

		rcl_u8(setup, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
		rcl_u16(setup, args->zs_write.bits |
			(last_tile_write ?
			 0 : VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR));
		rcl_u32(setup,
			(setup->zs_write->dma_addr + args->zs_write.offset) |
			((last && last_tile_write) ?
			 VC4_LOADSTORE_TILE_BUFFER_EOF : 0));
	}

	if (setup->color_write) {
		if (setup->msaa_color_write || setup->msaa_zs_write ||
		    setup->zs_write) {
			vc4_tile_coordinates(setup, x, y);
		}

		/* The color-write address was programmed once in the
		 * TILE_RENDERING_MODE_CONFIG packet, so the store packet
		 * itself carries no address.
		 */
		if (last)
			rcl_u8(setup, VC4_PACKET_STORE_MS_TILE_BUFFER_AND_EOF);
		else
			rcl_u8(setup, VC4_PACKET_STORE_MS_TILE_BUFFER);
	}
}

/* Computes the exact size of the render command list, allocates a BO for
 * it, and emits the setup packets plus one emit_tile() body per tile.
 * The size computation must match emit_tile()'s output byte-for-byte,
 * which the BUG_ON at the end verifies.
 *
 * Returns 0 on success or a negative errno on allocation failure.
 */
static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
			     struct vc4_rcl_setup *setup)
{
	struct drm_vc4_submit_cl *args = exec->args;
	bool has_bin = args->bin_cl_size != 0;
	uint8_t min_x_tile = args->min_x_tile;
	uint8_t min_y_tile = args->min_y_tile;
	uint8_t max_x_tile = args->max_x_tile;
	uint8_t max_y_tile = args->max_y_tile;
	uint8_t xtiles = max_x_tile - min_x_tile + 1;
	uint8_t ytiles = max_y_tile - min_y_tile + 1;
	uint8_t xi, yi;
	uint32_t size, loop_body_size;
	bool positive_x = true;
	bool positive_y = true;

	/* Userspace may request a fixed tile walk order (e.g. for
	 * wrap-around scrolling); default is increasing x and y.
	 */
	if (args->flags & VC4_SUBMIT_CL_FIXED_RCL_ORDER) {
		if (!(args->flags & VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X))
			positive_x = false;
		if (!(args->flags & VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y))
			positive_y = false;
	}

	size = VC4_PACKET_TILE_RENDERING_MODE_CONFIG_SIZE;
	loop_body_size = VC4_PACKET_TILE_COORDINATES_SIZE;

	if (args->flags & VC4_SUBMIT_CL_USE_CLEAR_COLOR) {
		size += VC4_PACKET_CLEAR_COLORS_SIZE +
			VC4_PACKET_TILE_COORDINATES_SIZE +
			VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE;
	}

	if (setup->color_read) {
		if (args->color_read.flags &
		    VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
			loop_body_size += VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER_SIZE;
		} else {
			loop_body_size += VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE;
		}
	}
	if (setup->zs_read) {
		if (setup->color_read) {
			/* Accounts for the extra coordinates + store that
			 * emit_tile() inserts to flush the pending color
			 * load before the Z/S load.
			 */
			loop_body_size += VC4_PACKET_TILE_COORDINATES_SIZE;
			loop_body_size += VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE;
		}

		if (args->zs_read.flags &
		    VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
			loop_body_size += VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER_SIZE;
		} else {
			loop_body_size += VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE;
		}
	}

	if (has_bin) {
		size += VC4_PACKET_WAIT_ON_SEMAPHORE_SIZE;
		loop_body_size += VC4_PACKET_BRANCH_TO_SUB_LIST_SIZE;
	}

	if (setup->msaa_color_write)
		loop_body_size += VC4_PACKET_STORE_FULL_RES_TILE_BUFFER_SIZE;
	if (setup->msaa_zs_write)
		loop_body_size += VC4_PACKET_STORE_FULL_RES_TILE_BUFFER_SIZE;

	if (setup->zs_write)
		loop_body_size += VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE;
	if (setup->color_write)
		loop_body_size += VC4_PACKET_STORE_MS_TILE_BUFFER_SIZE;

	/* We need a VC4_PACKET_TILE_COORDINATES in between each store. */
	loop_body_size += VC4_PACKET_TILE_COORDINATES_SIZE *
		((setup->msaa_color_write != NULL) +
		 (setup->msaa_zs_write != NULL) +
		 (setup->color_write != NULL) +
		 (setup->zs_write != NULL) - 1);

	size += xtiles * ytiles * loop_body_size;

	setup->rcl = &vc4_bo_create(dev, size, true, VC4_BO_TYPE_RCL)->base;
	if (IS_ERR(setup->rcl))
		return PTR_ERR(setup->rcl);
	/* Queued for unreference when the job completes. */
	list_add_tail(&to_vc4_bo(&setup->rcl->base)->unref_head,
		      &exec->unref_list);

	/* The tile buffer gets cleared when the previous tile is stored.  If
	 * the clear values changed between frames, then the tile buffer has
	 * stale clear values in it, so we have to do a store in None mode (no
	 * writes) so that we trigger the tile buffer clear.
	 */
	if (args->flags & VC4_SUBMIT_CL_USE_CLEAR_COLOR) {
		rcl_u8(setup, VC4_PACKET_CLEAR_COLORS);
		rcl_u32(setup, args->clear_color[0]);
		rcl_u32(setup, args->clear_color[1]);
		rcl_u32(setup, args->clear_z);
		rcl_u8(setup, args->clear_s);

		vc4_tile_coordinates(setup, 0, 0);

		rcl_u8(setup, VC4_PACKET_STORE_TILE_BUFFER_GENERAL);
		rcl_u16(setup, VC4_LOADSTORE_TILE_BUFFER_NONE);
		rcl_u32(setup, 0); /* no address, since we're in None mode */
	}

	rcl_u8(setup, VC4_PACKET_TILE_RENDERING_MODE_CONFIG);
	rcl_u32(setup,
		(setup->color_write ?
		 (setup->color_write->dma_addr +
		  args->color_write.offset) :
		 0));
	rcl_u16(setup, args->width);
	rcl_u16(setup, args->height);
	rcl_u16(setup, args->color_write.bits);

	for (yi = 0; yi < ytiles; yi++) {
		int y = positive_y ? min_y_tile + yi : max_y_tile - yi;

		for (xi = 0; xi < xtiles; xi++) {
			int x = positive_x ? min_x_tile + xi : max_x_tile - xi;
			bool first = (xi == 0 && yi == 0);
			bool last = (xi == xtiles - 1 && yi == ytiles - 1);

			emit_tile(exec, setup, x, y, first, last);
		}
	}

	BUG_ON(setup->next_offset != size);
	exec->ct1ca = setup->rcl->dma_addr;
	exec->ct1ea = setup->rcl->dma_addr + setup->next_offset;

	return 0;
}

/* Validates that a full-resolution (MSAA) surface is large enough to hold
 * a tile at the maximum tile coordinates of this job.  Returns 0 or -EINVAL.
 */
static int vc4_full_res_bounds_check(struct vc4_exec_info *exec,
				     struct drm_gem_dma_object *obj,
				     struct drm_vc4_submit_rcl_surface *surf)
{
	struct drm_vc4_submit_cl *args = exec->args;
	u32 render_tiles_stride = DIV_ROUND_UP(exec->args->width, 32);

	if (surf->offset > obj->base.size) {
		DRM_DEBUG("surface offset %d > BO size %zd\n",
			  surf->offset, obj->base.size);
		return -EINVAL;
	}

	if ((obj->base.size - surf->offset) / VC4_TILE_BUFFER_SIZE <
	    render_tiles_stride * args->max_y_tile + args->max_x_tile) {
		DRM_DEBUG("MSAA tile %d, %d out of bounds "
			  "(bo size %zd, offset %d).\n",
			  args->max_x_tile, args->max_y_tile,
			  obj->base.size, surf->offset);
		return -EINVAL;
	}

	return 0;
}

/* Validates an MSAA write surface from the submit args, looks up its BO,
 * and records it in rcl_write_bo[].  hindex == ~0 means "no surface" and
 * succeeds with *obj untouched.  Returns 0 or -EINVAL.
 */
static int vc4_rcl_msaa_surface_setup(struct vc4_exec_info *exec,
				      struct drm_gem_dma_object **obj,
				      struct drm_vc4_submit_rcl_surface *surf)
{
	if (surf->flags != 0 || surf->bits != 0) {
		DRM_DEBUG("MSAA surface had nonzero flags/bits\n");
		return -EINVAL;
	}

	if (surf->hindex == ~0)
		return 0;

	*obj = vc4_use_bo(exec, surf->hindex);
	if (!*obj)
		return -EINVAL;

	exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj;

	if (surf->offset & 0xf) {
		DRM_DEBUG("MSAA write must be 16b aligned.\n");
		return -EINVAL;
	}

	return vc4_full_res_bounds_check(exec, *obj, surf);
}

/* Validates a general load/store surface (color/Z-S read or Z-S write):
 * checks flags/bits against the hardware's tiling, buffer and format
 * fields, verifies alignment and texture bounds, and looks up the BO.
 * hindex == ~0 means "no surface".  Returns 0 or -EINVAL.
 */
static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
				 struct drm_gem_dma_object **obj,
				 struct drm_vc4_submit_rcl_surface *surf,
				 bool is_write)
{
	uint8_t tiling = VC4_GET_FIELD(surf->bits,
				       VC4_LOADSTORE_TILE_BUFFER_TILING);
	uint8_t buffer = VC4_GET_FIELD(surf->bits,
				       VC4_LOADSTORE_TILE_BUFFER_BUFFER);
	uint8_t format = VC4_GET_FIELD(surf->bits,
				       VC4_LOADSTORE_TILE_BUFFER_FORMAT);
	int cpp;
	int ret;

	if (surf->flags & ~VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
		DRM_DEBUG("Extra flags set\n");
		return -EINVAL;
	}

	if (surf->hindex == ~0)
		return 0;

	*obj = vc4_use_bo(exec, surf->hindex);
	if (!*obj)
		return -EINVAL;

	if (is_write)
		exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj;

	if (surf->flags & VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
		/* Full-res mode carries its own bounds check and allows no
		 * general load/store bits; only reads may use it here.
		 */
		if (surf == &exec->args->zs_write) {
			DRM_DEBUG("general zs write may not be a full-res.\n");
			return -EINVAL;
		}
		if (surf->bits != 0) {
			DRM_DEBUG("load/store general bits set with "
				  "full res load/store.\n");
			return -EINVAL;
		}

		ret = vc4_full_res_bounds_check(exec, *obj, surf);
		if (ret)
			return ret;

		return 0;
	}

	if (surf->bits & ~(VC4_LOADSTORE_TILE_BUFFER_TILING_MASK |
			   VC4_LOADSTORE_TILE_BUFFER_BUFFER_MASK |
			   VC4_LOADSTORE_TILE_BUFFER_FORMAT_MASK)) {
		DRM_DEBUG("Unknown bits in load/store: 0x%04x\n",
			  surf->bits);
		return -EINVAL;
	}

	if (tiling > VC4_TILING_FORMAT_LT) {
		DRM_DEBUG("Bad tiling format\n");
		return -EINVAL;
	}

	if (buffer == VC4_LOADSTORE_TILE_BUFFER_ZS) {
		if (format != 0) {
			DRM_DEBUG("No color format should be set for ZS\n");
			return -EINVAL;
		}
		cpp = 4;
	} else if (buffer == VC4_LOADSTORE_TILE_BUFFER_COLOR) {
		switch (format) {
		case VC4_LOADSTORE_TILE_BUFFER_BGR565:
		case VC4_LOADSTORE_TILE_BUFFER_BGR565_DITHER:
			cpp = 2;
			break;
		case VC4_LOADSTORE_TILE_BUFFER_RGBA8888:
			cpp = 4;
			break;
		default:
			DRM_DEBUG("Bad tile buffer format\n");
			return -EINVAL;
		}
	} else {
		DRM_DEBUG("Bad load/store buffer %d.\n", buffer);
		return -EINVAL;
	}

	if (surf->offset & 0xf) {
		DRM_DEBUG("load/store buffer must be 16b aligned.\n");
		return -EINVAL;
	}

	if (!vc4_check_tex_size(exec, *obj, surf->offset, tiling,
				exec->args->width, exec->args->height, cpp)) {
		return -EINVAL;
	}

	return 0;
}

/* Validates the color-write surface that is programmed through the
 * TILE_RENDERING_MODE_CONFIG packet (render-config bit layout, not the
 * general load/store layout).  hindex == ~0 means "no surface".
 * Returns 0 or -EINVAL.
 */
static int
vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec,
				    struct vc4_rcl_setup *setup,
				    struct drm_gem_dma_object **obj,
				    struct drm_vc4_submit_rcl_surface *surf)
{
	uint8_t tiling = VC4_GET_FIELD(surf->bits,
				       VC4_RENDER_CONFIG_MEMORY_FORMAT);
	uint8_t format = VC4_GET_FIELD(surf->bits,
				       VC4_RENDER_CONFIG_FORMAT);
	int cpp;

	if (surf->flags != 0) {
		DRM_DEBUG("No flags supported on render config.\n");
		return -EINVAL;
	}

	if (surf->bits & ~(VC4_RENDER_CONFIG_MEMORY_FORMAT_MASK |
			   VC4_RENDER_CONFIG_FORMAT_MASK |
			   VC4_RENDER_CONFIG_MS_MODE_4X |
			   VC4_RENDER_CONFIG_DECIMATE_MODE_4X)) {
		DRM_DEBUG("Unknown bits in render config: 0x%04x\n",
			  surf->bits);
		return -EINVAL;
	}

	if (surf->hindex == ~0)
		return 0;

	*obj = vc4_use_bo(exec, surf->hindex);
	if (!*obj)
		return -EINVAL;

	exec->rcl_write_bo[exec->rcl_write_bo_count++] = *obj;

	if (tiling > VC4_TILING_FORMAT_LT) {
		DRM_DEBUG("Bad tiling format\n");
		return -EINVAL;
	}

	switch (format) {
	case VC4_RENDER_CONFIG_FORMAT_BGR565_DITHERED:
	case VC4_RENDER_CONFIG_FORMAT_BGR565:
		cpp = 2;
		break;
	case VC4_RENDER_CONFIG_FORMAT_RGBA8888:
		cpp = 4;
		break;
	default:
		DRM_DEBUG("Bad tile buffer format\n");
		return -EINVAL;
	}

	if (!vc4_check_tex_size(exec, *obj, surf->offset, tiling,
				exec->args->width, exec->args->height, cpp)) {
		return -EINVAL;
	}

	return 0;
}

/* Entry point: validates the submitted render surfaces and tile bounds,
 * then builds the render command list BO.  Returns 0 on success or a
 * negative errno (-ENODEV on VC5, -EINVAL on bad args, or the
 * vc4_create_rcl_bo() error).
 */
int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_rcl_setup setup = {0};
	struct drm_vc4_submit_cl *args = exec->args;
	bool has_bin = args->bin_cl_size != 0;
	int ret;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	if (args->min_x_tile > args->max_x_tile ||
	    args->min_y_tile > args->max_y_tile) {
		DRM_DEBUG("Bad render tile set (%d,%d)-(%d,%d)\n",
			  args->min_x_tile, args->min_y_tile,
			  args->max_x_tile, args->max_y_tile);
		return -EINVAL;
	}

	if (has_bin &&
	    (args->max_x_tile > exec->bin_tiles_x ||
	     args->max_y_tile > exec->bin_tiles_y)) {
		DRM_DEBUG("Render tiles (%d,%d) outside of bin config "
			  "(%d,%d)\n",
			  args->max_x_tile, args->max_y_tile,
			  exec->bin_tiles_x, exec->bin_tiles_y);
		return -EINVAL;
	}

	ret = vc4_rcl_render_config_surface_setup(exec, &setup,
						  &setup.color_write,
						  &args->color_write);
	if (ret)
		return ret;

	ret = vc4_rcl_surface_setup(exec, &setup.color_read, &args->color_read,
				    false);
	if (ret)
		return ret;

	ret = vc4_rcl_surface_setup(exec, &setup.zs_read, &args->zs_read,
				    false);
	if (ret)
		return ret;

	ret = vc4_rcl_surface_setup(exec, &setup.zs_write, &args->zs_write,
				    true);
	if (ret)
		return ret;

	ret = vc4_rcl_msaa_surface_setup(exec, &setup.msaa_color_write,
					 &args->msaa_color_write);
	if (ret)
		return ret;

	ret = vc4_rcl_msaa_surface_setup(exec, &setup.msaa_zs_write,
					 &args->msaa_zs_write);
	if (ret)
		return ret;

	/* We shouldn't even have the job submitted to us if there's no
	 * surface to write out.
	 */
	if (!setup.color_write && !setup.zs_write &&
	    !setup.msaa_color_write && !setup.msaa_zs_write) {
		DRM_DEBUG("RCL requires color or Z/S write\n");
		return -EINVAL;
	}

	return vc4_create_rcl_bo(dev, exec, &setup);
}
linux-master
drivers/gpu/drm/vc4/vc4_render_cl.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2016 Broadcom */ /** * DOC: VC4 DSI0/DSI1 module * * BCM2835 contains two DSI modules, DSI0 and DSI1. DSI0 is a * single-lane DSI controller, while DSI1 is a more modern 4-lane DSI * controller. * * Most Raspberry Pi boards expose DSI1 as their "DISPLAY" connector, * while the compute module brings both DSI0 and DSI1 out. * * This driver has been tested for DSI1 video-mode display only * currently, with most of the information necessary for DSI0 * hopefully present. */ #include <linux/clk-provider.h> #include <linux/clk.h> #include <linux/completion.h> #include <linux/component.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_edid.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> #include "vc4_drv.h" #include "vc4_regs.h" #define DSI_CMD_FIFO_DEPTH 16 #define DSI_PIX_FIFO_DEPTH 256 #define DSI_PIX_FIFO_WIDTH 4 #define DSI0_CTRL 0x00 /* Command packet control. */ #define DSI0_TXPKT1C 0x04 /* AKA PKTC */ #define DSI1_TXPKT1C 0x04 # define DSI_TXPKT1C_TRIG_CMD_MASK VC4_MASK(31, 24) # define DSI_TXPKT1C_TRIG_CMD_SHIFT 24 # define DSI_TXPKT1C_CMD_REPEAT_MASK VC4_MASK(23, 10) # define DSI_TXPKT1C_CMD_REPEAT_SHIFT 10 # define DSI_TXPKT1C_DISPLAY_NO_MASK VC4_MASK(9, 8) # define DSI_TXPKT1C_DISPLAY_NO_SHIFT 8 /* Short, trigger, BTA, or a long packet that fits all in CMDFIFO. */ # define DSI_TXPKT1C_DISPLAY_NO_SHORT 0 /* Primary display where cmdfifo provides part of the payload and * pixelvalve the rest. */ # define DSI_TXPKT1C_DISPLAY_NO_PRIMARY 1 /* Secondary display where cmdfifo provides part of the payload and * pixfifo the rest. 
*/ # define DSI_TXPKT1C_DISPLAY_NO_SECONDARY 2 # define DSI_TXPKT1C_CMD_TX_TIME_MASK VC4_MASK(7, 6) # define DSI_TXPKT1C_CMD_TX_TIME_SHIFT 6 # define DSI_TXPKT1C_CMD_CTRL_MASK VC4_MASK(5, 4) # define DSI_TXPKT1C_CMD_CTRL_SHIFT 4 /* Command only. Uses TXPKT1H and DISPLAY_NO */ # define DSI_TXPKT1C_CMD_CTRL_TX 0 /* Command with BTA for either ack or read data. */ # define DSI_TXPKT1C_CMD_CTRL_RX 1 /* Trigger according to TRIG_CMD */ # define DSI_TXPKT1C_CMD_CTRL_TRIG 2 /* BTA alone for getting error status after a command, or a TE trigger * without a previous command. */ # define DSI_TXPKT1C_CMD_CTRL_BTA 3 # define DSI_TXPKT1C_CMD_MODE_LP BIT(3) # define DSI_TXPKT1C_CMD_TYPE_LONG BIT(2) # define DSI_TXPKT1C_CMD_TE_EN BIT(1) # define DSI_TXPKT1C_CMD_EN BIT(0) /* Command packet header. */ #define DSI0_TXPKT1H 0x08 /* AKA PKTH */ #define DSI1_TXPKT1H 0x08 # define DSI_TXPKT1H_BC_CMDFIFO_MASK VC4_MASK(31, 24) # define DSI_TXPKT1H_BC_CMDFIFO_SHIFT 24 # define DSI_TXPKT1H_BC_PARAM_MASK VC4_MASK(23, 8) # define DSI_TXPKT1H_BC_PARAM_SHIFT 8 # define DSI_TXPKT1H_BC_DT_MASK VC4_MASK(7, 0) # define DSI_TXPKT1H_BC_DT_SHIFT 0 #define DSI0_RXPKT1H 0x0c /* AKA RX1_PKTH */ #define DSI1_RXPKT1H 0x14 # define DSI_RXPKT1H_CRC_ERR BIT(31) # define DSI_RXPKT1H_DET_ERR BIT(30) # define DSI_RXPKT1H_ECC_ERR BIT(29) # define DSI_RXPKT1H_COR_ERR BIT(28) # define DSI_RXPKT1H_INCOMP_PKT BIT(25) # define DSI_RXPKT1H_PKT_TYPE_LONG BIT(24) /* Byte count if DSI_RXPKT1H_PKT_TYPE_LONG */ # define DSI_RXPKT1H_BC_PARAM_MASK VC4_MASK(23, 8) # define DSI_RXPKT1H_BC_PARAM_SHIFT 8 /* Short return bytes if !DSI_RXPKT1H_PKT_TYPE_LONG */ # define DSI_RXPKT1H_SHORT_1_MASK VC4_MASK(23, 16) # define DSI_RXPKT1H_SHORT_1_SHIFT 16 # define DSI_RXPKT1H_SHORT_0_MASK VC4_MASK(15, 8) # define DSI_RXPKT1H_SHORT_0_SHIFT 8 # define DSI_RXPKT1H_DT_LP_CMD_MASK VC4_MASK(7, 0) # define DSI_RXPKT1H_DT_LP_CMD_SHIFT 0 #define DSI0_RXPKT2H 0x10 /* AKA RX2_PKTH */ #define DSI1_RXPKT2H 0x18 # define DSI_RXPKT1H_DET_ERR BIT(30) # 
define DSI_RXPKT1H_ECC_ERR BIT(29) # define DSI_RXPKT1H_COR_ERR BIT(28) # define DSI_RXPKT1H_INCOMP_PKT BIT(25) # define DSI_RXPKT1H_BC_PARAM_MASK VC4_MASK(23, 8) # define DSI_RXPKT1H_BC_PARAM_SHIFT 8 # define DSI_RXPKT1H_DT_MASK VC4_MASK(7, 0) # define DSI_RXPKT1H_DT_SHIFT 0 #define DSI0_TXPKT_CMD_FIFO 0x14 /* AKA CMD_DATAF */ #define DSI1_TXPKT_CMD_FIFO 0x1c #define DSI0_DISP0_CTRL 0x18 # define DSI_DISP0_PIX_CLK_DIV_MASK VC4_MASK(21, 13) # define DSI_DISP0_PIX_CLK_DIV_SHIFT 13 # define DSI_DISP0_LP_STOP_CTRL_MASK VC4_MASK(12, 11) # define DSI_DISP0_LP_STOP_CTRL_SHIFT 11 # define DSI_DISP0_LP_STOP_DISABLE 0 # define DSI_DISP0_LP_STOP_PERLINE 1 # define DSI_DISP0_LP_STOP_PERFRAME 2 /* Transmit RGB pixels and null packets only during HACTIVE, instead * of going to LP-STOP. */ # define DSI_DISP_HACTIVE_NULL BIT(10) /* Transmit blanking packet only during vblank, instead of allowing LP-STOP. */ # define DSI_DISP_VBLP_CTRL BIT(9) /* Transmit blanking packet only during HFP, instead of allowing LP-STOP. */ # define DSI_DISP_HFP_CTRL BIT(8) /* Transmit blanking packet only during HBP, instead of allowing LP-STOP. */ # define DSI_DISP_HBP_CTRL BIT(7) # define DSI_DISP0_CHANNEL_MASK VC4_MASK(6, 5) # define DSI_DISP0_CHANNEL_SHIFT 5 /* Enables end events for HSYNC/VSYNC, not just start events. */ # define DSI_DISP0_ST_END BIT(4) # define DSI_DISP0_PFORMAT_MASK VC4_MASK(3, 2) # define DSI_DISP0_PFORMAT_SHIFT 2 # define DSI_PFORMAT_RGB565 0 # define DSI_PFORMAT_RGB666_PACKED 1 # define DSI_PFORMAT_RGB666 2 # define DSI_PFORMAT_RGB888 3 /* Default is VIDEO mode. */ # define DSI_DISP0_COMMAND_MODE BIT(1) # define DSI_DISP0_ENABLE BIT(0) #define DSI0_DISP1_CTRL 0x1c #define DSI1_DISP1_CTRL 0x2c /* Format of the data written to TXPKT_PIX_FIFO. 
*/ # define DSI_DISP1_PFORMAT_MASK VC4_MASK(2, 1) # define DSI_DISP1_PFORMAT_SHIFT 1 # define DSI_DISP1_PFORMAT_16BIT 0 # define DSI_DISP1_PFORMAT_24BIT 1 # define DSI_DISP1_PFORMAT_32BIT_LE 2 # define DSI_DISP1_PFORMAT_32BIT_BE 3 /* DISP1 is always command mode. */ # define DSI_DISP1_ENABLE BIT(0) #define DSI0_TXPKT_PIX_FIFO 0x20 /* AKA PIX_FIFO */ #define DSI0_INT_STAT 0x24 #define DSI0_INT_EN 0x28 # define DSI0_INT_FIFO_ERR BIT(25) # define DSI0_INT_CMDC_DONE_MASK VC4_MASK(24, 23) # define DSI0_INT_CMDC_DONE_SHIFT 23 # define DSI0_INT_CMDC_DONE_NO_REPEAT 1 # define DSI0_INT_CMDC_DONE_REPEAT 3 # define DSI0_INT_PHY_DIR_RTF BIT(22) # define DSI0_INT_PHY_D1_ULPS BIT(21) # define DSI0_INT_PHY_D1_STOP BIT(20) # define DSI0_INT_PHY_RXLPDT BIT(19) # define DSI0_INT_PHY_RXTRIG BIT(18) # define DSI0_INT_PHY_D0_ULPS BIT(17) # define DSI0_INT_PHY_D0_LPDT BIT(16) # define DSI0_INT_PHY_D0_FTR BIT(15) # define DSI0_INT_PHY_D0_STOP BIT(14) /* Signaled when the clock lane enters the given state. */ # define DSI0_INT_PHY_CLK_ULPS BIT(13) # define DSI0_INT_PHY_CLK_HS BIT(12) # define DSI0_INT_PHY_CLK_FTR BIT(11) /* Signaled on timeouts */ # define DSI0_INT_PR_TO BIT(10) # define DSI0_INT_TA_TO BIT(9) # define DSI0_INT_LPRX_TO BIT(8) # define DSI0_INT_HSTX_TO BIT(7) /* Contention on a line when trying to drive the line low */ # define DSI0_INT_ERR_CONT_LP1 BIT(6) # define DSI0_INT_ERR_CONT_LP0 BIT(5) /* Control error: incorrect line state sequence on data lane 0. 
*/ # define DSI0_INT_ERR_CONTROL BIT(4) # define DSI0_INT_ERR_SYNC_ESC BIT(3) # define DSI0_INT_RX2_PKT BIT(2) # define DSI0_INT_RX1_PKT BIT(1) # define DSI0_INT_CMD_PKT BIT(0) #define DSI0_INTERRUPTS_ALWAYS_ENABLED (DSI0_INT_ERR_SYNC_ESC | \ DSI0_INT_ERR_CONTROL | \ DSI0_INT_ERR_CONT_LP0 | \ DSI0_INT_ERR_CONT_LP1 | \ DSI0_INT_HSTX_TO | \ DSI0_INT_LPRX_TO | \ DSI0_INT_TA_TO | \ DSI0_INT_PR_TO) # define DSI1_INT_PHY_D3_ULPS BIT(30) # define DSI1_INT_PHY_D3_STOP BIT(29) # define DSI1_INT_PHY_D2_ULPS BIT(28) # define DSI1_INT_PHY_D2_STOP BIT(27) # define DSI1_INT_PHY_D1_ULPS BIT(26) # define DSI1_INT_PHY_D1_STOP BIT(25) # define DSI1_INT_PHY_D0_ULPS BIT(24) # define DSI1_INT_PHY_D0_STOP BIT(23) # define DSI1_INT_FIFO_ERR BIT(22) # define DSI1_INT_PHY_DIR_RTF BIT(21) # define DSI1_INT_PHY_RXLPDT BIT(20) # define DSI1_INT_PHY_RXTRIG BIT(19) # define DSI1_INT_PHY_D0_LPDT BIT(18) # define DSI1_INT_PHY_DIR_FTR BIT(17) /* Signaled when the clock lane enters the given state. */ # define DSI1_INT_PHY_CLOCK_ULPS BIT(16) # define DSI1_INT_PHY_CLOCK_HS BIT(15) # define DSI1_INT_PHY_CLOCK_STOP BIT(14) /* Signaled on timeouts */ # define DSI1_INT_PR_TO BIT(13) # define DSI1_INT_TA_TO BIT(12) # define DSI1_INT_LPRX_TO BIT(11) # define DSI1_INT_HSTX_TO BIT(10) /* Contention on a line when trying to drive the line low */ # define DSI1_INT_ERR_CONT_LP1 BIT(9) # define DSI1_INT_ERR_CONT_LP0 BIT(8) /* Control error: incorrect line state sequence on data lane 0. */ # define DSI1_INT_ERR_CONTROL BIT(7) /* LPDT synchronization error (bits received not a multiple of 8. */ # define DSI1_INT_ERR_SYNC_ESC BIT(6) /* Signaled after receiving an error packet from the display in * response to a read. */ # define DSI1_INT_RXPKT2 BIT(5) /* Signaled after receiving a packet. The header and optional short * response will be in RXPKT1H, and a long response will be in the * RXPKT_FIFO. 
*/ # define DSI1_INT_RXPKT1 BIT(4) # define DSI1_INT_TXPKT2_DONE BIT(3) # define DSI1_INT_TXPKT2_END BIT(2) /* Signaled after all repeats of TXPKT1 are transferred. */ # define DSI1_INT_TXPKT1_DONE BIT(1) /* Signaled after each TXPKT1 repeat is scheduled. */ # define DSI1_INT_TXPKT1_END BIT(0) #define DSI1_INTERRUPTS_ALWAYS_ENABLED (DSI1_INT_ERR_SYNC_ESC | \ DSI1_INT_ERR_CONTROL | \ DSI1_INT_ERR_CONT_LP0 | \ DSI1_INT_ERR_CONT_LP1 | \ DSI1_INT_HSTX_TO | \ DSI1_INT_LPRX_TO | \ DSI1_INT_TA_TO | \ DSI1_INT_PR_TO) #define DSI0_STAT 0x2c #define DSI0_HSTX_TO_CNT 0x30 #define DSI0_LPRX_TO_CNT 0x34 #define DSI0_TA_TO_CNT 0x38 #define DSI0_PR_TO_CNT 0x3c #define DSI0_PHYC 0x40 # define DSI1_PHYC_ESC_CLK_LPDT_MASK VC4_MASK(25, 20) # define DSI1_PHYC_ESC_CLK_LPDT_SHIFT 20 # define DSI1_PHYC_HS_CLK_CONTINUOUS BIT(18) # define DSI0_PHYC_ESC_CLK_LPDT_MASK VC4_MASK(17, 12) # define DSI0_PHYC_ESC_CLK_LPDT_SHIFT 12 # define DSI1_PHYC_CLANE_ULPS BIT(17) # define DSI1_PHYC_CLANE_ENABLE BIT(16) # define DSI_PHYC_DLANE3_ULPS BIT(13) # define DSI_PHYC_DLANE3_ENABLE BIT(12) # define DSI0_PHYC_HS_CLK_CONTINUOUS BIT(10) # define DSI0_PHYC_CLANE_ULPS BIT(9) # define DSI_PHYC_DLANE2_ULPS BIT(9) # define DSI0_PHYC_CLANE_ENABLE BIT(8) # define DSI_PHYC_DLANE2_ENABLE BIT(8) # define DSI_PHYC_DLANE1_ULPS BIT(5) # define DSI_PHYC_DLANE1_ENABLE BIT(4) # define DSI_PHYC_DLANE0_FORCE_STOP BIT(2) # define DSI_PHYC_DLANE0_ULPS BIT(1) # define DSI_PHYC_DLANE0_ENABLE BIT(0) #define DSI0_HS_CLT0 0x44 #define DSI0_HS_CLT1 0x48 #define DSI0_HS_CLT2 0x4c #define DSI0_HS_DLT3 0x50 #define DSI0_HS_DLT4 0x54 #define DSI0_HS_DLT5 0x58 #define DSI0_HS_DLT6 0x5c #define DSI0_HS_DLT7 0x60 #define DSI0_PHY_AFEC0 0x64 # define DSI0_PHY_AFEC0_DDR2CLK_EN BIT(26) # define DSI0_PHY_AFEC0_DDRCLK_EN BIT(25) # define DSI0_PHY_AFEC0_LATCH_ULPS BIT(24) # define DSI1_PHY_AFEC0_IDR_DLANE3_MASK VC4_MASK(31, 29) # define DSI1_PHY_AFEC0_IDR_DLANE3_SHIFT 29 # define DSI1_PHY_AFEC0_IDR_DLANE2_MASK VC4_MASK(28, 26) # define 
DSI1_PHY_AFEC0_IDR_DLANE2_SHIFT 26 # define DSI1_PHY_AFEC0_IDR_DLANE1_MASK VC4_MASK(27, 23) # define DSI1_PHY_AFEC0_IDR_DLANE1_SHIFT 23 # define DSI1_PHY_AFEC0_IDR_DLANE0_MASK VC4_MASK(22, 20) # define DSI1_PHY_AFEC0_IDR_DLANE0_SHIFT 20 # define DSI1_PHY_AFEC0_IDR_CLANE_MASK VC4_MASK(19, 17) # define DSI1_PHY_AFEC0_IDR_CLANE_SHIFT 17 # define DSI0_PHY_AFEC0_ACTRL_DLANE1_MASK VC4_MASK(23, 20) # define DSI0_PHY_AFEC0_ACTRL_DLANE1_SHIFT 20 # define DSI0_PHY_AFEC0_ACTRL_DLANE0_MASK VC4_MASK(19, 16) # define DSI0_PHY_AFEC0_ACTRL_DLANE0_SHIFT 16 # define DSI0_PHY_AFEC0_ACTRL_CLANE_MASK VC4_MASK(15, 12) # define DSI0_PHY_AFEC0_ACTRL_CLANE_SHIFT 12 # define DSI1_PHY_AFEC0_DDR2CLK_EN BIT(16) # define DSI1_PHY_AFEC0_DDRCLK_EN BIT(15) # define DSI1_PHY_AFEC0_LATCH_ULPS BIT(14) # define DSI1_PHY_AFEC0_RESET BIT(13) # define DSI1_PHY_AFEC0_PD BIT(12) # define DSI0_PHY_AFEC0_RESET BIT(11) # define DSI1_PHY_AFEC0_PD_BG BIT(11) # define DSI0_PHY_AFEC0_PD BIT(10) # define DSI1_PHY_AFEC0_PD_DLANE1 BIT(10) # define DSI0_PHY_AFEC0_PD_BG BIT(9) # define DSI1_PHY_AFEC0_PD_DLANE2 BIT(9) # define DSI0_PHY_AFEC0_PD_DLANE1 BIT(8) # define DSI1_PHY_AFEC0_PD_DLANE3 BIT(8) # define DSI_PHY_AFEC0_PTATADJ_MASK VC4_MASK(7, 4) # define DSI_PHY_AFEC0_PTATADJ_SHIFT 4 # define DSI_PHY_AFEC0_CTATADJ_MASK VC4_MASK(3, 0) # define DSI_PHY_AFEC0_CTATADJ_SHIFT 0 #define DSI0_PHY_AFEC1 0x68 # define DSI0_PHY_AFEC1_IDR_DLANE1_MASK VC4_MASK(10, 8) # define DSI0_PHY_AFEC1_IDR_DLANE1_SHIFT 8 # define DSI0_PHY_AFEC1_IDR_DLANE0_MASK VC4_MASK(6, 4) # define DSI0_PHY_AFEC1_IDR_DLANE0_SHIFT 4 # define DSI0_PHY_AFEC1_IDR_CLANE_MASK VC4_MASK(2, 0) # define DSI0_PHY_AFEC1_IDR_CLANE_SHIFT 0 #define DSI0_TST_SEL 0x6c #define DSI0_TST_MON 0x70 #define DSI0_ID 0x74 # define DSI_ID_VALUE 0x00647369 #define DSI1_CTRL 0x00 # define DSI_CTRL_HS_CLKC_MASK VC4_MASK(15, 14) # define DSI_CTRL_HS_CLKC_SHIFT 14 # define DSI_CTRL_HS_CLKC_BYTE 0 # define DSI_CTRL_HS_CLKC_DDR2 1 # define DSI_CTRL_HS_CLKC_DDR 2 # define 
DSI_CTRL_RX_LPDT_EOT_DISABLE BIT(13) # define DSI_CTRL_LPDT_EOT_DISABLE BIT(12) # define DSI_CTRL_HSDT_EOT_DISABLE BIT(11) # define DSI_CTRL_SOFT_RESET_CFG BIT(10) # define DSI_CTRL_CAL_BYTE BIT(9) # define DSI_CTRL_INV_BYTE BIT(8) # define DSI_CTRL_CLR_LDF BIT(7) # define DSI0_CTRL_CLR_PBCF BIT(6) # define DSI1_CTRL_CLR_RXF BIT(6) # define DSI0_CTRL_CLR_CPBCF BIT(5) # define DSI1_CTRL_CLR_PDF BIT(5) # define DSI0_CTRL_CLR_PDF BIT(4) # define DSI1_CTRL_CLR_CDF BIT(4) # define DSI0_CTRL_CLR_CDF BIT(3) # define DSI0_CTRL_CTRL2 BIT(2) # define DSI1_CTRL_DISABLE_DISP_CRCC BIT(2) # define DSI0_CTRL_CTRL1 BIT(1) # define DSI1_CTRL_DISABLE_DISP_ECCC BIT(1) # define DSI0_CTRL_CTRL0 BIT(0) # define DSI1_CTRL_EN BIT(0) # define DSI0_CTRL_RESET_FIFOS (DSI_CTRL_CLR_LDF | \ DSI0_CTRL_CLR_PBCF | \ DSI0_CTRL_CLR_CPBCF | \ DSI0_CTRL_CLR_PDF | \ DSI0_CTRL_CLR_CDF) # define DSI1_CTRL_RESET_FIFOS (DSI_CTRL_CLR_LDF | \ DSI1_CTRL_CLR_RXF | \ DSI1_CTRL_CLR_PDF | \ DSI1_CTRL_CLR_CDF) #define DSI1_TXPKT2C 0x0c #define DSI1_TXPKT2H 0x10 #define DSI1_TXPKT_PIX_FIFO 0x20 #define DSI1_RXPKT_FIFO 0x24 #define DSI1_DISP0_CTRL 0x28 #define DSI1_INT_STAT 0x30 #define DSI1_INT_EN 0x34 /* State reporting bits. These mostly behave like INT_STAT, where * writing a 1 clears the bit. 
*/ #define DSI1_STAT 0x38 # define DSI1_STAT_PHY_D3_ULPS BIT(31) # define DSI1_STAT_PHY_D3_STOP BIT(30) # define DSI1_STAT_PHY_D2_ULPS BIT(29) # define DSI1_STAT_PHY_D2_STOP BIT(28) # define DSI1_STAT_PHY_D1_ULPS BIT(27) # define DSI1_STAT_PHY_D1_STOP BIT(26) # define DSI1_STAT_PHY_D0_ULPS BIT(25) # define DSI1_STAT_PHY_D0_STOP BIT(24) # define DSI1_STAT_FIFO_ERR BIT(23) # define DSI1_STAT_PHY_RXLPDT BIT(22) # define DSI1_STAT_PHY_RXTRIG BIT(21) # define DSI1_STAT_PHY_D0_LPDT BIT(20) /* Set when in forward direction */ # define DSI1_STAT_PHY_DIR BIT(19) # define DSI1_STAT_PHY_CLOCK_ULPS BIT(18) # define DSI1_STAT_PHY_CLOCK_HS BIT(17) # define DSI1_STAT_PHY_CLOCK_STOP BIT(16) # define DSI1_STAT_PR_TO BIT(15) # define DSI1_STAT_TA_TO BIT(14) # define DSI1_STAT_LPRX_TO BIT(13) # define DSI1_STAT_HSTX_TO BIT(12) # define DSI1_STAT_ERR_CONT_LP1 BIT(11) # define DSI1_STAT_ERR_CONT_LP0 BIT(10) # define DSI1_STAT_ERR_CONTROL BIT(9) # define DSI1_STAT_ERR_SYNC_ESC BIT(8) # define DSI1_STAT_RXPKT2 BIT(7) # define DSI1_STAT_RXPKT1 BIT(6) # define DSI1_STAT_TXPKT2_BUSY BIT(5) # define DSI1_STAT_TXPKT2_DONE BIT(4) # define DSI1_STAT_TXPKT2_END BIT(3) # define DSI1_STAT_TXPKT1_BUSY BIT(2) # define DSI1_STAT_TXPKT1_DONE BIT(1) # define DSI1_STAT_TXPKT1_END BIT(0) #define DSI1_HSTX_TO_CNT 0x3c #define DSI1_LPRX_TO_CNT 0x40 #define DSI1_TA_TO_CNT 0x44 #define DSI1_PR_TO_CNT 0x48 #define DSI1_PHYC 0x4c #define DSI1_HS_CLT0 0x50 # define DSI_HS_CLT0_CZERO_MASK VC4_MASK(26, 18) # define DSI_HS_CLT0_CZERO_SHIFT 18 # define DSI_HS_CLT0_CPRE_MASK VC4_MASK(17, 9) # define DSI_HS_CLT0_CPRE_SHIFT 9 # define DSI_HS_CLT0_CPREP_MASK VC4_MASK(8, 0) # define DSI_HS_CLT0_CPREP_SHIFT 0 #define DSI1_HS_CLT1 0x54 # define DSI_HS_CLT1_CTRAIL_MASK VC4_MASK(17, 9) # define DSI_HS_CLT1_CTRAIL_SHIFT 9 # define DSI_HS_CLT1_CPOST_MASK VC4_MASK(8, 0) # define DSI_HS_CLT1_CPOST_SHIFT 0 #define DSI1_HS_CLT2 0x58 # define DSI_HS_CLT2_WUP_MASK VC4_MASK(23, 0) # define DSI_HS_CLT2_WUP_SHIFT 0 #define 
DSI1_HS_DLT3 0x5c # define DSI_HS_DLT3_EXIT_MASK VC4_MASK(26, 18) # define DSI_HS_DLT3_EXIT_SHIFT 18 # define DSI_HS_DLT3_ZERO_MASK VC4_MASK(17, 9) # define DSI_HS_DLT3_ZERO_SHIFT 9 # define DSI_HS_DLT3_PRE_MASK VC4_MASK(8, 0) # define DSI_HS_DLT3_PRE_SHIFT 0 #define DSI1_HS_DLT4 0x60 # define DSI_HS_DLT4_ANLAT_MASK VC4_MASK(22, 18) # define DSI_HS_DLT4_ANLAT_SHIFT 18 # define DSI_HS_DLT4_TRAIL_MASK VC4_MASK(17, 9) # define DSI_HS_DLT4_TRAIL_SHIFT 9 # define DSI_HS_DLT4_LPX_MASK VC4_MASK(8, 0) # define DSI_HS_DLT4_LPX_SHIFT 0 #define DSI1_HS_DLT5 0x64 # define DSI_HS_DLT5_INIT_MASK VC4_MASK(23, 0) # define DSI_HS_DLT5_INIT_SHIFT 0 #define DSI1_HS_DLT6 0x68 # define DSI_HS_DLT6_TA_GET_MASK VC4_MASK(31, 24) # define DSI_HS_DLT6_TA_GET_SHIFT 24 # define DSI_HS_DLT6_TA_SURE_MASK VC4_MASK(23, 16) # define DSI_HS_DLT6_TA_SURE_SHIFT 16 # define DSI_HS_DLT6_TA_GO_MASK VC4_MASK(15, 8) # define DSI_HS_DLT6_TA_GO_SHIFT 8 # define DSI_HS_DLT6_LP_LPX_MASK VC4_MASK(7, 0) # define DSI_HS_DLT6_LP_LPX_SHIFT 0 #define DSI1_HS_DLT7 0x6c # define DSI_HS_DLT7_LP_WUP_MASK VC4_MASK(23, 0) # define DSI_HS_DLT7_LP_WUP_SHIFT 0 #define DSI1_PHY_AFEC0 0x70 #define DSI1_PHY_AFEC1 0x74 # define DSI1_PHY_AFEC1_ACTRL_DLANE3_MASK VC4_MASK(19, 16) # define DSI1_PHY_AFEC1_ACTRL_DLANE3_SHIFT 16 # define DSI1_PHY_AFEC1_ACTRL_DLANE2_MASK VC4_MASK(15, 12) # define DSI1_PHY_AFEC1_ACTRL_DLANE2_SHIFT 12 # define DSI1_PHY_AFEC1_ACTRL_DLANE1_MASK VC4_MASK(11, 8) # define DSI1_PHY_AFEC1_ACTRL_DLANE1_SHIFT 8 # define DSI1_PHY_AFEC1_ACTRL_DLANE0_MASK VC4_MASK(7, 4) # define DSI1_PHY_AFEC1_ACTRL_DLANE0_SHIFT 4 # define DSI1_PHY_AFEC1_ACTRL_CLANE_MASK VC4_MASK(3, 0) # define DSI1_PHY_AFEC1_ACTRL_CLANE_SHIFT 0 #define DSI1_TST_SEL 0x78 #define DSI1_TST_MON 0x7c #define DSI1_PHY_TST1 0x80 #define DSI1_PHY_TST2 0x84 #define DSI1_PHY_FIFO_STAT 0x88 /* Actually, all registers in the range that aren't otherwise claimed * will return the ID. 
*/ #define DSI1_ID 0x8c struct vc4_dsi_variant { /* Whether we're on bcm2835's DSI0 or DSI1. */ unsigned int port; bool broken_axi_workaround; const char *debugfs_name; const struct debugfs_reg32 *regs; size_t nregs; }; /* General DSI hardware state. */ struct vc4_dsi { struct vc4_encoder encoder; struct mipi_dsi_host dsi_host; struct kref kref; struct platform_device *pdev; struct drm_bridge *out_bridge; struct drm_bridge bridge; void __iomem *regs; struct dma_chan *reg_dma_chan; dma_addr_t reg_dma_paddr; u32 *reg_dma_mem; dma_addr_t reg_paddr; const struct vc4_dsi_variant *variant; /* DSI channel for the panel we're connected to. */ u32 channel; u32 lanes; u32 format; u32 divider; u32 mode_flags; /* Input clock from CPRMAN to the digital PHY, for the DSI * escape clock. */ struct clk *escape_clock; /* Input clock to the analog PHY, used to generate the DSI bit * clock. */ struct clk *pll_phy_clock; /* HS Clocks generated within the DSI analog PHY. */ struct clk_fixed_factor phy_clocks[3]; struct clk_hw_onecell_data *clk_onecell; /* Pixel clock output to the pixelvalve, generated from the HS * clock. */ struct clk *pixel_clock; struct completion xfer_completion; int xfer_result; struct debugfs_regset32 regset; }; #define host_to_dsi(host) \ container_of_const(host, struct vc4_dsi, dsi_host) #define to_vc4_dsi(_encoder) \ container_of_const(_encoder, struct vc4_dsi, encoder.base) #define bridge_to_vc4_dsi(_bridge) \ container_of_const(_bridge, struct vc4_dsi, bridge) static inline void dsi_dma_workaround_write(struct vc4_dsi *dsi, u32 offset, u32 val) { struct dma_chan *chan = dsi->reg_dma_chan; struct dma_async_tx_descriptor *tx; dma_cookie_t cookie; int ret; kunit_fail_current_test("Accessing a register in a unit test!\n"); /* DSI0 should be able to write normally. 
*/ if (!chan) { writel(val, dsi->regs + offset); return; } *dsi->reg_dma_mem = val; tx = chan->device->device_prep_dma_memcpy(chan, dsi->reg_paddr + offset, dsi->reg_dma_paddr, 4, 0); if (!tx) { DRM_ERROR("Failed to set up DMA register write\n"); return; } cookie = tx->tx_submit(tx); ret = dma_submit_error(cookie); if (ret) { DRM_ERROR("Failed to submit DMA: %d\n", ret); return; } ret = dma_sync_wait(chan, cookie); if (ret) DRM_ERROR("Failed to wait for DMA: %d\n", ret); } #define DSI_READ(offset) \ ({ \ kunit_fail_current_test("Accessing a register in a unit test!\n"); \ readl(dsi->regs + (offset)); \ }) #define DSI_WRITE(offset, val) dsi_dma_workaround_write(dsi, offset, val) #define DSI_PORT_READ(offset) \ DSI_READ(dsi->variant->port ? DSI1_##offset : DSI0_##offset) #define DSI_PORT_WRITE(offset, val) \ DSI_WRITE(dsi->variant->port ? DSI1_##offset : DSI0_##offset, val) #define DSI_PORT_BIT(bit) (dsi->variant->port ? DSI1_##bit : DSI0_##bit) static const struct debugfs_reg32 dsi0_regs[] = { VC4_REG32(DSI0_CTRL), VC4_REG32(DSI0_STAT), VC4_REG32(DSI0_HSTX_TO_CNT), VC4_REG32(DSI0_LPRX_TO_CNT), VC4_REG32(DSI0_TA_TO_CNT), VC4_REG32(DSI0_PR_TO_CNT), VC4_REG32(DSI0_DISP0_CTRL), VC4_REG32(DSI0_DISP1_CTRL), VC4_REG32(DSI0_INT_STAT), VC4_REG32(DSI0_INT_EN), VC4_REG32(DSI0_PHYC), VC4_REG32(DSI0_HS_CLT0), VC4_REG32(DSI0_HS_CLT1), VC4_REG32(DSI0_HS_CLT2), VC4_REG32(DSI0_HS_DLT3), VC4_REG32(DSI0_HS_DLT4), VC4_REG32(DSI0_HS_DLT5), VC4_REG32(DSI0_HS_DLT6), VC4_REG32(DSI0_HS_DLT7), VC4_REG32(DSI0_PHY_AFEC0), VC4_REG32(DSI0_PHY_AFEC1), VC4_REG32(DSI0_ID), }; static const struct debugfs_reg32 dsi1_regs[] = { VC4_REG32(DSI1_CTRL), VC4_REG32(DSI1_STAT), VC4_REG32(DSI1_HSTX_TO_CNT), VC4_REG32(DSI1_LPRX_TO_CNT), VC4_REG32(DSI1_TA_TO_CNT), VC4_REG32(DSI1_PR_TO_CNT), VC4_REG32(DSI1_DISP0_CTRL), VC4_REG32(DSI1_DISP1_CTRL), VC4_REG32(DSI1_INT_STAT), VC4_REG32(DSI1_INT_EN), VC4_REG32(DSI1_PHYC), VC4_REG32(DSI1_HS_CLT0), VC4_REG32(DSI1_HS_CLT1), VC4_REG32(DSI1_HS_CLT2), 
	VC4_REG32(DSI1_HS_DLT3),
	VC4_REG32(DSI1_HS_DLT4),
	VC4_REG32(DSI1_HS_DLT5),
	VC4_REG32(DSI1_HS_DLT6),
	VC4_REG32(DSI1_HS_DLT7),
	VC4_REG32(DSI1_PHY_AFEC0),
	VC4_REG32(DSI1_PHY_AFEC1),
	VC4_REG32(DSI1_ID),
};

/* Sets or clears the AFE's ULPS latch bit, which holds the analog
 * front end in the ULPS state so the DSI module itself can be
 * disabled (see vc4_dsi_ulps() below).
 */
static void vc4_dsi_latch_ulps(struct vc4_dsi *dsi, bool latch)
{
	u32 afec0 = DSI_PORT_READ(PHY_AFEC0);

	if (latch)
		afec0 |= DSI_PORT_BIT(PHY_AFEC0_LATCH_ULPS);
	else
		afec0 &= ~DSI_PORT_BIT(PHY_AFEC0_LATCH_ULPS);

	DSI_PORT_WRITE(PHY_AFEC0, afec0);
}

/* Enters or exits Ultra Low Power State. */
static void vc4_dsi_ulps(struct vc4_dsi *dsi, bool ulps)
{
	bool non_continuous = dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS;
	/* ULPS request bits for the clock lane (only when the clock is
	 * non-continuous) and for each configured data lane.
	 */
	u32 phyc_ulps = ((non_continuous ? DSI_PORT_BIT(PHYC_CLANE_ULPS) : 0) |
			 DSI_PHYC_DLANE0_ULPS |
			 (dsi->lanes > 1 ? DSI_PHYC_DLANE1_ULPS : 0) |
			 (dsi->lanes > 2 ? DSI_PHYC_DLANE2_ULPS : 0) |
			 (dsi->lanes > 3 ? DSI_PHYC_DLANE3_ULPS : 0));
	/* STAT bits to poll for confirmation of ULPS entry. */
	u32 stat_ulps = ((non_continuous ? DSI1_STAT_PHY_CLOCK_ULPS : 0) |
			 DSI1_STAT_PHY_D0_ULPS |
			 (dsi->lanes > 1 ? DSI1_STAT_PHY_D1_ULPS : 0) |
			 (dsi->lanes > 2 ? DSI1_STAT_PHY_D2_ULPS : 0) |
			 (dsi->lanes > 3 ? DSI1_STAT_PHY_D3_ULPS : 0));
	/* STAT bits to poll for confirmation of STOP state entry. */
	u32 stat_stop = ((non_continuous ? DSI1_STAT_PHY_CLOCK_STOP : 0) |
			 DSI1_STAT_PHY_D0_STOP |
			 (dsi->lanes > 1 ? DSI1_STAT_PHY_D1_STOP : 0) |
			 (dsi->lanes > 2 ? DSI1_STAT_PHY_D2_STOP : 0) |
			 (dsi->lanes > 3 ? DSI1_STAT_PHY_D3_STOP : 0));
	int ret;
	/* The AFE latch bit tracks whether we are currently in ULPS. */
	bool ulps_currently_enabled = (DSI_PORT_READ(PHY_AFEC0) &
				       DSI_PORT_BIT(PHY_AFEC0_LATCH_ULPS));

	if (ulps == ulps_currently_enabled)
		return;

	/* Clear the relevant STAT bits, request ULPS, then poll for it
	 * (200ms timeout).
	 */
	DSI_PORT_WRITE(STAT, stat_ulps);
	DSI_PORT_WRITE(PHYC, DSI_PORT_READ(PHYC) | phyc_ulps);
	ret = wait_for((DSI_PORT_READ(STAT) & stat_ulps) == stat_ulps, 200);
	if (ret) {
		dev_warn(&dsi->pdev->dev,
			 "Timeout waiting for DSI ULPS entry: STAT 0x%08x",
			 DSI_PORT_READ(STAT));
		/* Back out: drop the ULPS request and release the latch. */
		DSI_PORT_WRITE(PHYC, DSI_PORT_READ(PHYC) & ~phyc_ulps);
		vc4_dsi_latch_ulps(dsi, false);
		return;
	}

	/* The DSI module can't be disabled while the module is
	 * generating ULPS state.
	 * So, to be able to disable the
	 * module, we have the AFE latch the ULPS state and continue
	 * on to having the module enter STOP.
	 */
	vc4_dsi_latch_ulps(dsi, ulps);

	/* Clear the STOP STAT bits, drop the ULPS request, then poll
	 * for STOP-state confirmation (200ms timeout).
	 */
	DSI_PORT_WRITE(STAT, stat_stop);
	DSI_PORT_WRITE(PHYC, DSI_PORT_READ(PHYC) & ~phyc_ulps);
	ret = wait_for((DSI_PORT_READ(STAT) & stat_stop) == stat_stop, 200);
	if (ret) {
		dev_warn(&dsi->pdev->dev,
			 "Timeout waiting for DSI STOP entry: STAT 0x%08x",
			 DSI_PORT_READ(STAT));
		DSI_PORT_WRITE(PHYC, DSI_PORT_READ(PHYC) & ~phyc_ulps);
		return;
	}
}

/* Converts a high-speed timing requirement, given as a fixed number of
 * nanoseconds plus a number of unit intervals, into byte-clock cycles.
 * @ui_ns: nanoseconds per DSI unit interval.
 */
static u32 dsi_hs_timing(u32 ui_ns, u32 ns, u32 ui)
{
	/* The HS timings have to be rounded up to a multiple of 8
	 * because we're using the byte clock.
	 */
	return roundup(ui + DIV_ROUND_UP(ns, ui_ns), 8);
}

/* ESC always runs at 100Mhz. */
#define ESC_TIME_NS 10

/* Converts nanoseconds into escape-clock cycles, rounding up. */
static u32 dsi_esc_timing(u32 ns)
{
	return DIV_ROUND_UP(ns, ESC_TIME_NS);
}

/* Stops pixel output by clearing the display-0 enable bit; the rest of
 * the teardown happens in vc4_dsi_bridge_post_disable().
 */
static void vc4_dsi_bridge_disable(struct drm_bridge *bridge,
				   struct drm_bridge_state *state)
{
	struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge);
	u32 disp0_ctrl;

	disp0_ctrl = DSI_PORT_READ(DISP0_CTRL);
	disp0_ctrl &= ~DSI_DISP0_ENABLE;
	DSI_PORT_WRITE(DISP0_CTRL, disp0_ctrl);
}

/* Releases the clocks and the runtime-PM reference taken in
 * vc4_dsi_bridge_pre_enable().
 */
static void vc4_dsi_bridge_post_disable(struct drm_bridge *bridge,
					struct drm_bridge_state *state)
{
	struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge);
	struct device *dev = &dsi->pdev->dev;

	clk_disable_unprepare(dsi->pll_phy_clock);
	clk_disable_unprepare(dsi->escape_clock);
	clk_disable_unprepare(dsi->pixel_clock);

	pm_runtime_put(dev);
}

/* Extends the mode's blank intervals to handle BCM2835's integer-only
 * DSI PLL divider.
 *
 * On 2835, PLLD is set to 2Ghz, and may not be changed by the display
 * driver since most peripherals are hanging off of the PLLD_PER
 * divider.  PLLD_DSI1, which drives our DSI bit clock (and therefore
 * the pixel clock), only has an integer divider off of DSI.
 *
 * To get our panel mode to refresh at the expected 60Hz, we need to
 * extend the horizontal blank time.
This means we drive a * higher-than-expected clock rate to the panel, but that's what the * firmware does too. */ static bool vc4_dsi_bridge_mode_fixup(struct drm_bridge *bridge, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge); struct clk *phy_parent = clk_get_parent(dsi->pll_phy_clock); unsigned long parent_rate = clk_get_rate(phy_parent); unsigned long pixel_clock_hz = mode->clock * 1000; unsigned long pll_clock = pixel_clock_hz * dsi->divider; int divider; /* Find what divider gets us a faster clock than the requested * pixel clock. */ for (divider = 1; divider < 255; divider++) { if (parent_rate / (divider + 1) < pll_clock) break; } /* Now that we've picked a PLL divider, calculate back to its * pixel clock. */ pll_clock = parent_rate / divider; pixel_clock_hz = pll_clock / dsi->divider; adjusted_mode->clock = pixel_clock_hz / 1000; /* Given the new pixel clock, adjust HFP to keep vrefresh the same. */ adjusted_mode->htotal = adjusted_mode->clock * mode->htotal / mode->clock; adjusted_mode->hsync_end += adjusted_mode->htotal - mode->htotal; adjusted_mode->hsync_start += adjusted_mode->htotal - mode->htotal; return true; } static void vc4_dsi_bridge_pre_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_state) { struct drm_atomic_state *state = old_state->base.state; struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge); const struct drm_crtc_state *crtc_state; struct device *dev = &dsi->pdev->dev; const struct drm_display_mode *mode; struct drm_connector *connector; bool debug_dump_regs = false; unsigned long hs_clock; struct drm_crtc *crtc; u32 ui_ns; /* Minimum LP state duration in escape clock cycles. 
*/ u32 lpx = dsi_esc_timing(60); unsigned long pixel_clock_hz; unsigned long dsip_clock; unsigned long phy_clock; int ret; ret = pm_runtime_resume_and_get(dev); if (ret) { DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->variant->port); return; } if (debug_dump_regs) { struct drm_printer p = drm_info_printer(&dsi->pdev->dev); dev_info(&dsi->pdev->dev, "DSI regs before:\n"); drm_print_regset32(&p, &dsi->regset); } /* * Retrieve the CRTC adjusted mode. This requires a little dance to go * from the bridge to the encoder, to the connector and to the CRTC. */ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); crtc = drm_atomic_get_new_connector_state(state, connector)->crtc; crtc_state = drm_atomic_get_new_crtc_state(state, crtc); mode = &crtc_state->adjusted_mode; pixel_clock_hz = mode->clock * 1000; /* Round up the clk_set_rate() request slightly, since * PLLD_DSI1 is an integer divider and its rate selection will * never round up. */ phy_clock = (pixel_clock_hz + 1000) * dsi->divider; ret = clk_set_rate(dsi->pll_phy_clock, phy_clock); if (ret) { dev_err(&dsi->pdev->dev, "Failed to set phy clock to %ld: %d\n", phy_clock, ret); } /* Reset the DSI and all its fifos. */ DSI_PORT_WRITE(CTRL, DSI_CTRL_SOFT_RESET_CFG | DSI_PORT_BIT(CTRL_RESET_FIFOS)); DSI_PORT_WRITE(CTRL, DSI_CTRL_HSDT_EOT_DISABLE | DSI_CTRL_RX_LPDT_EOT_DISABLE); /* Clear all stat bits so we see what has happened during enable. */ DSI_PORT_WRITE(STAT, DSI_PORT_READ(STAT)); /* Set AFE CTR00/CTR1 to release powerdown of analog. 
*/ if (dsi->variant->port == 0) { u32 afec0 = (VC4_SET_FIELD(7, DSI_PHY_AFEC0_PTATADJ) | VC4_SET_FIELD(7, DSI_PHY_AFEC0_CTATADJ)); if (dsi->lanes < 2) afec0 |= DSI0_PHY_AFEC0_PD_DLANE1; if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO)) afec0 |= DSI0_PHY_AFEC0_RESET; DSI_PORT_WRITE(PHY_AFEC0, afec0); /* AFEC reset hold time */ mdelay(1); DSI_PORT_WRITE(PHY_AFEC1, VC4_SET_FIELD(6, DSI0_PHY_AFEC1_IDR_DLANE1) | VC4_SET_FIELD(6, DSI0_PHY_AFEC1_IDR_DLANE0) | VC4_SET_FIELD(6, DSI0_PHY_AFEC1_IDR_CLANE)); } else { u32 afec0 = (VC4_SET_FIELD(7, DSI_PHY_AFEC0_PTATADJ) | VC4_SET_FIELD(7, DSI_PHY_AFEC0_CTATADJ) | VC4_SET_FIELD(6, DSI1_PHY_AFEC0_IDR_CLANE) | VC4_SET_FIELD(6, DSI1_PHY_AFEC0_IDR_DLANE0) | VC4_SET_FIELD(6, DSI1_PHY_AFEC0_IDR_DLANE1) | VC4_SET_FIELD(6, DSI1_PHY_AFEC0_IDR_DLANE2) | VC4_SET_FIELD(6, DSI1_PHY_AFEC0_IDR_DLANE3)); if (dsi->lanes < 4) afec0 |= DSI1_PHY_AFEC0_PD_DLANE3; if (dsi->lanes < 3) afec0 |= DSI1_PHY_AFEC0_PD_DLANE2; if (dsi->lanes < 2) afec0 |= DSI1_PHY_AFEC0_PD_DLANE1; afec0 |= DSI1_PHY_AFEC0_RESET; DSI_PORT_WRITE(PHY_AFEC0, afec0); DSI_PORT_WRITE(PHY_AFEC1, 0); /* AFEC reset hold time */ mdelay(1); } ret = clk_prepare_enable(dsi->escape_clock); if (ret) { DRM_ERROR("Failed to turn on DSI escape clock: %d\n", ret); return; } ret = clk_prepare_enable(dsi->pll_phy_clock); if (ret) { DRM_ERROR("Failed to turn on DSI PLL: %d\n", ret); return; } hs_clock = clk_get_rate(dsi->pll_phy_clock); /* Yes, we set the DSI0P/DSI1P pixel clock to the byte rate, * not the pixel clock rate. DSIxP take from the APHY's byte, * DDR2, or DDR4 clock (we use byte) and feed into the PV at * that rate. Separately, a value derived from PIX_CLK_DIV * and HS_CLKC is fed into the PV to divide down to the actual * pixel clock for pushing pixels into DSI. 
*/ dsip_clock = phy_clock / 8; ret = clk_set_rate(dsi->pixel_clock, dsip_clock); if (ret) { dev_err(dev, "Failed to set pixel clock to %ldHz: %d\n", dsip_clock, ret); } ret = clk_prepare_enable(dsi->pixel_clock); if (ret) { DRM_ERROR("Failed to turn on DSI pixel clock: %d\n", ret); return; } /* How many ns one DSI unit interval is. Note that the clock * is DDR, so there's an extra divide by 2. */ ui_ns = DIV_ROUND_UP(500000000, hs_clock); DSI_PORT_WRITE(HS_CLT0, VC4_SET_FIELD(dsi_hs_timing(ui_ns, 262, 0), DSI_HS_CLT0_CZERO) | VC4_SET_FIELD(dsi_hs_timing(ui_ns, 0, 8), DSI_HS_CLT0_CPRE) | VC4_SET_FIELD(dsi_hs_timing(ui_ns, 38, 0), DSI_HS_CLT0_CPREP)); DSI_PORT_WRITE(HS_CLT1, VC4_SET_FIELD(dsi_hs_timing(ui_ns, 60, 0), DSI_HS_CLT1_CTRAIL) | VC4_SET_FIELD(dsi_hs_timing(ui_ns, 60, 52), DSI_HS_CLT1_CPOST)); DSI_PORT_WRITE(HS_CLT2, VC4_SET_FIELD(dsi_hs_timing(ui_ns, 1000000, 0), DSI_HS_CLT2_WUP)); DSI_PORT_WRITE(HS_DLT3, VC4_SET_FIELD(dsi_hs_timing(ui_ns, 100, 0), DSI_HS_DLT3_EXIT) | VC4_SET_FIELD(dsi_hs_timing(ui_ns, 105, 6), DSI_HS_DLT3_ZERO) | VC4_SET_FIELD(dsi_hs_timing(ui_ns, 40, 4), DSI_HS_DLT3_PRE)); DSI_PORT_WRITE(HS_DLT4, VC4_SET_FIELD(dsi_hs_timing(ui_ns, lpx * ESC_TIME_NS, 0), DSI_HS_DLT4_LPX) | VC4_SET_FIELD(max(dsi_hs_timing(ui_ns, 0, 8), dsi_hs_timing(ui_ns, 60, 4)), DSI_HS_DLT4_TRAIL) | VC4_SET_FIELD(0, DSI_HS_DLT4_ANLAT)); /* T_INIT is how long STOP is driven after power-up to * indicate to the slave (also coming out of power-up) that * master init is complete, and should be greater than the * maximum of two value: T_INIT,MASTER and T_INIT,SLAVE. The * D-PHY spec gives a minimum 100us for T_INIT,MASTER and * T_INIT,SLAVE, while allowing protocols on top of it to give * greater minimums. The vc4 firmware uses an extremely * conservative 5ms, and we maintain that here. 
*/ DSI_PORT_WRITE(HS_DLT5, VC4_SET_FIELD(dsi_hs_timing(ui_ns, 5 * 1000 * 1000, 0), DSI_HS_DLT5_INIT)); DSI_PORT_WRITE(HS_DLT6, VC4_SET_FIELD(lpx * 5, DSI_HS_DLT6_TA_GET) | VC4_SET_FIELD(lpx, DSI_HS_DLT6_TA_SURE) | VC4_SET_FIELD(lpx * 4, DSI_HS_DLT6_TA_GO) | VC4_SET_FIELD(lpx, DSI_HS_DLT6_LP_LPX)); DSI_PORT_WRITE(HS_DLT7, VC4_SET_FIELD(dsi_esc_timing(1000000), DSI_HS_DLT7_LP_WUP)); DSI_PORT_WRITE(PHYC, DSI_PHYC_DLANE0_ENABLE | (dsi->lanes >= 2 ? DSI_PHYC_DLANE1_ENABLE : 0) | (dsi->lanes >= 3 ? DSI_PHYC_DLANE2_ENABLE : 0) | (dsi->lanes >= 4 ? DSI_PHYC_DLANE3_ENABLE : 0) | DSI_PORT_BIT(PHYC_CLANE_ENABLE) | ((dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) ? 0 : DSI_PORT_BIT(PHYC_HS_CLK_CONTINUOUS)) | (dsi->variant->port == 0 ? VC4_SET_FIELD(lpx - 1, DSI0_PHYC_ESC_CLK_LPDT) : VC4_SET_FIELD(lpx - 1, DSI1_PHYC_ESC_CLK_LPDT))); DSI_PORT_WRITE(CTRL, DSI_PORT_READ(CTRL) | DSI_CTRL_CAL_BYTE); /* HS timeout in HS clock cycles: disabled. */ DSI_PORT_WRITE(HSTX_TO_CNT, 0); /* LP receive timeout in HS clocks. */ DSI_PORT_WRITE(LPRX_TO_CNT, 0xffffff); /* Bus turnaround timeout */ DSI_PORT_WRITE(TA_TO_CNT, 100000); /* Display reset sequence timeout */ DSI_PORT_WRITE(PR_TO_CNT, 100000); /* Set up DISP1 for transferring long command payloads through * the pixfifo. */ DSI_PORT_WRITE(DISP1_CTRL, VC4_SET_FIELD(DSI_DISP1_PFORMAT_32BIT_LE, DSI_DISP1_PFORMAT) | DSI_DISP1_ENABLE); /* Ungate the block. */ if (dsi->variant->port == 0) DSI_PORT_WRITE(CTRL, DSI_PORT_READ(CTRL) | DSI0_CTRL_CTRL0); else DSI_PORT_WRITE(CTRL, DSI_PORT_READ(CTRL) | DSI1_CTRL_EN); /* Bring AFE out of reset. 
*/ DSI_PORT_WRITE(PHY_AFEC0, DSI_PORT_READ(PHY_AFEC0) & ~DSI_PORT_BIT(PHY_AFEC0_RESET)); vc4_dsi_ulps(dsi, false); if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) { DSI_PORT_WRITE(DISP0_CTRL, VC4_SET_FIELD(dsi->divider, DSI_DISP0_PIX_CLK_DIV) | VC4_SET_FIELD(dsi->format, DSI_DISP0_PFORMAT) | VC4_SET_FIELD(DSI_DISP0_LP_STOP_PERFRAME, DSI_DISP0_LP_STOP_CTRL) | DSI_DISP0_ST_END); } else { DSI_PORT_WRITE(DISP0_CTRL, DSI_DISP0_COMMAND_MODE); } } static void vc4_dsi_bridge_enable(struct drm_bridge *bridge, struct drm_bridge_state *old_state) { struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge); bool debug_dump_regs = false; u32 disp0_ctrl; disp0_ctrl = DSI_PORT_READ(DISP0_CTRL); disp0_ctrl |= DSI_DISP0_ENABLE; DSI_PORT_WRITE(DISP0_CTRL, disp0_ctrl); if (debug_dump_regs) { struct drm_printer p = drm_info_printer(&dsi->pdev->dev); dev_info(&dsi->pdev->dev, "DSI regs after:\n"); drm_print_regset32(&p, &dsi->regset); } } static int vc4_dsi_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) { struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge); /* Attach the panel or bridge to the dsi bridge */ return drm_bridge_attach(bridge->encoder, dsi->out_bridge, &dsi->bridge, flags); } static ssize_t vc4_dsi_host_transfer(struct mipi_dsi_host *host, const struct mipi_dsi_msg *msg) { struct vc4_dsi *dsi = host_to_dsi(host); struct mipi_dsi_packet packet; u32 pkth = 0, pktc = 0; int i, ret; bool is_long = mipi_dsi_packet_format_is_long(msg->type); u32 cmd_fifo_len = 0, pix_fifo_len = 0; mipi_dsi_create_packet(&packet, msg); pkth |= VC4_SET_FIELD(packet.header[0], DSI_TXPKT1H_BC_DT); pkth |= VC4_SET_FIELD(packet.header[1] | (packet.header[2] << 8), DSI_TXPKT1H_BC_PARAM); if (is_long) { /* Divide data across the various FIFOs we have available. * The command FIFO takes byte-oriented data, but is of * limited size. The pixel FIFO (never actually used for * pixel data in reality) is word oriented, and substantially * larger. 
So, we use the pixel FIFO for most of the data, * sending the residual bytes in the command FIFO at the start. * * With this arrangement, the command FIFO will never get full. */ if (packet.payload_length <= 16) { cmd_fifo_len = packet.payload_length; pix_fifo_len = 0; } else { cmd_fifo_len = (packet.payload_length % DSI_PIX_FIFO_WIDTH); pix_fifo_len = ((packet.payload_length - cmd_fifo_len) / DSI_PIX_FIFO_WIDTH); } WARN_ON_ONCE(pix_fifo_len >= DSI_PIX_FIFO_DEPTH); pkth |= VC4_SET_FIELD(cmd_fifo_len, DSI_TXPKT1H_BC_CMDFIFO); } if (msg->rx_len) { pktc |= VC4_SET_FIELD(DSI_TXPKT1C_CMD_CTRL_RX, DSI_TXPKT1C_CMD_CTRL); } else { pktc |= VC4_SET_FIELD(DSI_TXPKT1C_CMD_CTRL_TX, DSI_TXPKT1C_CMD_CTRL); } for (i = 0; i < cmd_fifo_len; i++) DSI_PORT_WRITE(TXPKT_CMD_FIFO, packet.payload[i]); for (i = 0; i < pix_fifo_len; i++) { const u8 *pix = packet.payload + cmd_fifo_len + i * 4; DSI_PORT_WRITE(TXPKT_PIX_FIFO, pix[0] | pix[1] << 8 | pix[2] << 16 | pix[3] << 24); } if (msg->flags & MIPI_DSI_MSG_USE_LPM) pktc |= DSI_TXPKT1C_CMD_MODE_LP; if (is_long) pktc |= DSI_TXPKT1C_CMD_TYPE_LONG; /* Send one copy of the packet. Larger repeats are used for pixel * data in command mode. */ pktc |= VC4_SET_FIELD(1, DSI_TXPKT1C_CMD_REPEAT); pktc |= DSI_TXPKT1C_CMD_EN; if (pix_fifo_len) { pktc |= VC4_SET_FIELD(DSI_TXPKT1C_DISPLAY_NO_SECONDARY, DSI_TXPKT1C_DISPLAY_NO); } else { pktc |= VC4_SET_FIELD(DSI_TXPKT1C_DISPLAY_NO_SHORT, DSI_TXPKT1C_DISPLAY_NO); } /* Enable the appropriate interrupt for the transfer completion. 
*/ dsi->xfer_result = 0; reinit_completion(&dsi->xfer_completion); if (dsi->variant->port == 0) { DSI_PORT_WRITE(INT_STAT, DSI0_INT_CMDC_DONE_MASK | DSI1_INT_PHY_DIR_RTF); if (msg->rx_len) { DSI_PORT_WRITE(INT_EN, (DSI0_INTERRUPTS_ALWAYS_ENABLED | DSI0_INT_PHY_DIR_RTF)); } else { DSI_PORT_WRITE(INT_EN, (DSI0_INTERRUPTS_ALWAYS_ENABLED | VC4_SET_FIELD(DSI0_INT_CMDC_DONE_NO_REPEAT, DSI0_INT_CMDC_DONE))); } } else { DSI_PORT_WRITE(INT_STAT, DSI1_INT_TXPKT1_DONE | DSI1_INT_PHY_DIR_RTF); if (msg->rx_len) { DSI_PORT_WRITE(INT_EN, (DSI1_INTERRUPTS_ALWAYS_ENABLED | DSI1_INT_PHY_DIR_RTF)); } else { DSI_PORT_WRITE(INT_EN, (DSI1_INTERRUPTS_ALWAYS_ENABLED | DSI1_INT_TXPKT1_DONE)); } } /* Send the packet. */ DSI_PORT_WRITE(TXPKT1H, pkth); DSI_PORT_WRITE(TXPKT1C, pktc); if (!wait_for_completion_timeout(&dsi->xfer_completion, msecs_to_jiffies(1000))) { dev_err(&dsi->pdev->dev, "transfer interrupt wait timeout"); dev_err(&dsi->pdev->dev, "instat: 0x%08x\n", DSI_PORT_READ(INT_STAT)); ret = -ETIMEDOUT; } else { ret = dsi->xfer_result; } DSI_PORT_WRITE(INT_EN, DSI_PORT_BIT(INTERRUPTS_ALWAYS_ENABLED)); if (ret) goto reset_fifo_and_return; if (ret == 0 && msg->rx_len) { u32 rxpkt1h = DSI_PORT_READ(RXPKT1H); u8 *msg_rx = msg->rx_buf; if (rxpkt1h & DSI_RXPKT1H_PKT_TYPE_LONG) { u32 rxlen = VC4_GET_FIELD(rxpkt1h, DSI_RXPKT1H_BC_PARAM); if (rxlen != msg->rx_len) { DRM_ERROR("DSI returned %db, expecting %db\n", rxlen, (int)msg->rx_len); ret = -ENXIO; goto reset_fifo_and_return; } for (i = 0; i < msg->rx_len; i++) msg_rx[i] = DSI_READ(DSI1_RXPKT_FIFO); } else { /* FINISHME: Handle AWER */ msg_rx[0] = VC4_GET_FIELD(rxpkt1h, DSI_RXPKT1H_SHORT_0); if (msg->rx_len > 1) { msg_rx[1] = VC4_GET_FIELD(rxpkt1h, DSI_RXPKT1H_SHORT_1); } } } return ret; reset_fifo_and_return: DRM_ERROR("DSI transfer failed, resetting: %d\n", ret); DSI_PORT_WRITE(TXPKT1C, DSI_PORT_READ(TXPKT1C) & ~DSI_TXPKT1C_CMD_EN); udelay(1); DSI_PORT_WRITE(CTRL, DSI_PORT_READ(CTRL) | DSI_PORT_BIT(CTRL_RESET_FIFOS)); 
DSI_PORT_WRITE(TXPKT1C, 0); DSI_PORT_WRITE(INT_EN, DSI_PORT_BIT(INTERRUPTS_ALWAYS_ENABLED)); return ret; } static const struct component_ops vc4_dsi_ops; static int vc4_dsi_host_attach(struct mipi_dsi_host *host, struct mipi_dsi_device *device) { struct vc4_dsi *dsi = host_to_dsi(host); int ret; dsi->lanes = device->lanes; dsi->channel = device->channel; dsi->mode_flags = device->mode_flags; switch (device->format) { case MIPI_DSI_FMT_RGB888: dsi->format = DSI_PFORMAT_RGB888; dsi->divider = 24 / dsi->lanes; break; case MIPI_DSI_FMT_RGB666: dsi->format = DSI_PFORMAT_RGB666; dsi->divider = 24 / dsi->lanes; break; case MIPI_DSI_FMT_RGB666_PACKED: dsi->format = DSI_PFORMAT_RGB666_PACKED; dsi->divider = 18 / dsi->lanes; break; case MIPI_DSI_FMT_RGB565: dsi->format = DSI_PFORMAT_RGB565; dsi->divider = 16 / dsi->lanes; break; default: dev_err(&dsi->pdev->dev, "Unknown DSI format: %d.\n", dsi->format); return 0; } if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO)) { dev_err(&dsi->pdev->dev, "Only VIDEO mode panels supported currently.\n"); return 0; } drm_bridge_add(&dsi->bridge); ret = component_add(&dsi->pdev->dev, &vc4_dsi_ops); if (ret) { drm_bridge_remove(&dsi->bridge); return ret; } return 0; } static int vc4_dsi_host_detach(struct mipi_dsi_host *host, struct mipi_dsi_device *device) { struct vc4_dsi *dsi = host_to_dsi(host); component_del(&dsi->pdev->dev, &vc4_dsi_ops); drm_bridge_remove(&dsi->bridge); return 0; } static const struct mipi_dsi_host_ops vc4_dsi_host_ops = { .attach = vc4_dsi_host_attach, .detach = vc4_dsi_host_detach, .transfer = vc4_dsi_host_transfer, }; static const struct drm_bridge_funcs vc4_dsi_bridge_funcs = { .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_reset = drm_atomic_helper_bridge_reset, .atomic_pre_enable = vc4_dsi_bridge_pre_enable, .atomic_enable = vc4_dsi_bridge_enable, .atomic_disable = vc4_dsi_bridge_disable, .atomic_post_disable = 
			       vc4_dsi_bridge_post_disable,
	.attach = vc4_dsi_bridge_attach,
	.mode_fixup = vc4_dsi_bridge_mode_fixup,
};

/* Registers this port's register set with debugfs once the DRM device
 * is up.
 */
static int vc4_dsi_late_register(struct drm_encoder *encoder)
{
	struct drm_device *drm = encoder->dev;
	struct vc4_dsi *dsi = to_vc4_dsi(encoder);

	vc4_debugfs_add_regset32(drm, dsi->variant->debugfs_name, &dsi->regset);

	return 0;
}

static const struct drm_encoder_funcs vc4_dsi_encoder_funcs = {
	.late_register = vc4_dsi_late_register,
};

static const struct vc4_dsi_variant bcm2711_dsi1_variant = {
	.port = 1,
	.debugfs_name = "dsi1_regs",
	.regs = dsi1_regs,
	.nregs = ARRAY_SIZE(dsi1_regs),
};

static const struct vc4_dsi_variant bcm2835_dsi0_variant = {
	.port = 0,
	.debugfs_name = "dsi0_regs",
	.regs = dsi0_regs,
	.nregs = ARRAY_SIZE(dsi0_regs),
};

/* BCM2835's DSI1 needs register writes issued via DMA; see the
 * broken_axi_workaround handling in vc4_dsi_bind().
 */
static const struct vc4_dsi_variant bcm2835_dsi1_variant = {
	.port = 1,
	.broken_axi_workaround = true,
	.debugfs_name = "dsi1_regs",
	.regs = dsi1_regs,
	.nregs = ARRAY_SIZE(dsi1_regs),
};

static const struct of_device_id vc4_dsi_dt_match[] = {
	{ .compatible = "brcm,bcm2711-dsi1", &bcm2711_dsi1_variant },
	{ .compatible = "brcm,bcm2835-dsi0", &bcm2835_dsi0_variant },
	{ .compatible = "brcm,bcm2835-dsi1", &bcm2835_dsi1_variant },
	{}
};

/* Logs an error and marks the IRQ handled if @bit is set in @stat. */
static void dsi_handle_error(struct vc4_dsi *dsi,
			     irqreturn_t *ret, u32 stat, u32 bit,
			     const char *type)
{
	if (!(stat & bit))
		return;

	DRM_ERROR("DSI%d: %s error\n", dsi->variant->port, type);
	*ret = IRQ_HANDLED;
}

/*
 * Initial handler for port 1 where we need the reg_dma workaround.
 * The register DMA writes sleep, so we can't do it in the top half.
 * Instead we use IRQF_ONESHOT so that the IRQ gets disabled in the
 * parent interrupt controller until our interrupt thread is done.
 */
static irqreturn_t vc4_dsi_irq_defer_to_thread_handler(int irq, void *data)
{
	struct vc4_dsi *dsi = data;
	u32 stat = DSI_PORT_READ(INT_STAT);

	if (!stat)
		return IRQ_NONE;

	return IRQ_WAKE_THREAD;
}

/*
 * Normal IRQ handler for port 0, or the threaded IRQ handler for port
 * 1 where we need the reg_dma workaround.
 */
static irqreturn_t vc4_dsi_irq_handler(int irq, void *data)
{
	struct vc4_dsi *dsi = data;
	u32 stat = DSI_PORT_READ(INT_STAT);
	irqreturn_t ret = IRQ_NONE;

	/* Ack everything we saw up front (write-1-to-clear). */
	DSI_PORT_WRITE(INT_STAT, stat);

	dsi_handle_error(dsi, &ret, stat,
			 DSI_PORT_BIT(INT_ERR_SYNC_ESC), "LPDT sync");
	dsi_handle_error(dsi, &ret, stat,
			 DSI_PORT_BIT(INT_ERR_CONTROL), "data lane 0 sequence");
	dsi_handle_error(dsi, &ret, stat,
			 DSI_PORT_BIT(INT_ERR_CONT_LP0), "LP0 contention");
	dsi_handle_error(dsi, &ret, stat,
			 DSI_PORT_BIT(INT_ERR_CONT_LP1), "LP1 contention");
	dsi_handle_error(dsi, &ret, stat,
			 DSI_PORT_BIT(INT_HSTX_TO), "HSTX timeout");
	dsi_handle_error(dsi, &ret, stat,
			 DSI_PORT_BIT(INT_LPRX_TO), "LPRX timeout");
	dsi_handle_error(dsi, &ret, stat,
			 DSI_PORT_BIT(INT_TA_TO), "turnaround timeout");
	dsi_handle_error(dsi, &ret, stat,
			 DSI_PORT_BIT(INT_PR_TO), "peripheral reset timeout");

	/* Transfer completion: the "done" bit differs per port (packet
	 * done on DSI1, command-done mask on DSI0); direction-reversal
	 * (RTF) signals a read response.  HSTX timeout without a done
	 * bit completes the transfer with -ETIMEDOUT instead.
	 */
	if (stat & ((dsi->variant->port ? DSI1_INT_TXPKT1_DONE :
					  DSI0_INT_CMDC_DONE_MASK) |
		    DSI_PORT_BIT(INT_PHY_DIR_RTF))) {
		complete(&dsi->xfer_completion);
		ret = IRQ_HANDLED;
	} else if (stat & DSI_PORT_BIT(INT_HSTX_TO)) {
		complete(&dsi->xfer_completion);
		dsi->xfer_result = -ETIMEDOUT;
		ret = IRQ_HANDLED;
	}

	return ret;
}

/**
 * vc4_dsi_init_phy_clocks - Exposes clocks generated by the analog
 * PHY that are consumed by CPRMAN (clk-bcm2835.c).
* @dsi: DSI encoder */ static int vc4_dsi_init_phy_clocks(struct vc4_dsi *dsi) { struct device *dev = &dsi->pdev->dev; const char *parent_name = __clk_get_name(dsi->pll_phy_clock); static const struct { const char *name; int div; } phy_clocks[] = { { "byte", 8 }, { "ddr2", 4 }, { "ddr", 2 }, }; int i; dsi->clk_onecell = devm_kzalloc(dev, sizeof(*dsi->clk_onecell) + ARRAY_SIZE(phy_clocks) * sizeof(struct clk_hw *), GFP_KERNEL); if (!dsi->clk_onecell) return -ENOMEM; dsi->clk_onecell->num = ARRAY_SIZE(phy_clocks); for (i = 0; i < ARRAY_SIZE(phy_clocks); i++) { struct clk_fixed_factor *fix = &dsi->phy_clocks[i]; struct clk_init_data init; char clk_name[16]; int ret; snprintf(clk_name, sizeof(clk_name), "dsi%u_%s", dsi->variant->port, phy_clocks[i].name); /* We just use core fixed factor clock ops for the PHY * clocks. The clocks are actually gated by the * PHY_AFEC0_DDRCLK_EN bits, which we should be * setting if we use the DDR/DDR2 clocks. However, * vc4_dsi_encoder_enable() is setting up both AFEC0, * setting both our parent DSI PLL's rate and this * clock's rate, so it knows if DDR/DDR2 are going to * be used and could enable the gates itself. 
*/ fix->mult = 1; fix->div = phy_clocks[i].div; fix->hw.init = &init; memset(&init, 0, sizeof(init)); init.parent_names = &parent_name; init.num_parents = 1; init.name = clk_name; init.ops = &clk_fixed_factor_ops; ret = devm_clk_hw_register(dev, &fix->hw); if (ret) return ret; dsi->clk_onecell->hws[i] = &fix->hw; } return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get, dsi->clk_onecell); } static void vc4_dsi_dma_mem_release(void *ptr) { struct vc4_dsi *dsi = ptr; struct device *dev = &dsi->pdev->dev; dma_free_coherent(dev, 4, dsi->reg_dma_mem, dsi->reg_dma_paddr); dsi->reg_dma_mem = NULL; } static void vc4_dsi_dma_chan_release(void *ptr) { struct vc4_dsi *dsi = ptr; dma_release_channel(dsi->reg_dma_chan); dsi->reg_dma_chan = NULL; } static void vc4_dsi_release(struct kref *kref) { struct vc4_dsi *dsi = container_of(kref, struct vc4_dsi, kref); kfree(dsi); } static void vc4_dsi_get(struct vc4_dsi *dsi) { kref_get(&dsi->kref); } static void vc4_dsi_put(struct vc4_dsi *dsi) { kref_put(&dsi->kref, &vc4_dsi_release); } static void vc4_dsi_release_action(struct drm_device *drm, void *ptr) { struct vc4_dsi *dsi = ptr; vc4_dsi_put(dsi); } static int vc4_dsi_bind(struct device *dev, struct device *master, void *data) { struct platform_device *pdev = to_platform_device(dev); struct drm_device *drm = dev_get_drvdata(master); struct vc4_dsi *dsi = dev_get_drvdata(dev); struct drm_encoder *encoder = &dsi->encoder.base; int ret; vc4_dsi_get(dsi); ret = drmm_add_action_or_reset(drm, vc4_dsi_release_action, dsi); if (ret) return ret; dsi->variant = of_device_get_match_data(dev); dsi->encoder.type = dsi->variant->port ? 
VC4_ENCODER_TYPE_DSI1 : VC4_ENCODER_TYPE_DSI0; dsi->regs = vc4_ioremap_regs(pdev, 0); if (IS_ERR(dsi->regs)) return PTR_ERR(dsi->regs); dsi->regset.base = dsi->regs; dsi->regset.regs = dsi->variant->regs; dsi->regset.nregs = dsi->variant->nregs; if (DSI_PORT_READ(ID) != DSI_ID_VALUE) { dev_err(dev, "Port returned 0x%08x for ID instead of 0x%08x\n", DSI_PORT_READ(ID), DSI_ID_VALUE); return -ENODEV; } /* DSI1 on BCM2835/6/7 has a broken AXI slave that doesn't respond to * writes from the ARM. It does handle writes from the DMA engine, * so set up a channel for talking to it. */ if (dsi->variant->broken_axi_workaround) { dma_cap_mask_t dma_mask; dsi->reg_dma_mem = dma_alloc_coherent(dev, 4, &dsi->reg_dma_paddr, GFP_KERNEL); if (!dsi->reg_dma_mem) { DRM_ERROR("Failed to get DMA memory\n"); return -ENOMEM; } ret = devm_add_action_or_reset(dev, vc4_dsi_dma_mem_release, dsi); if (ret) return ret; dma_cap_zero(dma_mask); dma_cap_set(DMA_MEMCPY, dma_mask); dsi->reg_dma_chan = dma_request_chan_by_mask(&dma_mask); if (IS_ERR(dsi->reg_dma_chan)) { ret = PTR_ERR(dsi->reg_dma_chan); if (ret != -EPROBE_DEFER) DRM_ERROR("Failed to get DMA channel: %d\n", ret); return ret; } ret = devm_add_action_or_reset(dev, vc4_dsi_dma_chan_release, dsi); if (ret) return ret; /* Get the physical address of the device's registers. The * struct resource for the regs gives us the bus address * instead. */ dsi->reg_paddr = be32_to_cpup(of_get_address(dev->of_node, 0, NULL, NULL)); } init_completion(&dsi->xfer_completion); /* At startup enable error-reporting interrupts and nothing else. */ DSI_PORT_WRITE(INT_EN, DSI1_INTERRUPTS_ALWAYS_ENABLED); /* Clear any existing interrupt state. 
*/ DSI_PORT_WRITE(INT_STAT, DSI_PORT_READ(INT_STAT)); if (dsi->reg_dma_mem) ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0), vc4_dsi_irq_defer_to_thread_handler, vc4_dsi_irq_handler, IRQF_ONESHOT, "vc4 dsi", dsi); else ret = devm_request_irq(dev, platform_get_irq(pdev, 0), vc4_dsi_irq_handler, 0, "vc4 dsi", dsi); if (ret) { if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to get interrupt: %d\n", ret); return ret; } dsi->escape_clock = devm_clk_get(dev, "escape"); if (IS_ERR(dsi->escape_clock)) { ret = PTR_ERR(dsi->escape_clock); if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to get escape clock: %d\n", ret); return ret; } dsi->pll_phy_clock = devm_clk_get(dev, "phy"); if (IS_ERR(dsi->pll_phy_clock)) { ret = PTR_ERR(dsi->pll_phy_clock); if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to get phy clock: %d\n", ret); return ret; } dsi->pixel_clock = devm_clk_get(dev, "pixel"); if (IS_ERR(dsi->pixel_clock)) { ret = PTR_ERR(dsi->pixel_clock); if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to get pixel clock: %d\n", ret); return ret; } dsi->out_bridge = drmm_of_get_bridge(drm, dev->of_node, 0, 0); if (IS_ERR(dsi->out_bridge)) return PTR_ERR(dsi->out_bridge); /* The esc clock rate is supposed to always be 100Mhz. 
*/ ret = clk_set_rate(dsi->escape_clock, 100 * 1000000); if (ret) { dev_err(dev, "Failed to set esc clock: %d\n", ret); return ret; } ret = vc4_dsi_init_phy_clocks(dsi); if (ret) return ret; ret = drmm_encoder_init(drm, encoder, &vc4_dsi_encoder_funcs, DRM_MODE_ENCODER_DSI, NULL); if (ret) return ret; ret = devm_pm_runtime_enable(dev); if (ret) return ret; ret = drm_bridge_attach(encoder, &dsi->bridge, NULL, 0); if (ret) return ret; return 0; } static const struct component_ops vc4_dsi_ops = { .bind = vc4_dsi_bind, }; static int vc4_dsi_dev_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct vc4_dsi *dsi; dsi = kzalloc(sizeof(*dsi), GFP_KERNEL); if (!dsi) return -ENOMEM; dev_set_drvdata(dev, dsi); kref_init(&dsi->kref); dsi->pdev = pdev; dsi->bridge.funcs = &vc4_dsi_bridge_funcs; #ifdef CONFIG_OF dsi->bridge.of_node = dev->of_node; #endif dsi->bridge.type = DRM_MODE_CONNECTOR_DSI; dsi->dsi_host.ops = &vc4_dsi_host_ops; dsi->dsi_host.dev = dev; mipi_dsi_host_register(&dsi->dsi_host); return 0; } static void vc4_dsi_dev_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct vc4_dsi *dsi = dev_get_drvdata(dev); mipi_dsi_host_unregister(&dsi->dsi_host); vc4_dsi_put(dsi); } struct platform_driver vc4_dsi_driver = { .probe = vc4_dsi_dev_probe, .remove_new = vc4_dsi_dev_remove, .driver = { .name = "vc4_dsi", .of_match_table = vc4_dsi_dt_match, }, };
linux-master
drivers/gpu/drm/vc4/vc4_dsi.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <[email protected]>
 */

#include <linux/clk.h>
#include <linux/component.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include "vc4_drv.h"
#include "vc4_regs.h"

/* Register list exposed through the "v3d_regs" debugfs regset. */
static const struct debugfs_reg32 v3d_regs[] = {
	VC4_REG32(V3D_IDENT0),
	VC4_REG32(V3D_IDENT1),
	VC4_REG32(V3D_IDENT2),
	VC4_REG32(V3D_SCRATCH),
	VC4_REG32(V3D_L2CACTL),
	VC4_REG32(V3D_SLCACTL),
	VC4_REG32(V3D_INTCTL),
	VC4_REG32(V3D_INTENA),
	VC4_REG32(V3D_INTDIS),
	VC4_REG32(V3D_CT0CS),
	VC4_REG32(V3D_CT1CS),
	VC4_REG32(V3D_CT0EA),
	VC4_REG32(V3D_CT1EA),
	VC4_REG32(V3D_CT0CA),
	VC4_REG32(V3D_CT1CA),
	VC4_REG32(V3D_CT00RA0),
	VC4_REG32(V3D_CT01RA0),
	VC4_REG32(V3D_CT0LC),
	VC4_REG32(V3D_CT1LC),
	VC4_REG32(V3D_CT0PC),
	VC4_REG32(V3D_CT1PC),
	VC4_REG32(V3D_PCS),
	VC4_REG32(V3D_BFC),
	VC4_REG32(V3D_RFC),
	VC4_REG32(V3D_BPCA),
	VC4_REG32(V3D_BPCS),
	VC4_REG32(V3D_BPOA),
	VC4_REG32(V3D_BPOS),
	VC4_REG32(V3D_BXCF),
	VC4_REG32(V3D_SQRSV0),
	VC4_REG32(V3D_SQRSV1),
	VC4_REG32(V3D_SQCNTL),
	VC4_REG32(V3D_SRQPC),
	VC4_REG32(V3D_SRQUA),
	VC4_REG32(V3D_SRQUL),
	VC4_REG32(V3D_SRQCS),
	VC4_REG32(V3D_VPACNTL),
	VC4_REG32(V3D_VPMBASE),
	VC4_REG32(V3D_PCTRC),
	VC4_REG32(V3D_PCTRE),
	VC4_REG32(V3D_PCTR(0)),
	VC4_REG32(V3D_PCTRS(0)),
	VC4_REG32(V3D_PCTR(1)),
	VC4_REG32(V3D_PCTRS(1)),
	VC4_REG32(V3D_PCTR(2)),
	VC4_REG32(V3D_PCTRS(2)),
	VC4_REG32(V3D_PCTR(3)),
	VC4_REG32(V3D_PCTRS(3)),
	VC4_REG32(V3D_PCTR(4)),
	VC4_REG32(V3D_PCTRS(4)),
	VC4_REG32(V3D_PCTR(5)),
	VC4_REG32(V3D_PCTRS(5)),
	VC4_REG32(V3D_PCTR(6)),
	VC4_REG32(V3D_PCTRS(6)),
	VC4_REG32(V3D_PCTR(7)),
	VC4_REG32(V3D_PCTRS(7)),
	VC4_REG32(V3D_PCTR(8)),
	VC4_REG32(V3D_PCTRS(8)),
	VC4_REG32(V3D_PCTR(9)),
	VC4_REG32(V3D_PCTRS(9)),
	VC4_REG32(V3D_PCTR(10)),
	VC4_REG32(V3D_PCTRS(10)),
	VC4_REG32(V3D_PCTR(11)),
	VC4_REG32(V3D_PCTRS(11)),
	VC4_REG32(V3D_PCTR(12)),
	VC4_REG32(V3D_PCTRS(12)),
	VC4_REG32(V3D_PCTR(13)),
	VC4_REG32(V3D_PCTRS(13)),
	VC4_REG32(V3D_PCTR(14)),
	VC4_REG32(V3D_PCTRS(14)),
	VC4_REG32(V3D_PCTR(15)),
	VC4_REG32(V3D_PCTRS(15)),
	VC4_REG32(V3D_DBGE),
	VC4_REG32(V3D_FDBGO),
	VC4_REG32(V3D_FDBGB),
	VC4_REG32(V3D_FDBGR),
	VC4_REG32(V3D_FDBGS),
	VC4_REG32(V3D_ERRSTAT),
};

/* debugfs "v3d_ident" file: decode the V3D_IDENT1 register into a
 * human-readable summary (slice/TMU/QPU/semaphore counts).  Powers the
 * block up for the read and drops the reference afterwards.
 */
static int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
{
	struct drm_debugfs_entry *entry = m->private;
	struct drm_device *dev = entry->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret = vc4_v3d_pm_get(vc4);

	if (ret == 0) {
		uint32_t ident1 = V3D_READ(V3D_IDENT1);
		uint32_t nslc = VC4_GET_FIELD(ident1, V3D_IDENT1_NSLC);
		uint32_t tups = VC4_GET_FIELD(ident1, V3D_IDENT1_TUPS);
		uint32_t qups = VC4_GET_FIELD(ident1, V3D_IDENT1_QUPS);

		seq_printf(m, "Revision: %d\n",
			   VC4_GET_FIELD(ident1, V3D_IDENT1_REV));
		seq_printf(m, "Slices: %d\n", nslc);
		seq_printf(m, "TMUs: %d\n", nslc * tups);
		seq_printf(m, "QPUs: %d\n", nslc * qups);
		seq_printf(m, "Semaphores: %d\n",
			   VC4_GET_FIELD(ident1, V3D_IDENT1_NSEM));
		vc4_v3d_pm_put(vc4);
	}

	return 0;
}

/*
 * Wraps pm_runtime_get_sync() in a refcount, so that we can reliably
 * get the pm_runtime refcount to 0 in vc4_reset().
 */
int vc4_v3d_pm_get(struct vc4_dev *vc4)
{
	/* V3D on vc5 is driven by the separate v3d driver, not this one. */
	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	mutex_lock(&vc4->power_lock);
	/* Only the 0 -> 1 transition actually touches runtime PM. */
	if (vc4->power_refcount++ == 0) {
		int ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);

		if (ret < 0) {
			vc4->power_refcount--;
			mutex_unlock(&vc4->power_lock);
			return ret;
		}
	}
	mutex_unlock(&vc4->power_lock);

	return 0;
}

/* Drop a reference taken by vc4_v3d_pm_get(); on the last one, allow
 * the device to autosuspend.
 */
void vc4_v3d_pm_put(struct vc4_dev *vc4)
{
	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	mutex_lock(&vc4->power_lock);
	if (--vc4->power_refcount == 0) {
		pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
		pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
	}
	mutex_unlock(&vc4->power_lock);
}

static void vc4_v3d_init_hw(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Take all the memory that would have been reserved for user
	 * QPU programs, since we don't have an interface for running
	 * them, anyway.
	 */
	V3D_WRITE(V3D_VPMBASE, 0);
}

/* Allocate one free 512KB slot in the binner BO bitfield, waiting on
 * the last render job (and retrying) when none is free.  Returns the
 * 0-based slot index, or a negative errno.
 */
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4)
{
	struct drm_device *dev = &vc4->base;
	unsigned long irqflags;
	int slot;
	uint64_t seqno = 0;
	struct vc4_exec_info *exec;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

try_again:
	spin_lock_irqsave(&vc4->job_lock, irqflags);
	slot = ffs(~vc4->bin_alloc_used);
	if (slot != 0) {
		/* Switch from ffs() bit index to a 0-based index. */
		slot--;
		vc4->bin_alloc_used |= BIT(slot);
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return slot;
	}

	/* Couldn't find an open slot.  Wait for render to complete
	 * and try again.
	 */
	exec = vc4_last_render_job(vc4);
	if (exec)
		seqno = exec->seqno;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	if (seqno) {
		int ret = vc4_wait_for_seqno(dev, seqno, ~0ull, true);

		if (ret == 0)
			goto try_again;

		return ret;
	}

	return -ENOMEM;
}

/*
 * bin_bo_alloc() - allocates the memory that will be used for
 * tile binning.
 *
 * The binner has a limitation that the addresses in the tile state
 * buffer that point into the tile alloc buffer or binner overflow
 * memory only have 28 bits (256MB), and the top 4 on the bus for
 * tile alloc references end up coming from the tile state buffer's
 * address.
 *
 * To work around this, we allocate a single large buffer while V3D is
 * in use, make sure that it has the top 4 bits constant across its
 * entire extent, and then put the tile state, tile alloc, and binner
 * overflow memory inside that buffer.
 *
 * This creates a limitation where we may not be able to execute a job
 * if it doesn't fit within the buffer that we allocated up front.
 * However, it turns out that 16MB is "enough for anybody", and
 * real-world applications run into allocation failures from the
 * overall DMA pool before they make scenes complicated enough to run
 * out of bin space.
 */
static int bin_bo_alloc(struct vc4_dev *vc4)
{
	struct vc4_v3d *v3d = vc4->v3d;
	uint32_t size = 16 * 1024 * 1024;
	int ret = 0;
	struct list_head list;

	if (!v3d)
		return -ENODEV;

	/* We may need to try allocating more than once to get a BO
	 * that doesn't cross 256MB.  Track the ones we've allocated
	 * that failed so far, so that we can free them when we've got
	 * one that succeeded (if we freed them right away, our next
	 * allocation would probably be the same chunk of memory).
	 */
	INIT_LIST_HEAD(&list);

	while (true) {
		struct vc4_bo *bo = vc4_bo_create(&vc4->base, size, true,
						  VC4_BO_TYPE_BIN);

		if (IS_ERR(bo)) {
			ret = PTR_ERR(bo);

			dev_err(&v3d->pdev->dev,
				"Failed to allocate memory for tile binning: "
				"%d. You may need to enable DMA or give it "
				"more memory.", ret);
			break;
		}

		/* Check if this BO won't trigger the addressing bug. */
		if ((bo->base.dma_addr & 0xf0000000) ==
		    ((bo->base.dma_addr + bo->base.base.size - 1) & 0xf0000000)) {
			vc4->bin_bo = bo;

			/* Set up for allocating 512KB chunks of
			 * binner memory.  The biggest allocation we
			 * need to do is for the initial tile alloc +
			 * tile state buffer.  We can render to a
			 * maximum of ((2048*2048) / (32*32) = 4096
			 * tiles in a frame (until we do floating
			 * point rendering, at which point it would be
			 * 8192).  Tile state is 48b/tile (rounded to
			 * a page), and tile alloc is 32b/tile
			 * (rounded to a page), plus a page of extra,
			 * for a total of 320kb for our worst-case.
			 * We choose 512kb so that it divides evenly
			 * into our 16MB, and the rest of the 512kb
			 * will be used as storage for the overflow
			 * from the initial 32b CL per bin.
			 */
			vc4->bin_alloc_size = 512 * 1024;
			vc4->bin_alloc_used = 0;
			vc4->bin_alloc_overflow = 0;
			WARN_ON_ONCE(sizeof(vc4->bin_alloc_used) * 8 !=
				     bo->base.base.size / vc4->bin_alloc_size);

			kref_init(&vc4->bin_bo_kref);

			/* Enable the out-of-memory interrupt to set our
			 * newly-allocated binner BO, potentially from an
			 * already-pending-but-masked interrupt.
			 */
			V3D_WRITE(V3D_INTENA, V3D_INT_OUTOMEM);

			break;
		}

		/* Put it on the list to free later, and try again. */
		list_add(&bo->unref_head, &list);
	}

	/* Free all the BOs we allocated but didn't choose. */
	while (!list_empty(&list)) {
		struct vc4_bo *bo = list_last_entry(&list,
						    struct vc4_bo, unref_head);

		list_del(&bo->unref_head);
		drm_gem_object_put(&bo->base.base);
	}

	return ret;
}

/* Take a reference on the shared binner BO, allocating it on first use.
 * @used lets a caller hold at most one reference: once *used is set,
 * further calls are no-ops for that caller.
 */
int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used)
{
	int ret = 0;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	mutex_lock(&vc4->bin_bo_lock);

	if (used && *used)
		goto complete;

	if (vc4->bin_bo)
		kref_get(&vc4->bin_bo_kref);
	else
		ret = bin_bo_alloc(vc4);

	if (ret == 0 && used)
		*used = true;

complete:
	mutex_unlock(&vc4->bin_bo_lock);

	return ret;
}

/* kref release callback: drop the GEM reference and clear the pointer
 * once the last bin BO user is gone.
 */
static void bin_bo_release(struct kref *ref)
{
	struct vc4_dev *vc4 = container_of(ref, struct vc4_dev, bin_bo_kref);

	if (WARN_ON_ONCE(!vc4->bin_bo))
		return;

	drm_gem_object_put(&vc4->bin_bo->base.base);
	vc4->bin_bo = NULL;
}

void vc4_v3d_bin_bo_put(struct vc4_dev *vc4)
{
	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	mutex_lock(&vc4->bin_bo_lock);
	kref_put(&vc4->bin_bo_kref, bin_bo_release);
	mutex_unlock(&vc4->bin_bo_lock);
}

#ifdef CONFIG_PM
/* Runtime-PM suspend: quiesce V3D interrupts, then gate the clock. */
static int vc4_v3d_runtime_suspend(struct device *dev)
{
	struct vc4_v3d *v3d = dev_get_drvdata(dev);
	struct vc4_dev *vc4 = v3d->vc4;

	vc4_irq_disable(&vc4->base);

	clk_disable_unprepare(v3d->clk);

	return 0;
}

/* Runtime-PM resume: ungate the clock, re-init hardware state and
 * re-enable interrupts.
 */
static int vc4_v3d_runtime_resume(struct device *dev)
{
	struct vc4_v3d *v3d = dev_get_drvdata(dev);
	struct vc4_dev *vc4 = v3d->vc4;
	int ret;

	ret = clk_prepare_enable(v3d->clk);
	if (ret != 0)
		return ret;

	vc4_v3d_init_hw(&vc4->base);

	vc4_irq_enable(&vc4->base);

	return 0;
}
#endif

/* Register the V3D debugfs entries ("v3d_ident" and the register set). */
int vc4_v3d_debugfs_init(struct drm_minor *minor)
{
	struct drm_device *drm = minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(drm);
	struct vc4_v3d *v3d = vc4->v3d;

	if (!vc4->v3d)
		return -ENODEV;

	drm_debugfs_add_file(drm, "v3d_ident", vc4_v3d_debugfs_ident, NULL);

	vc4_debugfs_add_regset32(drm, "v3d_regs", &v3d->regset);

	return 0;
}

static int
vc4_v3d_bind(struct device *dev, struct device *master, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *drm = dev_get_drvdata(master);
	struct vc4_dev *vc4 = to_vc4_dev(drm);
	struct vc4_v3d *v3d = NULL;
	int ret;

	v3d = devm_kzalloc(&pdev->dev, sizeof(*v3d), GFP_KERNEL);
	if (!v3d)
		return -ENOMEM;

	dev_set_drvdata(dev, v3d);

	v3d->pdev = pdev;

	v3d->regs = vc4_ioremap_regs(pdev, 0);
	if (IS_ERR(v3d->regs))
		return PTR_ERR(v3d->regs);
	v3d->regset.base = v3d->regs;
	v3d->regset.regs = v3d_regs;
	v3d->regset.nregs = ARRAY_SIZE(v3d_regs);

	vc4->v3d = v3d;
	v3d->vc4 = vc4;

	v3d->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(v3d->clk)) {
		int ret = PTR_ERR(v3d->clk);

		if (ret == -ENOENT) {
			/* bcm2835 didn't have a clock reference in the DT. */
			ret = 0;
			v3d->clk = NULL;
		} else {
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "Failed to get V3D clock: %d\n",
					ret);
			return ret;
		}
	}

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;
	vc4->irq = ret;

	ret = devm_pm_runtime_enable(dev);
	if (ret)
		return ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret)
		return ret;

	/* Sanity-check that we are actually talking to a V3D block. */
	if (V3D_READ(V3D_IDENT0) != V3D_EXPECTED_IDENT0) {
		DRM_ERROR("V3D_IDENT0 read 0x%08x instead of 0x%08x\n",
			  V3D_READ(V3D_IDENT0), V3D_EXPECTED_IDENT0);
		ret = -EINVAL;
		goto err_put_runtime_pm;
	}

	/* Reset the binner overflow address/size at setup, to be sure
	 * we don't reuse an old one.
	 */
	V3D_WRITE(V3D_BPOA, 0);
	V3D_WRITE(V3D_BPOS, 0);

	ret = vc4_irq_install(drm, vc4->irq);
	if (ret) {
		DRM_ERROR("Failed to install IRQ handler\n");
		goto err_put_runtime_pm;
	}

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 40); /* a little over 2 frames. */

	return 0;

err_put_runtime_pm:
	pm_runtime_put(dev);

	return ret;
}

static void vc4_v3d_unbind(struct device *dev, struct device *master,
			   void *data)
{
	struct drm_device *drm = dev_get_drvdata(master);
	struct vc4_dev *vc4 = to_vc4_dev(drm);

	vc4_irq_uninstall(drm);

	/* Disable the binner's overflow memory address, so the next
	 * driver probe (if any) doesn't try to reuse our old
	 * allocation.
	 */
	V3D_WRITE(V3D_BPOA, 0);
	V3D_WRITE(V3D_BPOS, 0);

	vc4->v3d = NULL;
}

static const struct dev_pm_ops vc4_v3d_pm_ops = {
	SET_RUNTIME_PM_OPS(vc4_v3d_runtime_suspend, vc4_v3d_runtime_resume, NULL)
};

static const struct component_ops vc4_v3d_ops = {
	.bind = vc4_v3d_bind,
	.unbind = vc4_v3d_unbind,
};

/* Platform probe/remove just register with the component framework;
 * real setup is deferred to vc4_v3d_bind().
 */
static int vc4_v3d_dev_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &vc4_v3d_ops);
}

static void vc4_v3d_dev_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &vc4_v3d_ops);
}

const struct of_device_id vc4_v3d_dt_match[] = {
	{ .compatible = "brcm,bcm2835-v3d" },
	{ .compatible = "brcm,cygnus-v3d" },
	{ .compatible = "brcm,vc4-v3d" },
	{}
};

struct platform_driver vc4_v3d_driver = {
	.probe = vc4_v3d_dev_probe,
	.remove_new = vc4_v3d_dev_remove,
	.driver = {
		.name = "vc4_v3d",
		.of_match_table = vc4_v3d_dt_match,
		.pm = &vc4_v3d_pm_ops,
	},
};
linux-master
drivers/gpu/drm/vc4/vc4_v3d.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2015 Broadcom */ /** * DOC: VC4 HVS module. * * The Hardware Video Scaler (HVS) is the piece of hardware that does * translation, scaling, colorspace conversion, and compositing of * pixels stored in framebuffers into a FIFO of pixels going out to * the Pixel Valve (CRTC). It operates at the system clock rate (the * system audio clock gate, specifically), which is much higher than * the pixel clock rate. * * There is a single global HVS, with multiple output FIFOs that can * be consumed by the PVs. This file just manages the resources for * the HVS, while the vc4_crtc.c code actually drives HVS setup for * each CRTC. */ #include <linux/bitfield.h> #include <linux/clk.h> #include <linux/component.h> #include <linux/platform_device.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> #include <drm/drm_vblank.h> #include <soc/bcm2835/raspberrypi-firmware.h> #include "vc4_drv.h" #include "vc4_regs.h" static const struct debugfs_reg32 hvs_regs[] = { VC4_REG32(SCALER_DISPCTRL), VC4_REG32(SCALER_DISPSTAT), VC4_REG32(SCALER_DISPID), VC4_REG32(SCALER_DISPECTRL), VC4_REG32(SCALER_DISPPROF), VC4_REG32(SCALER_DISPDITHER), VC4_REG32(SCALER_DISPEOLN), VC4_REG32(SCALER_DISPLIST0), VC4_REG32(SCALER_DISPLIST1), VC4_REG32(SCALER_DISPLIST2), VC4_REG32(SCALER_DISPLSTAT), VC4_REG32(SCALER_DISPLACT0), VC4_REG32(SCALER_DISPLACT1), VC4_REG32(SCALER_DISPLACT2), VC4_REG32(SCALER_DISPCTRL0), VC4_REG32(SCALER_DISPBKGND0), VC4_REG32(SCALER_DISPSTAT0), VC4_REG32(SCALER_DISPBASE0), VC4_REG32(SCALER_DISPCTRL1), VC4_REG32(SCALER_DISPBKGND1), VC4_REG32(SCALER_DISPSTAT1), VC4_REG32(SCALER_DISPBASE1), VC4_REG32(SCALER_DISPCTRL2), VC4_REG32(SCALER_DISPBKGND2), VC4_REG32(SCALER_DISPSTAT2), VC4_REG32(SCALER_DISPBASE2), VC4_REG32(SCALER_DISPALPHA2), VC4_REG32(SCALER_OLEDOFFS), VC4_REG32(SCALER_OLEDCOEF0), VC4_REG32(SCALER_OLEDCOEF1), VC4_REG32(SCALER_OLEDCOEF2), }; void vc4_hvs_dump_state(struct vc4_hvs *hvs) { struct drm_device *drm = 
&hvs->vc4->base; struct drm_printer p = drm_info_printer(&hvs->pdev->dev); int idx, i; if (!drm_dev_enter(drm, &idx)) return; drm_print_regset32(&p, &hvs->regset); DRM_INFO("HVS ctx:\n"); for (i = 0; i < 64; i += 4) { DRM_INFO("0x%08x (%s): 0x%08x 0x%08x 0x%08x 0x%08x\n", i * 4, i < HVS_BOOTLOADER_DLIST_END ? "B" : "D", readl((u32 __iomem *)hvs->dlist + i + 0), readl((u32 __iomem *)hvs->dlist + i + 1), readl((u32 __iomem *)hvs->dlist + i + 2), readl((u32 __iomem *)hvs->dlist + i + 3)); } drm_dev_exit(idx); } static int vc4_hvs_debugfs_underrun(struct seq_file *m, void *data) { struct drm_debugfs_entry *entry = m->private; struct drm_device *dev = entry->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_printer p = drm_seq_file_printer(m); drm_printf(&p, "%d\n", atomic_read(&vc4->underrun)); return 0; } static int vc4_hvs_debugfs_dlist(struct seq_file *m, void *data) { struct drm_debugfs_entry *entry = m->private; struct drm_device *dev = entry->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_hvs *hvs = vc4->hvs; struct drm_printer p = drm_seq_file_printer(m); unsigned int next_entry_start = 0; unsigned int i, j; u32 dlist_word, dispstat; for (i = 0; i < SCALER_CHANNELS_COUNT; i++) { dispstat = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTATX(i)), SCALER_DISPSTATX_MODE); if (dispstat == SCALER_DISPSTATX_MODE_DISABLED || dispstat == SCALER_DISPSTATX_MODE_EOF) { drm_printf(&p, "HVS chan %u disabled\n", i); continue; } drm_printf(&p, "HVS chan %u:\n", i); for (j = HVS_READ(SCALER_DISPLISTX(i)); j < 256; j++) { dlist_word = readl((u32 __iomem *)vc4->hvs->dlist + j); drm_printf(&p, "dlist: %02d: 0x%08x\n", j, dlist_word); if (!next_entry_start || next_entry_start == j) { if (dlist_word & SCALER_CTL0_END) break; next_entry_start = j + VC4_GET_FIELD(dlist_word, SCALER_CTL0_SIZE); } } } return 0; } /* The filter kernel is composed of dwords each containing 3 9-bit * signed integers packed next to each other. 
 */
#define VC4_INT_TO_COEFF(coeff)	(coeff & 0x1ff)
#define VC4_PPF_FILTER_WORD(c0, c1, c2)				\
	((((c0) & 0x1ff) << 0) |				\
	 (((c1) & 0x1ff) << 9) |				\
	 (((c2) & 0x1ff) << 18))

/* The whole filter kernel is arranged as the coefficients 0-16 going
 * up, then a pad, then 17-31 going down and reversed within the
 * dwords.  This means that a linear phase kernel (where it's
 * symmetrical at the boundary between 15 and 16) has the last 5
 * dwords matching the first 5, but reversed.
 */
#define VC4_LINEAR_PHASE_KERNEL(c0, c1, c2, c3, c4, c5, c6, c7, c8,	\
				c9, c10, c11, c12, c13, c14, c15)	\
	{VC4_PPF_FILTER_WORD(c0, c1, c2),				\
	 VC4_PPF_FILTER_WORD(c3, c4, c5),				\
	 VC4_PPF_FILTER_WORD(c6, c7, c8),				\
	 VC4_PPF_FILTER_WORD(c9, c10, c11),				\
	 VC4_PPF_FILTER_WORD(c12, c13, c14),				\
	 VC4_PPF_FILTER_WORD(c15, c15, 0)}

#define VC4_LINEAR_PHASE_KERNEL_DWORDS 6
#define VC4_KERNEL_DWORDS (VC4_LINEAR_PHASE_KERNEL_DWORDS * 2 - 1)

/* Recommended B=1/3, C=1/3 filter choice from Mitchell/Netravali.
 * http://www.cs.utexas.edu/~fussell/courses/cs384g/lectures/mitchell/Mitchell.pdf
 */
static const u32 mitchell_netravali_1_3_1_3_kernel[] =
	VC4_LINEAR_PHASE_KERNEL(0, -2, -6, -8, -10, -8, -3, 2, 18, 50,
				82, 119, 155, 187, 213, 227);

/* Reserve dlist space for a scaler filter kernel and write it out,
 * mirroring the first half into the second per the linear-phase layout
 * described above.
 */
static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs,
					struct drm_mm_node *space,
					const u32 *kernel)
{
	int ret, i;
	u32 __iomem *dst_kernel;

	/*
	 * NOTE: We don't need a call to drm_dev_enter()/drm_dev_exit()
	 * here since that function is only called from vc4_hvs_bind().
	 */
	ret = drm_mm_insert_node(&hvs->dlist_mm, space, VC4_KERNEL_DWORDS);
	if (ret) {
		DRM_ERROR("Failed to allocate space for filter kernel: %d\n",
			  ret);
		return ret;
	}

	dst_kernel = hvs->dlist + space->start;

	for (i = 0; i < VC4_KERNEL_DWORDS; i++) {
		if (i < VC4_LINEAR_PHASE_KERNEL_DWORDS)
			writel(kernel[i], &dst_kernel[i]);
		else {
			/* Mirror the first half for the tail dwords. */
			writel(kernel[VC4_KERNEL_DWORDS - i - 1],
			       &dst_kernel[i]);
		}
	}

	return 0;
}

/* Push the CRTC's cached R/G/B gamma tables into the HVS LUT SRAM for
 * its assigned channel.
 */
static void vc4_hvs_lut_load(struct vc4_hvs *hvs,
			     struct vc4_crtc *vc4_crtc)
{
	struct drm_device *drm = &hvs->vc4->base;
	struct drm_crtc *crtc = &vc4_crtc->base;
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
	int idx;
	u32 i;

	if (!drm_dev_enter(drm, &idx))
		return;

	/* The LUT memory is laid out with each HVS channel in order,
	 * each of which takes 256 writes for R, 256 for G, then 256
	 * for B.
	 */
	HVS_WRITE(SCALER_GAMADDR, SCALER_GAMADDR_AUTOINC |
		  (vc4_state->assigned_channel * 3 * crtc->gamma_size));

	for (i = 0; i < crtc->gamma_size; i++)
		HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_r[i]);
	for (i = 0; i < crtc->gamma_size; i++)
		HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_g[i]);
	for (i = 0; i < crtc->gamma_size; i++)
		HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]);

	drm_dev_exit(idx);
}

/* Convert the DRM gamma LUT to the 8-bit per-component tables the HVS
 * uses, then load them into hardware.
 */
static void vc4_hvs_update_gamma_lut(struct vc4_hvs *hvs,
				     struct vc4_crtc *vc4_crtc)
{
	struct drm_crtc_state *crtc_state = vc4_crtc->base.state;
	struct drm_color_lut *lut = crtc_state->gamma_lut->data;
	u32 length = drm_color_lut_size(crtc_state->gamma_lut);
	u32 i;

	for (i = 0; i < length; i++) {
		vc4_crtc->lut_r[i] = drm_color_lut_extract(lut[i].red, 8);
		vc4_crtc->lut_g[i] = drm_color_lut_extract(lut[i].green, 8);
		vc4_crtc->lut_b[i] = drm_color_lut_extract(lut[i].blue, 8);
	}

	vc4_hvs_lut_load(hvs, vc4_crtc);
}

/* Read the frame counter for a FIFO (0-2); returns 0 for unknown FIFOs
 * or when the device is gone.
 */
u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo)
{
	struct drm_device *drm = &hvs->vc4->base;
	u8 field = 0;
	int idx;

	if (!drm_dev_enter(drm, &idx))
		return 0;

	switch (fifo) {
	case 0:
		field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT1),
				      SCALER_DISPSTAT1_FRCNT0);
		break;
	case 1:
		field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT1),
				      SCALER_DISPSTAT1_FRCNT1);
		break;
	case 2:
		field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT2),
				      SCALER_DISPSTAT2_FRCNT2);
		break;
	}

	drm_dev_exit(idx);
	return field;
}

/* Map an output (PV) index to the HVS FIFO feeding it.  On vc4 the
 * mapping is 1:1; on vc5 it depends on the mux fields read back from
 * the DISP registers.  Returns -EPIPE when no FIFO feeds the output.
 */
int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output)
{
	struct vc4_dev *vc4 = hvs->vc4;
	u32 reg;
	int ret;

	if (!vc4->is_vc5)
		return output;

	/*
	 * NOTE: We should probably use drm_dev_enter()/drm_dev_exit()
	 * here, but this function is only used during the DRM device
	 * initialization, so we should be fine.
	 */
	switch (output) {
	case 0:
		return 0;

	case 1:
		return 1;

	case 2:
		reg = HVS_READ(SCALER_DISPECTRL);
		ret = FIELD_GET(SCALER_DISPECTRL_DSP2_MUX_MASK, reg);
		if (ret == 0)
			return 2;

		return 0;

	case 3:
		reg = HVS_READ(SCALER_DISPCTRL);
		ret = FIELD_GET(SCALER_DISPCTRL_DSP3_MUX_MASK, reg);
		if (ret == 3)
			return -EPIPE;

		return ret;

	case 4:
		reg = HVS_READ(SCALER_DISPEOLN);
		ret = FIELD_GET(SCALER_DISPEOLN_DSP4_MUX_MASK, reg);
		if (ret == 3)
			return -EPIPE;

		return ret;

	case 5:
		reg = HVS_READ(SCALER_DISPDITHER);
		ret = FIELD_GET(SCALER_DISPDITHER_DSP5_MUX_MASK, reg);
		if (ret == 3)
			return -EPIPE;

		return ret;

	default:
		return -EPIPE;
	}
}

/* Reset and configure one HVS channel for the given mode. */
static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
				struct drm_display_mode *mode, bool oneshot)
{
	struct vc4_dev *vc4 = hvs->vc4;
	struct drm_device *drm = &vc4->base;
	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
	struct vc4_crtc_state *vc4_crtc_state = to_vc4_crtc_state(crtc->state);
	unsigned int chan = vc4_crtc_state->assigned_channel;
	bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE;
	u32 dispbkgndx;
	u32 dispctrl;
	int idx;

	if (!drm_dev_enter(drm, &idx))
		return -ENODEV;

	/* Pulse the channel through reset before configuring it. */
	HVS_WRITE(SCALER_DISPCTRLX(chan), 0);
	HVS_WRITE(SCALER_DISPCTRLX(chan), SCALER_DISPCTRLX_RESET);
	HVS_WRITE(SCALER_DISPCTRLX(chan), 0);

	/* Turn on the scaler, which will wait for vstart to start
	 * compositing.
	 * When feeding the transposer, we should operate in oneshot
	 * mode.
*/ dispctrl = SCALER_DISPCTRLX_ENABLE; dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(chan)); if (!vc4->is_vc5) { dispctrl |= VC4_SET_FIELD(mode->hdisplay, SCALER_DISPCTRLX_WIDTH) | VC4_SET_FIELD(mode->vdisplay, SCALER_DISPCTRLX_HEIGHT) | (oneshot ? SCALER_DISPCTRLX_ONESHOT : 0); dispbkgndx |= SCALER_DISPBKGND_AUTOHS; } else { dispctrl |= VC4_SET_FIELD(mode->hdisplay, SCALER5_DISPCTRLX_WIDTH) | VC4_SET_FIELD(mode->vdisplay, SCALER5_DISPCTRLX_HEIGHT) | (oneshot ? SCALER5_DISPCTRLX_ONESHOT : 0); dispbkgndx &= ~SCALER5_DISPBKGND_BCK2BCK; } HVS_WRITE(SCALER_DISPCTRLX(chan), dispctrl); dispbkgndx &= ~SCALER_DISPBKGND_GAMMA; dispbkgndx &= ~SCALER_DISPBKGND_INTERLACE; HVS_WRITE(SCALER_DISPBKGNDX(chan), dispbkgndx | ((!vc4->is_vc5) ? SCALER_DISPBKGND_GAMMA : 0) | (interlace ? SCALER_DISPBKGND_INTERLACE : 0)); /* Reload the LUT, since the SRAMs would have been disabled if * all CRTCs had SCALER_DISPBKGND_GAMMA unset at once. */ vc4_hvs_lut_load(hvs, vc4_crtc); drm_dev_exit(idx); return 0; } void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int chan) { struct drm_device *drm = &hvs->vc4->base; int idx; if (!drm_dev_enter(drm, &idx)) return; if (HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_ENABLE) goto out; HVS_WRITE(SCALER_DISPCTRLX(chan), HVS_READ(SCALER_DISPCTRLX(chan)) | SCALER_DISPCTRLX_RESET); HVS_WRITE(SCALER_DISPCTRLX(chan), HVS_READ(SCALER_DISPCTRLX(chan)) & ~SCALER_DISPCTRLX_ENABLE); /* Once we leave, the scaler should be disabled and its fifo empty. 
*/ WARN_ON_ONCE(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_RESET); WARN_ON_ONCE(VC4_GET_FIELD(HVS_READ(SCALER_DISPSTATX(chan)), SCALER_DISPSTATX_MODE) != SCALER_DISPSTATX_MODE_DISABLED); WARN_ON_ONCE((HVS_READ(SCALER_DISPSTATX(chan)) & (SCALER_DISPSTATX_FULL | SCALER_DISPSTATX_EMPTY)) != SCALER_DISPSTATX_EMPTY); out: drm_dev_exit(idx); } int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state); struct drm_device *dev = crtc->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_plane *plane; unsigned long flags; const struct drm_plane_state *plane_state; u32 dlist_count = 0; int ret; /* The pixelvalve can only feed one encoder (and encoders are * 1:1 with connectors.) */ if (hweight32(crtc_state->connector_mask) > 1) return -EINVAL; drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) dlist_count += vc4_plane_dlist_size(plane_state); dlist_count++; /* Account for SCALER_CTL0_END. 
			*/
	spin_lock_irqsave(&vc4->hvs->mm_lock, flags);
	ret = drm_mm_insert_node(&vc4->hvs->dlist_mm, &vc4_state->mm,
				 dlist_count);
	spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags);
	if (ret)
		return ret;

	return 0;
}

/* Point the channel's DISPLIST register at this CRTC state's dlist. */
static void vc4_hvs_install_dlist(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
	int idx;

	if (!drm_dev_enter(dev, &idx))
		return;

	HVS_WRITE(SCALER_DISPLISTX(vc4_state->assigned_channel),
		  vc4_state->mm.start);

	drm_dev_exit(idx);
}

/* Arm the pending vblank event (unless the TXP hasn't armed it yet) and
 * record the dlist offset the IRQ handler should report as current.
 */
static void vc4_hvs_update_dlist(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
	unsigned long flags;

	if (crtc->state->event) {
		crtc->state->event->pipe = drm_crtc_index(crtc);

		WARN_ON(drm_crtc_vblank_get(crtc) != 0);

		spin_lock_irqsave(&dev->event_lock, flags);

		if (!vc4_crtc->feeds_txp || vc4_state->txp_armed) {
			vc4_crtc->event = crtc->state->event;
			crtc->state->event = NULL;
		}

		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	spin_lock_irqsave(&vc4_crtc->irq_lock, flags);
	vc4_crtc->current_dlist = vc4_state->mm.start;
	spin_unlock_irqrestore(&vc4_crtc->irq_lock, flags);
}

/* Publish the channel assignment for the IRQ handler before the flush. */
void vc4_hvs_atomic_begin(struct drm_crtc *crtc,
			  struct drm_atomic_state *state)
{
	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
	unsigned long flags;

	spin_lock_irqsave(&vc4_crtc->irq_lock, flags);
	vc4_crtc->current_hvs_channel = vc4_state->assigned_channel;
	spin_unlock_irqrestore(&vc4_crtc->irq_lock, flags);
}

/* Enable path: install and publish the dlist, then bring up the channel
 * (oneshot mode when feeding the transposer).
 */
void vc4_hvs_atomic_enable(struct drm_crtc *crtc,
			   struct drm_atomic_state *state)
{
	struct drm_device *dev = crtc->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_display_mode *mode = &crtc->state->adjusted_mode;
	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
	bool oneshot = vc4_crtc->feeds_txp;

	vc4_hvs_install_dlist(crtc);
	vc4_hvs_update_dlist(crtc);
	vc4_hvs_init_channel(vc4->hvs, crtc, mode, oneshot);
}

/* Disable path: stop the channel that was assigned in the old state. */
void vc4_hvs_atomic_disable(struct drm_crtc *crtc,
			    struct drm_atomic_state *state)
{
	struct drm_device *dev = crtc->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, crtc);
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(old_state);
	unsigned int chan = vc4_state->assigned_channel;

	vc4_hvs_stop_channel(vc4->hvs, chan);
}

/* Flush: write every active plane's dlist into the hardware dlist in
 * zpos order, terminate it, and update background fill / gamma state.
 */
void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
			  struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, crtc);
	struct drm_device *dev = crtc->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
	unsigned int channel = vc4_state->assigned_channel;
	struct drm_plane *plane;
	struct vc4_plane_state *vc4_plane_state;
	bool debug_dump_regs = false;
	bool enable_bg_fill = false;
	u32 __iomem *dlist_start = vc4->hvs->dlist + vc4_state->mm.start;
	u32 __iomem *dlist_next = dlist_start;
	unsigned int zpos = 0;
	bool found = false;
	int idx;

	if (!drm_dev_enter(dev, &idx)) {
		vc4_crtc_send_vblank(crtc);
		return;
	}

	/* NOTE(review): this returns after a successful drm_dev_enter()
	 * without a matching drm_dev_exit(idx) — verify whether that
	 * leaks the device-enter section.
	 */
	if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
		return;

	if (debug_dump_regs) {
		DRM_INFO("CRTC %d HVS before:\n", drm_crtc_index(crtc));
		vc4_hvs_dump_state(hvs);
	}

	/* Copy all the active planes' dlist contents to the hardware dlist. */
	do {
		found = false;

		drm_atomic_crtc_for_each_plane(plane, crtc) {
			if (plane->state->normalized_zpos != zpos)
				continue;

			/* Is this the first active plane? */
			if (dlist_next == dlist_start) {
				/* We need to enable background fill when a plane
				 * could be alpha blending from the background, i.e.
				 * where no other plane is underneath.
				 It suffices to
				 * consider the first active plane here since we set
				 * needs_bg_fill such that either the first plane
				 * already needs it or all planes on top blend from
				 * the first or a lower plane.
				 */
				vc4_plane_state = to_vc4_plane_state(plane->state);
				enable_bg_fill = vc4_plane_state->needs_bg_fill;
			}

			dlist_next += vc4_plane_write_dlist(plane, dlist_next);

			found = true;
		}

		zpos++;
	} while (found);

	/* Terminate the hardware dlist. */
	writel(SCALER_CTL0_END, dlist_next);
	dlist_next++;

	WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);

	if (enable_bg_fill)
		/* This sets a black background color fill, as is the case
		 * with other DRM drivers.
		 */
		HVS_WRITE(SCALER_DISPBKGNDX(channel),
			  HVS_READ(SCALER_DISPBKGNDX(channel)) |
			  SCALER_DISPBKGND_FILL);

	/* Only update DISPLIST if the CRTC was already running and is not
	 * being disabled.
	 * vc4_crtc_enable() takes care of updating the dlist just after
	 * re-enabling VBLANK interrupts and before enabling the engine.
	 * If the CRTC is being disabled, there's no point in updating this
	 * information.
	 */
	if (crtc->state->active && old_state->active) {
		vc4_hvs_install_dlist(crtc);
		vc4_hvs_update_dlist(crtc);
	}

	if (crtc->state->color_mgmt_changed) {
		u32 dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(channel));

		if (crtc->state->gamma_lut) {
			vc4_hvs_update_gamma_lut(hvs, vc4_crtc);
			dispbkgndx |= SCALER_DISPBKGND_GAMMA;
		} else {
			/* Unsetting DISPBKGND_GAMMA skips the gamma lut step
			 * in hardware, which is the same as a linear lut that
			 * DRM expects us to use in absence of a user lut.
			 */
			dispbkgndx &= ~SCALER_DISPBKGND_GAMMA;
		}
		HVS_WRITE(SCALER_DISPBKGNDX(channel), dispbkgndx);
	}

	if (debug_dump_regs) {
		DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
		vc4_hvs_dump_state(hvs);
	}

	drm_dev_exit(idx);
}

/* Mask the end-of-line underrun interrupt for one channel (the bit
 * lives in a different field on vc5).
 */
void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel)
{
	struct drm_device *drm = &hvs->vc4->base;
	u32 dispctrl;
	int idx;

	if (!drm_dev_enter(drm, &idx))
		return;

	dispctrl = HVS_READ(SCALER_DISPCTRL);
	dispctrl &= ~(hvs->vc4->is_vc5 ?
		      SCALER5_DISPCTRL_DSPEISLUR(channel) :
		      SCALER_DISPCTRL_DSPEISLUR(channel));

	HVS_WRITE(SCALER_DISPCTRL, dispctrl);

	drm_dev_exit(idx);
}

/* Unmask the underrun interrupt for one channel, acking any stale
 * status first so it can fire again.
 */
void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel)
{
	struct drm_device *drm = &hvs->vc4->base;
	u32 dispctrl;
	int idx;

	if (!drm_dev_enter(drm, &idx))
		return;

	dispctrl = HVS_READ(SCALER_DISPCTRL);
	dispctrl |= (hvs->vc4->is_vc5 ?
		     SCALER5_DISPCTRL_DSPEISLUR(channel) :
		     SCALER_DISPCTRL_DSPEISLUR(channel));

	HVS_WRITE(SCALER_DISPSTAT,
		  SCALER_DISPSTAT_EUFLOW(channel));
	HVS_WRITE(SCALER_DISPCTRL, dispctrl);

	drm_dev_exit(idx);
}

/* Bump the underrun counter and log the event. */
static void vc4_hvs_report_underrun(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	atomic_inc(&vc4->underrun);
	DRM_DEV_ERROR(dev->dev, "HVS underrun\n");
}

/* HVS interrupt handler: report and mask per-channel FIFO underruns. */
static irqreturn_t vc4_hvs_irq_handler(int irq, void *data)
{
	struct drm_device *dev = data;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	irqreturn_t irqret = IRQ_NONE;
	int channel;
	u32 control;
	u32 status;
	u32 dspeislur;

	/*
	 * NOTE: We don't need to protect the register access using
	 * drm_dev_enter() there because the interrupt handler lifetime
	 * is tied to the device itself, and not to the DRM device.
	 *
	 * So when the device will be gone, one of the first thing we
	 * will be doing will be to unregister the interrupt handler,
	 * and then unregister the DRM device. drm_dev_enter() would
	 * thus always succeed if we are here.
	 */
	status = HVS_READ(SCALER_DISPSTAT);
	control = HVS_READ(SCALER_DISPCTRL);

	for (channel = 0; channel < SCALER_CHANNELS_COUNT; channel++) {
		dspeislur = vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) :
					  SCALER_DISPCTRL_DSPEISLUR(channel);
		/* Interrupt masking is not always honored, so check it here. */
		if (status & SCALER_DISPSTAT_EUFLOW(channel) &&
		    control & dspeislur) {
			vc4_hvs_mask_underrun(hvs, channel);
			vc4_hvs_report_underrun(dev);

			irqret = IRQ_HANDLED;
		}
	}

	/* Clear every per-channel interrupt flag.
*/ HVS_WRITE(SCALER_DISPSTAT, SCALER_DISPSTAT_IRQMASK(0) | SCALER_DISPSTAT_IRQMASK(1) | SCALER_DISPSTAT_IRQMASK(2)); return irqret; } int vc4_hvs_debugfs_init(struct drm_minor *minor) { struct drm_device *drm = minor->dev; struct vc4_dev *vc4 = to_vc4_dev(drm); struct vc4_hvs *hvs = vc4->hvs; if (!vc4->hvs) return -ENODEV; if (!vc4->is_vc5) debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR, minor->debugfs_root, &vc4->load_tracker_enabled); drm_debugfs_add_file(drm, "hvs_dlists", vc4_hvs_debugfs_dlist, NULL); drm_debugfs_add_file(drm, "hvs_underrun", vc4_hvs_debugfs_underrun, NULL); vc4_debugfs_add_regset32(drm, "hvs_regs", &hvs->regset); return 0; } struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, struct platform_device *pdev) { struct drm_device *drm = &vc4->base; struct vc4_hvs *hvs; hvs = drmm_kzalloc(drm, sizeof(*hvs), GFP_KERNEL); if (!hvs) return ERR_PTR(-ENOMEM); hvs->vc4 = vc4; hvs->pdev = pdev; spin_lock_init(&hvs->mm_lock); /* Set up the HVS display list memory manager. We never * overwrite the setup from the bootloader (just 128b out of * our 16K), since we don't want to scramble the screen when * transitioning from the firmware's boot setup to runtime. */ drm_mm_init(&hvs->dlist_mm, HVS_BOOTLOADER_DLIST_END, (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END); /* Set up the HVS LBM memory manager. We could have some more * complicated data structure that allowed reuse of LBM areas * between planes when they don't overlap on the screen, but * for now we just allocate globally. 
*/ if (!vc4->is_vc5) /* 48k words of 2x12-bit pixels */ drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024); else /* 60k words of 4x12-bit pixels */ drm_mm_init(&hvs->lbm_mm, 0, 60 * 1024); vc4->hvs = hvs; return hvs; } static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) { struct platform_device *pdev = to_platform_device(dev); struct drm_device *drm = dev_get_drvdata(master); struct vc4_dev *vc4 = to_vc4_dev(drm); struct vc4_hvs *hvs = NULL; int ret; u32 dispctrl; u32 reg, top; hvs = __vc4_hvs_alloc(vc4, NULL); if (IS_ERR(hvs)) return PTR_ERR(hvs); hvs->regs = vc4_ioremap_regs(pdev, 0); if (IS_ERR(hvs->regs)) return PTR_ERR(hvs->regs); hvs->regset.base = hvs->regs; hvs->regset.regs = hvs_regs; hvs->regset.nregs = ARRAY_SIZE(hvs_regs); if (vc4->is_vc5) { struct rpi_firmware *firmware; struct device_node *node; unsigned int max_rate; node = rpi_firmware_find_node(); if (!node) return -EINVAL; firmware = rpi_firmware_get(node); of_node_put(node); if (!firmware) return -EPROBE_DEFER; hvs->core_clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(hvs->core_clk)) { dev_err(&pdev->dev, "Couldn't get core clock\n"); return PTR_ERR(hvs->core_clk); } max_rate = rpi_firmware_clk_get_max_rate(firmware, RPI_FIRMWARE_CORE_CLK_ID); rpi_firmware_put(firmware); if (max_rate >= 550000000) hvs->vc5_hdmi_enable_hdmi_20 = true; if (max_rate >= 600000000) hvs->vc5_hdmi_enable_4096by2160 = true; hvs->max_core_rate = max_rate; ret = clk_prepare_enable(hvs->core_clk); if (ret) { dev_err(&pdev->dev, "Couldn't enable the core clock\n"); return ret; } } if (!vc4->is_vc5) hvs->dlist = hvs->regs + SCALER_DLIST_START; else hvs->dlist = hvs->regs + SCALER5_DLIST_START; /* Upload filter kernels. We only have the one for now, so we * keep it around for the lifetime of the driver. 
*/ ret = vc4_hvs_upload_linear_kernel(hvs, &hvs->mitchell_netravali_filter, mitchell_netravali_1_3_1_3_kernel); if (ret) return ret; reg = HVS_READ(SCALER_DISPECTRL); reg &= ~SCALER_DISPECTRL_DSP2_MUX_MASK; HVS_WRITE(SCALER_DISPECTRL, reg | VC4_SET_FIELD(0, SCALER_DISPECTRL_DSP2_MUX)); reg = HVS_READ(SCALER_DISPCTRL); reg &= ~SCALER_DISPCTRL_DSP3_MUX_MASK; HVS_WRITE(SCALER_DISPCTRL, reg | VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX)); reg = HVS_READ(SCALER_DISPEOLN); reg &= ~SCALER_DISPEOLN_DSP4_MUX_MASK; HVS_WRITE(SCALER_DISPEOLN, reg | VC4_SET_FIELD(3, SCALER_DISPEOLN_DSP4_MUX)); reg = HVS_READ(SCALER_DISPDITHER); reg &= ~SCALER_DISPDITHER_DSP5_MUX_MASK; HVS_WRITE(SCALER_DISPDITHER, reg | VC4_SET_FIELD(3, SCALER_DISPDITHER_DSP5_MUX)); dispctrl = HVS_READ(SCALER_DISPCTRL); dispctrl |= SCALER_DISPCTRL_ENABLE; dispctrl |= SCALER_DISPCTRL_DISPEIRQ(0) | SCALER_DISPCTRL_DISPEIRQ(1) | SCALER_DISPCTRL_DISPEIRQ(2); if (!vc4->is_vc5) dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ | SCALER_DISPCTRL_SLVWREIRQ | SCALER_DISPCTRL_SLVRDEIRQ | SCALER_DISPCTRL_DSPEIEOF(0) | SCALER_DISPCTRL_DSPEIEOF(1) | SCALER_DISPCTRL_DSPEIEOF(2) | SCALER_DISPCTRL_DSPEIEOLN(0) | SCALER_DISPCTRL_DSPEIEOLN(1) | SCALER_DISPCTRL_DSPEIEOLN(2) | SCALER_DISPCTRL_DSPEISLUR(0) | SCALER_DISPCTRL_DSPEISLUR(1) | SCALER_DISPCTRL_DSPEISLUR(2) | SCALER_DISPCTRL_SCLEIRQ); else dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ | SCALER5_DISPCTRL_SLVEIRQ | SCALER5_DISPCTRL_DSPEIEOF(0) | SCALER5_DISPCTRL_DSPEIEOF(1) | SCALER5_DISPCTRL_DSPEIEOF(2) | SCALER5_DISPCTRL_DSPEIEOLN(0) | SCALER5_DISPCTRL_DSPEIEOLN(1) | SCALER5_DISPCTRL_DSPEIEOLN(2) | SCALER5_DISPCTRL_DSPEISLUR(0) | SCALER5_DISPCTRL_DSPEISLUR(1) | SCALER5_DISPCTRL_DSPEISLUR(2) | SCALER_DISPCTRL_SCLEIRQ); /* Set AXI panic mode. * VC4 panics when < 2 lines in FIFO. * VC5 panics when less than 1 line in the FIFO. 
*/ dispctrl &= ~(SCALER_DISPCTRL_PANIC0_MASK | SCALER_DISPCTRL_PANIC1_MASK | SCALER_DISPCTRL_PANIC2_MASK); dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC0); dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC1); dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC2); HVS_WRITE(SCALER_DISPCTRL, dispctrl); /* Recompute Composite Output Buffer (COB) allocations for the displays */ if (!vc4->is_vc5) { /* The COB is 20736 pixels, or just over 10 lines at 2048 wide. * The bottom 2048 pixels are full 32bpp RGBA (intended for the * TXP composing RGBA to memory), whilst the remainder are only * 24bpp RGB. * * Assign 3 lines to channels 1 & 2, and just over 4 lines to * channel 0. */ #define VC4_COB_SIZE 20736 #define VC4_COB_LINE_WIDTH 2048 #define VC4_COB_NUM_LINES 3 reg = 0; top = VC4_COB_LINE_WIDTH * VC4_COB_NUM_LINES; reg |= (top - 1) << 16; HVS_WRITE(SCALER_DISPBASE2, reg); reg = top; top += VC4_COB_LINE_WIDTH * VC4_COB_NUM_LINES; reg |= (top - 1) << 16; HVS_WRITE(SCALER_DISPBASE1, reg); reg = top; top = VC4_COB_SIZE; reg |= (top - 1) << 16; HVS_WRITE(SCALER_DISPBASE0, reg); } else { /* The COB is 44416 pixels, or 10.8 lines at 4096 wide. * The bottom 4096 pixels are full RGBA (intended for the TXP * composing RGBA to memory), whilst the remainder are only * RGB. Addressing is always pixel wide. * * Assign 3 lines of 4096 to channels 1 & 2, and just over 4 * lines. to channel 0. 
*/ #define VC5_COB_SIZE 44416 #define VC5_COB_LINE_WIDTH 4096 #define VC5_COB_NUM_LINES 3 reg = 0; top = VC5_COB_LINE_WIDTH * VC5_COB_NUM_LINES; reg |= top << 16; HVS_WRITE(SCALER_DISPBASE2, reg); top += 16; reg = top; top += VC5_COB_LINE_WIDTH * VC5_COB_NUM_LINES; reg |= top << 16; HVS_WRITE(SCALER_DISPBASE1, reg); top += 16; reg = top; top = VC5_COB_SIZE; reg |= top << 16; HVS_WRITE(SCALER_DISPBASE0, reg); } ret = devm_request_irq(dev, platform_get_irq(pdev, 0), vc4_hvs_irq_handler, 0, "vc4 hvs", drm); if (ret) return ret; return 0; } static void vc4_hvs_unbind(struct device *dev, struct device *master, void *data) { struct drm_device *drm = dev_get_drvdata(master); struct vc4_dev *vc4 = to_vc4_dev(drm); struct vc4_hvs *hvs = vc4->hvs; struct drm_mm_node *node, *next; if (drm_mm_node_allocated(&vc4->hvs->mitchell_netravali_filter)) drm_mm_remove_node(&vc4->hvs->mitchell_netravali_filter); drm_mm_for_each_node_safe(node, next, &vc4->hvs->dlist_mm) drm_mm_remove_node(node); drm_mm_takedown(&vc4->hvs->dlist_mm); drm_mm_for_each_node_safe(node, next, &vc4->hvs->lbm_mm) drm_mm_remove_node(node); drm_mm_takedown(&vc4->hvs->lbm_mm); clk_disable_unprepare(hvs->core_clk); vc4->hvs = NULL; } static const struct component_ops vc4_hvs_ops = { .bind = vc4_hvs_bind, .unbind = vc4_hvs_unbind, }; static int vc4_hvs_dev_probe(struct platform_device *pdev) { return component_add(&pdev->dev, &vc4_hvs_ops); } static void vc4_hvs_dev_remove(struct platform_device *pdev) { component_del(&pdev->dev, &vc4_hvs_ops); } static const struct of_device_id vc4_hvs_dt_match[] = { { .compatible = "brcm,bcm2711-hvs" }, { .compatible = "brcm,bcm2835-hvs" }, {} }; struct platform_driver vc4_hvs_driver = { .probe = vc4_hvs_dev_probe, .remove_new = vc4_hvs_dev_remove, .driver = { .name = "vc4_hvs", .of_match_table = vc4_hvs_dt_match, }, };
linux-master
drivers/gpu/drm/vc4/vc4_hvs.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2016 Broadcom */ /** * DOC: VC4 SDTV module * * The VEC encoder generates PAL or NTSC composite video output. * * TV mode selection is done by an atomic property on the encoder, * because a drm_mode_modeinfo is insufficient to distinguish between * PAL and PAL-M or NTSC and NTSC-J. */ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> #include <drm/drm_edid.h> #include <drm/drm_panel.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> #include <linux/clk.h> #include <linux/component.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include "vc4_drv.h" #include "vc4_regs.h" /* WSE Registers */ #define VEC_WSE_RESET 0xc0 #define VEC_WSE_CONTROL 0xc4 #define VEC_WSE_WSS_ENABLE BIT(7) #define VEC_WSE_WSS_DATA 0xc8 #define VEC_WSE_VPS_DATA1 0xcc #define VEC_WSE_VPS_CONTROL 0xd0 /* VEC Registers */ #define VEC_REVID 0x100 #define VEC_CONFIG0 0x104 #define VEC_CONFIG0_YDEL_MASK GENMASK(28, 26) #define VEC_CONFIG0_YDEL(x) ((x) << 26) #define VEC_CONFIG0_CDEL_MASK GENMASK(25, 24) #define VEC_CONFIG0_CDEL(x) ((x) << 24) #define VEC_CONFIG0_SECAM_STD BIT(21) #define VEC_CONFIG0_PBPR_FIL BIT(18) #define VEC_CONFIG0_CHROMA_GAIN_MASK GENMASK(17, 16) #define VEC_CONFIG0_CHROMA_GAIN_UNITY (0 << 16) #define VEC_CONFIG0_CHROMA_GAIN_1_32 (1 << 16) #define VEC_CONFIG0_CHROMA_GAIN_1_16 (2 << 16) #define VEC_CONFIG0_CHROMA_GAIN_1_8 (3 << 16) #define VEC_CONFIG0_CBURST_GAIN_MASK GENMASK(14, 13) #define VEC_CONFIG0_CBURST_GAIN_UNITY (0 << 13) #define VEC_CONFIG0_CBURST_GAIN_1_128 (1 << 13) #define VEC_CONFIG0_CBURST_GAIN_1_64 (2 << 13) #define VEC_CONFIG0_CBURST_GAIN_1_32 (3 << 13) #define VEC_CONFIG0_CHRBW1 BIT(11) #define VEC_CONFIG0_CHRBW0 BIT(10) #define VEC_CONFIG0_SYNCDIS BIT(9) #define VEC_CONFIG0_BURDIS BIT(8) #define VEC_CONFIG0_CHRDIS BIT(7) #define VEC_CONFIG0_PDEN BIT(6) #define VEC_CONFIG0_YCDELAY BIT(4) #define VEC_CONFIG0_RAMPEN BIT(2) 
#define VEC_CONFIG0_YCDIS BIT(2) #define VEC_CONFIG0_STD_MASK GENMASK(1, 0) #define VEC_CONFIG0_NTSC_STD 0 #define VEC_CONFIG0_PAL_BDGHI_STD 1 #define VEC_CONFIG0_PAL_M_STD 2 #define VEC_CONFIG0_PAL_N_STD 3 #define VEC_SCHPH 0x108 #define VEC_SOFT_RESET 0x10c #define VEC_CLMP0_START 0x144 #define VEC_CLMP0_END 0x148 /* * These set the color subcarrier frequency * if VEC_CONFIG1_CUSTOM_FREQ is enabled. * * VEC_FREQ1_0 contains the most significant 16-bit half-word, * VEC_FREQ3_2 contains the least significant 16-bit half-word. * 0x80000000 seems to be equivalent to the pixel clock * (which itself is the VEC clock divided by 8). * * Reference values (with the default pixel clock of 13.5 MHz): * * NTSC (3579545.[45] Hz) - 0x21F07C1F * PAL (4433618.75 Hz) - 0x2A098ACB * PAL-M (3575611.[888111] Hz) - 0x21E6EFE3 * PAL-N (3582056.25 Hz) - 0x21F69446 * * NOTE: For SECAM, it is used as the Dr center frequency, * regardless of whether VEC_CONFIG1_CUSTOM_FREQ is enabled or not; * that is specified as 4406250 Hz, which corresponds to 0x29C71C72. 
*/ #define VEC_FREQ3_2 0x180 #define VEC_FREQ1_0 0x184 #define VEC_CONFIG1 0x188 #define VEC_CONFIG_VEC_RESYNC_OFF BIT(18) #define VEC_CONFIG_RGB219 BIT(17) #define VEC_CONFIG_CBAR_EN BIT(16) #define VEC_CONFIG_TC_OBB BIT(15) #define VEC_CONFIG1_OUTPUT_MODE_MASK GENMASK(12, 10) #define VEC_CONFIG1_C_Y_CVBS (0 << 10) #define VEC_CONFIG1_CVBS_Y_C (1 << 10) #define VEC_CONFIG1_PR_Y_PB (2 << 10) #define VEC_CONFIG1_RGB (4 << 10) #define VEC_CONFIG1_Y_C_CVBS (5 << 10) #define VEC_CONFIG1_C_CVBS_Y (6 << 10) #define VEC_CONFIG1_C_CVBS_CVBS (7 << 10) #define VEC_CONFIG1_DIS_CHR BIT(9) #define VEC_CONFIG1_DIS_LUMA BIT(8) #define VEC_CONFIG1_YCBCR_IN BIT(6) #define VEC_CONFIG1_DITHER_TYPE_LFSR 0 #define VEC_CONFIG1_DITHER_TYPE_COUNTER BIT(5) #define VEC_CONFIG1_DITHER_EN BIT(4) #define VEC_CONFIG1_CYDELAY BIT(3) #define VEC_CONFIG1_LUMADIS BIT(2) #define VEC_CONFIG1_COMPDIS BIT(1) #define VEC_CONFIG1_CUSTOM_FREQ BIT(0) #define VEC_CONFIG2 0x18c #define VEC_CONFIG2_PROG_SCAN BIT(15) #define VEC_CONFIG2_SYNC_ADJ_MASK GENMASK(14, 12) #define VEC_CONFIG2_SYNC_ADJ(x) (((x) / 2) << 12) #define VEC_CONFIG2_PBPR_EN BIT(10) #define VEC_CONFIG2_UV_DIG_DIS BIT(6) #define VEC_CONFIG2_RGB_DIG_DIS BIT(5) #define VEC_CONFIG2_TMUX_MASK GENMASK(3, 2) #define VEC_CONFIG2_TMUX_DRIVE0 (0 << 2) #define VEC_CONFIG2_TMUX_RG_COMP (1 << 2) #define VEC_CONFIG2_TMUX_UV_YC (2 << 2) #define VEC_CONFIG2_TMUX_SYNC_YC (3 << 2) #define VEC_INTERRUPT_CONTROL 0x190 #define VEC_INTERRUPT_STATUS 0x194 /* * Db center frequency for SECAM; the clock for this is the same as for * VEC_FREQ3_2/VEC_FREQ1_0, which is used for Dr center frequency. * * This is specified as 4250000 Hz, which corresponds to 0x284BDA13. * That is also the default value, so no need to set it explicitly. 
*/ #define VEC_FCW_SECAM_B 0x198 #define VEC_SECAM_GAIN_VAL 0x19c #define VEC_CONFIG3 0x1a0 #define VEC_CONFIG3_HORIZ_LEN_STD (0 << 0) #define VEC_CONFIG3_HORIZ_LEN_MPEG1_SIF (1 << 0) #define VEC_CONFIG3_SHAPE_NON_LINEAR BIT(1) #define VEC_STATUS0 0x200 #define VEC_MASK0 0x204 #define VEC_CFG 0x208 #define VEC_CFG_SG_MODE_MASK GENMASK(6, 5) #define VEC_CFG_SG_MODE(x) ((x) << 5) #define VEC_CFG_SG_EN BIT(4) #define VEC_CFG_VEC_EN BIT(3) #define VEC_CFG_MB_EN BIT(2) #define VEC_CFG_ENABLE BIT(1) #define VEC_CFG_TB_EN BIT(0) #define VEC_DAC_TEST 0x20c #define VEC_DAC_CONFIG 0x210 #define VEC_DAC_CONFIG_LDO_BIAS_CTRL(x) ((x) << 24) #define VEC_DAC_CONFIG_DRIVER_CTRL(x) ((x) << 16) #define VEC_DAC_CONFIG_DAC_CTRL(x) (x) #define VEC_DAC_MISC 0x214 #define VEC_DAC_MISC_VCD_CTRL_MASK GENMASK(31, 16) #define VEC_DAC_MISC_VCD_CTRL(x) ((x) << 16) #define VEC_DAC_MISC_VID_ACT BIT(8) #define VEC_DAC_MISC_VCD_PWRDN BIT(6) #define VEC_DAC_MISC_BIAS_PWRDN BIT(5) #define VEC_DAC_MISC_DAC_PWRDN BIT(2) #define VEC_DAC_MISC_LDO_PWRDN BIT(1) #define VEC_DAC_MISC_DAC_RST_N BIT(0) struct vc4_vec_variant { u32 dac_config; }; /* General VEC hardware state. 
*/ struct vc4_vec { struct vc4_encoder encoder; struct drm_connector connector; struct platform_device *pdev; const struct vc4_vec_variant *variant; void __iomem *regs; struct clk *clock; struct drm_property *legacy_tv_mode_property; struct debugfs_regset32 regset; }; #define VEC_READ(offset) \ ({ \ kunit_fail_current_test("Accessing a register in a unit test!\n"); \ readl(vec->regs + (offset)); \ }) #define VEC_WRITE(offset, val) \ do { \ kunit_fail_current_test("Accessing a register in a unit test!\n"); \ writel(val, vec->regs + (offset)); \ } while (0) #define encoder_to_vc4_vec(_encoder) \ container_of_const(_encoder, struct vc4_vec, encoder.base) #define connector_to_vc4_vec(_connector) \ container_of_const(_connector, struct vc4_vec, connector) enum vc4_vec_tv_mode_id { VC4_VEC_TV_MODE_NTSC, VC4_VEC_TV_MODE_NTSC_J, VC4_VEC_TV_MODE_PAL, VC4_VEC_TV_MODE_PAL_M, VC4_VEC_TV_MODE_NTSC_443, VC4_VEC_TV_MODE_PAL_60, VC4_VEC_TV_MODE_PAL_N, VC4_VEC_TV_MODE_SECAM, }; struct vc4_vec_tv_mode { unsigned int mode; u16 expected_htotal; u32 config0; u32 config1; u32 custom_freq; }; static const struct debugfs_reg32 vec_regs[] = { VC4_REG32(VEC_WSE_CONTROL), VC4_REG32(VEC_WSE_WSS_DATA), VC4_REG32(VEC_WSE_VPS_DATA1), VC4_REG32(VEC_WSE_VPS_CONTROL), VC4_REG32(VEC_REVID), VC4_REG32(VEC_CONFIG0), VC4_REG32(VEC_SCHPH), VC4_REG32(VEC_CLMP0_START), VC4_REG32(VEC_CLMP0_END), VC4_REG32(VEC_FREQ3_2), VC4_REG32(VEC_FREQ1_0), VC4_REG32(VEC_CONFIG1), VC4_REG32(VEC_CONFIG2), VC4_REG32(VEC_INTERRUPT_CONTROL), VC4_REG32(VEC_INTERRUPT_STATUS), VC4_REG32(VEC_FCW_SECAM_B), VC4_REG32(VEC_SECAM_GAIN_VAL), VC4_REG32(VEC_CONFIG3), VC4_REG32(VEC_STATUS0), VC4_REG32(VEC_MASK0), VC4_REG32(VEC_CFG), VC4_REG32(VEC_DAC_TEST), VC4_REG32(VEC_DAC_CONFIG), VC4_REG32(VEC_DAC_MISC), }; static const struct vc4_vec_tv_mode vc4_vec_tv_modes[] = { { .mode = DRM_MODE_TV_MODE_NTSC, .expected_htotal = 858, .config0 = VEC_CONFIG0_NTSC_STD | VEC_CONFIG0_PDEN, .config1 = VEC_CONFIG1_C_CVBS_CVBS, }, { .mode = 
DRM_MODE_TV_MODE_NTSC_443, .expected_htotal = 858, .config0 = VEC_CONFIG0_NTSC_STD, .config1 = VEC_CONFIG1_C_CVBS_CVBS | VEC_CONFIG1_CUSTOM_FREQ, .custom_freq = 0x2a098acb, }, { .mode = DRM_MODE_TV_MODE_NTSC_J, .expected_htotal = 858, .config0 = VEC_CONFIG0_NTSC_STD, .config1 = VEC_CONFIG1_C_CVBS_CVBS, }, { .mode = DRM_MODE_TV_MODE_PAL, .expected_htotal = 864, .config0 = VEC_CONFIG0_PAL_BDGHI_STD, .config1 = VEC_CONFIG1_C_CVBS_CVBS, }, { /* PAL-60 */ .mode = DRM_MODE_TV_MODE_PAL, .expected_htotal = 858, .config0 = VEC_CONFIG0_PAL_M_STD, .config1 = VEC_CONFIG1_C_CVBS_CVBS | VEC_CONFIG1_CUSTOM_FREQ, .custom_freq = 0x2a098acb, }, { .mode = DRM_MODE_TV_MODE_PAL_M, .expected_htotal = 858, .config0 = VEC_CONFIG0_PAL_M_STD, .config1 = VEC_CONFIG1_C_CVBS_CVBS, }, { .mode = DRM_MODE_TV_MODE_PAL_N, .expected_htotal = 864, .config0 = VEC_CONFIG0_PAL_N_STD, .config1 = VEC_CONFIG1_C_CVBS_CVBS, }, { .mode = DRM_MODE_TV_MODE_SECAM, .expected_htotal = 864, .config0 = VEC_CONFIG0_SECAM_STD, .config1 = VEC_CONFIG1_C_CVBS_CVBS, .custom_freq = 0x29c71c72, }, }; static inline const struct vc4_vec_tv_mode * vc4_vec_tv_mode_lookup(unsigned int mode, u16 htotal) { unsigned int i; for (i = 0; i < ARRAY_SIZE(vc4_vec_tv_modes); i++) { const struct vc4_vec_tv_mode *tv_mode = &vc4_vec_tv_modes[i]; if (tv_mode->mode == mode && tv_mode->expected_htotal == htotal) return tv_mode; } return NULL; } static const struct drm_prop_enum_list legacy_tv_mode_names[] = { { VC4_VEC_TV_MODE_NTSC, "NTSC", }, { VC4_VEC_TV_MODE_NTSC_443, "NTSC-443", }, { VC4_VEC_TV_MODE_NTSC_J, "NTSC-J", }, { VC4_VEC_TV_MODE_PAL, "PAL", }, { VC4_VEC_TV_MODE_PAL_60, "PAL-60", }, { VC4_VEC_TV_MODE_PAL_M, "PAL-M", }, { VC4_VEC_TV_MODE_PAL_N, "PAL-N", }, { VC4_VEC_TV_MODE_SECAM, "SECAM", }, }; static enum drm_connector_status vc4_vec_connector_detect(struct drm_connector *connector, bool force) { return connector_status_unknown; } static void vc4_vec_connector_reset(struct drm_connector *connector) { 
drm_atomic_helper_connector_reset(connector); drm_atomic_helper_connector_tv_reset(connector); } static int vc4_vec_connector_set_property(struct drm_connector *connector, struct drm_connector_state *state, struct drm_property *property, uint64_t val) { struct vc4_vec *vec = connector_to_vc4_vec(connector); if (property != vec->legacy_tv_mode_property) return -EINVAL; switch (val) { case VC4_VEC_TV_MODE_NTSC: state->tv.mode = DRM_MODE_TV_MODE_NTSC; break; case VC4_VEC_TV_MODE_NTSC_443: state->tv.mode = DRM_MODE_TV_MODE_NTSC_443; break; case VC4_VEC_TV_MODE_NTSC_J: state->tv.mode = DRM_MODE_TV_MODE_NTSC_J; break; case VC4_VEC_TV_MODE_PAL: case VC4_VEC_TV_MODE_PAL_60: state->tv.mode = DRM_MODE_TV_MODE_PAL; break; case VC4_VEC_TV_MODE_PAL_M: state->tv.mode = DRM_MODE_TV_MODE_PAL_M; break; case VC4_VEC_TV_MODE_PAL_N: state->tv.mode = DRM_MODE_TV_MODE_PAL_N; break; case VC4_VEC_TV_MODE_SECAM: state->tv.mode = DRM_MODE_TV_MODE_SECAM; break; default: return -EINVAL; } return 0; } static int vc4_vec_connector_get_property(struct drm_connector *connector, const struct drm_connector_state *state, struct drm_property *property, uint64_t *val) { struct vc4_vec *vec = connector_to_vc4_vec(connector); if (property != vec->legacy_tv_mode_property) return -EINVAL; switch (state->tv.mode) { case DRM_MODE_TV_MODE_NTSC: *val = VC4_VEC_TV_MODE_NTSC; break; case DRM_MODE_TV_MODE_NTSC_443: *val = VC4_VEC_TV_MODE_NTSC_443; break; case DRM_MODE_TV_MODE_NTSC_J: *val = VC4_VEC_TV_MODE_NTSC_J; break; case DRM_MODE_TV_MODE_PAL: *val = VC4_VEC_TV_MODE_PAL; break; case DRM_MODE_TV_MODE_PAL_M: *val = VC4_VEC_TV_MODE_PAL_M; break; case DRM_MODE_TV_MODE_PAL_N: *val = VC4_VEC_TV_MODE_PAL_N; break; case DRM_MODE_TV_MODE_SECAM: *val = VC4_VEC_TV_MODE_SECAM; break; default: return -EINVAL; } return 0; } static const struct drm_connector_funcs vc4_vec_connector_funcs = { .detect = vc4_vec_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, .reset = vc4_vec_connector_reset, 
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_get_property = vc4_vec_connector_get_property, .atomic_set_property = vc4_vec_connector_set_property, }; static const struct drm_connector_helper_funcs vc4_vec_connector_helper_funcs = { .atomic_check = drm_atomic_helper_connector_tv_check, .get_modes = drm_connector_helper_tv_get_modes, }; static int vc4_vec_connector_init(struct drm_device *dev, struct vc4_vec *vec) { struct drm_connector *connector = &vec->connector; struct drm_property *prop; int ret; connector->interlace_allowed = true; ret = drmm_connector_init(dev, connector, &vc4_vec_connector_funcs, DRM_MODE_CONNECTOR_Composite, NULL); if (ret) return ret; drm_connector_helper_add(connector, &vc4_vec_connector_helper_funcs); drm_object_attach_property(&connector->base, dev->mode_config.tv_mode_property, DRM_MODE_TV_MODE_NTSC); prop = drm_property_create_enum(dev, 0, "mode", legacy_tv_mode_names, ARRAY_SIZE(legacy_tv_mode_names)); if (!prop) return -ENOMEM; vec->legacy_tv_mode_property = prop; drm_object_attach_property(&connector->base, prop, VC4_VEC_TV_MODE_NTSC); drm_connector_attach_encoder(connector, &vec->encoder.base); return 0; } static void vc4_vec_encoder_disable(struct drm_encoder *encoder, struct drm_atomic_state *state) { struct drm_device *drm = encoder->dev; struct vc4_vec *vec = encoder_to_vc4_vec(encoder); int idx, ret; if (!drm_dev_enter(drm, &idx)) return; VEC_WRITE(VEC_CFG, 0); VEC_WRITE(VEC_DAC_MISC, VEC_DAC_MISC_VCD_PWRDN | VEC_DAC_MISC_BIAS_PWRDN | VEC_DAC_MISC_DAC_PWRDN | VEC_DAC_MISC_LDO_PWRDN); clk_disable_unprepare(vec->clock); ret = pm_runtime_put(&vec->pdev->dev); if (ret < 0) { DRM_ERROR("Failed to release power domain: %d\n", ret); goto err_dev_exit; } drm_dev_exit(idx); return; err_dev_exit: drm_dev_exit(idx); } static void vc4_vec_encoder_enable(struct drm_encoder *encoder, struct drm_atomic_state *state) { struct drm_device *drm = 
encoder->dev; struct vc4_vec *vec = encoder_to_vc4_vec(encoder); struct drm_connector *connector = &vec->connector; struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(state, connector); struct drm_display_mode *adjusted_mode = &encoder->crtc->state->adjusted_mode; const struct vc4_vec_tv_mode *tv_mode; int idx, ret; if (!drm_dev_enter(drm, &idx)) return; tv_mode = vc4_vec_tv_mode_lookup(conn_state->tv.mode, adjusted_mode->htotal); if (!tv_mode) goto err_dev_exit; ret = pm_runtime_resume_and_get(&vec->pdev->dev); if (ret < 0) { DRM_ERROR("Failed to retain power domain: %d\n", ret); goto err_dev_exit; } /* * We need to set the clock rate each time we enable the encoder * because there's a chance we share the same parent with the HDMI * clock, and both drivers are requesting different rates. * The good news is, these 2 encoders cannot be enabled at the same * time, thus preventing incompatible rate requests. */ ret = clk_set_rate(vec->clock, 108000000); if (ret) { DRM_ERROR("Failed to set clock rate: %d\n", ret); goto err_put_runtime_pm; } ret = clk_prepare_enable(vec->clock); if (ret) { DRM_ERROR("Failed to turn on core clock: %d\n", ret); goto err_put_runtime_pm; } /* Reset the different blocks */ VEC_WRITE(VEC_WSE_RESET, 1); VEC_WRITE(VEC_SOFT_RESET, 1); /* Disable the CGSM-A and WSE blocks */ VEC_WRITE(VEC_WSE_CONTROL, 0); /* Write config common to all modes. */ /* * Color subcarrier phase: phase = 360 * SCHPH / 256. * 0x28 <=> 39.375 deg. */ VEC_WRITE(VEC_SCHPH, 0x28); /* * Reset to default values. */ VEC_WRITE(VEC_CLMP0_START, 0xac); VEC_WRITE(VEC_CLMP0_END, 0xec); VEC_WRITE(VEC_CONFIG2, VEC_CONFIG2_UV_DIG_DIS | VEC_CONFIG2_RGB_DIG_DIS | ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ? 0 : VEC_CONFIG2_PROG_SCAN)); VEC_WRITE(VEC_CONFIG3, VEC_CONFIG3_HORIZ_LEN_STD); VEC_WRITE(VEC_DAC_CONFIG, vec->variant->dac_config); /* Mask all interrupts. 
*/ VEC_WRITE(VEC_MASK0, 0); VEC_WRITE(VEC_CONFIG0, tv_mode->config0); VEC_WRITE(VEC_CONFIG1, tv_mode->config1); if (tv_mode->custom_freq) { VEC_WRITE(VEC_FREQ3_2, (tv_mode->custom_freq >> 16) & 0xffff); VEC_WRITE(VEC_FREQ1_0, tv_mode->custom_freq & 0xffff); } VEC_WRITE(VEC_DAC_MISC, VEC_DAC_MISC_VID_ACT | VEC_DAC_MISC_DAC_RST_N); VEC_WRITE(VEC_CFG, VEC_CFG_VEC_EN); drm_dev_exit(idx); return; err_put_runtime_pm: pm_runtime_put(&vec->pdev->dev); err_dev_exit: drm_dev_exit(idx); } static int vc4_vec_encoder_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { const struct drm_display_mode *mode = &crtc_state->adjusted_mode; const struct vc4_vec_tv_mode *tv_mode; tv_mode = vc4_vec_tv_mode_lookup(conn_state->tv.mode, mode->htotal); if (!tv_mode) return -EINVAL; if (mode->crtc_hdisplay % 4) return -EINVAL; if (!(mode->crtc_hsync_end - mode->crtc_hsync_start)) return -EINVAL; switch (mode->htotal) { /* NTSC */ case 858: if (mode->crtc_vtotal > 262) return -EINVAL; if (mode->crtc_vdisplay < 1 || mode->crtc_vdisplay > 253) return -EINVAL; if (!(mode->crtc_vsync_start - mode->crtc_vdisplay)) return -EINVAL; if ((mode->crtc_vsync_end - mode->crtc_vsync_start) != 3) return -EINVAL; if ((mode->crtc_vtotal - mode->crtc_vsync_end) < 4) return -EINVAL; break; /* PAL/SECAM */ case 864: if (mode->crtc_vtotal > 312) return -EINVAL; if (mode->crtc_vdisplay < 1 || mode->crtc_vdisplay > 305) return -EINVAL; if (!(mode->crtc_vsync_start - mode->crtc_vdisplay)) return -EINVAL; if ((mode->crtc_vsync_end - mode->crtc_vsync_start) != 3) return -EINVAL; if ((mode->crtc_vtotal - mode->crtc_vsync_end) < 2) return -EINVAL; break; default: return -EINVAL; } return 0; } static const struct drm_encoder_helper_funcs vc4_vec_encoder_helper_funcs = { .atomic_check = vc4_vec_encoder_atomic_check, .atomic_disable = vc4_vec_encoder_disable, .atomic_enable = vc4_vec_encoder_enable, }; static int vc4_vec_late_register(struct drm_encoder 
*encoder) { struct drm_device *drm = encoder->dev; struct vc4_vec *vec = encoder_to_vc4_vec(encoder); vc4_debugfs_add_regset32(drm, "vec_regs", &vec->regset); return 0; } static const struct drm_encoder_funcs vc4_vec_encoder_funcs = { .late_register = vc4_vec_late_register, }; static const struct vc4_vec_variant bcm2835_vec_variant = { .dac_config = VEC_DAC_CONFIG_DAC_CTRL(0xc) | VEC_DAC_CONFIG_DRIVER_CTRL(0xc) | VEC_DAC_CONFIG_LDO_BIAS_CTRL(0x46) }; static const struct vc4_vec_variant bcm2711_vec_variant = { .dac_config = VEC_DAC_CONFIG_DAC_CTRL(0x0) | VEC_DAC_CONFIG_DRIVER_CTRL(0x80) | VEC_DAC_CONFIG_LDO_BIAS_CTRL(0x61) }; static const struct of_device_id vc4_vec_dt_match[] = { { .compatible = "brcm,bcm2835-vec", .data = &bcm2835_vec_variant }, { .compatible = "brcm,bcm2711-vec", .data = &bcm2711_vec_variant }, { /* sentinel */ }, }; static int vc4_vec_bind(struct device *dev, struct device *master, void *data) { struct platform_device *pdev = to_platform_device(dev); struct drm_device *drm = dev_get_drvdata(master); struct vc4_vec *vec; int ret; ret = drm_mode_create_tv_properties(drm, BIT(DRM_MODE_TV_MODE_NTSC) | BIT(DRM_MODE_TV_MODE_NTSC_443) | BIT(DRM_MODE_TV_MODE_NTSC_J) | BIT(DRM_MODE_TV_MODE_PAL) | BIT(DRM_MODE_TV_MODE_PAL_M) | BIT(DRM_MODE_TV_MODE_PAL_N) | BIT(DRM_MODE_TV_MODE_SECAM)); if (ret) return ret; vec = drmm_kzalloc(drm, sizeof(*vec), GFP_KERNEL); if (!vec) return -ENOMEM; vec->encoder.type = VC4_ENCODER_TYPE_VEC; vec->pdev = pdev; vec->variant = (const struct vc4_vec_variant *) of_device_get_match_data(dev); vec->regs = vc4_ioremap_regs(pdev, 0); if (IS_ERR(vec->regs)) return PTR_ERR(vec->regs); vec->regset.base = vec->regs; vec->regset.regs = vec_regs; vec->regset.nregs = ARRAY_SIZE(vec_regs); vec->clock = devm_clk_get(dev, NULL); if (IS_ERR(vec->clock)) { ret = PTR_ERR(vec->clock); if (ret != -EPROBE_DEFER) DRM_ERROR("Failed to get clock: %d\n", ret); return ret; } ret = devm_pm_runtime_enable(dev); if (ret) return ret; ret = 
drmm_encoder_init(drm, &vec->encoder.base, &vc4_vec_encoder_funcs, DRM_MODE_ENCODER_TVDAC, NULL); if (ret) return ret; drm_encoder_helper_add(&vec->encoder.base, &vc4_vec_encoder_helper_funcs); ret = vc4_vec_connector_init(drm, vec); if (ret) return ret; dev_set_drvdata(dev, vec); return 0; } static const struct component_ops vc4_vec_ops = { .bind = vc4_vec_bind, }; static int vc4_vec_dev_probe(struct platform_device *pdev) { return component_add(&pdev->dev, &vc4_vec_ops); } static void vc4_vec_dev_remove(struct platform_device *pdev) { component_del(&pdev->dev, &vc4_vec_ops); } struct platform_driver vc4_vec_driver = { .probe = vc4_vec_dev_probe, .remove_new = vc4_vec_dev_remove, .driver = { .name = "vc4_vec", .of_match_table = vc4_vec_dt_match, }, };
linux-master
drivers/gpu/drm/vc4/vc4_vec.c
/* * Copyright © 2014 Broadcom * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/device.h> #include <linux/io.h> #include <linux/sched/signal.h> #include <linux/dma-fence-array.h> #include <drm/drm_syncobj.h> #include "uapi/drm/vc4_drm.h" #include "vc4_drv.h" #include "vc4_regs.h" #include "vc4_trace.h" static void vc4_queue_hangcheck(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); mod_timer(&vc4->hangcheck.timer, round_jiffies_up(jiffies + msecs_to_jiffies(100))); } struct vc4_hang_state { struct drm_vc4_get_hang_state user_state; u32 bo_count; struct drm_gem_object **bo; }; static void vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state) { unsigned int i; for (i = 0; i < state->user_state.bo_count; i++) drm_gem_object_put(state->bo[i]); kfree(state); } int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_vc4_get_hang_state *get_state = data; struct drm_vc4_get_hang_state_bo *bo_state; struct vc4_hang_state *kernel_state; struct drm_vc4_get_hang_state *state; struct vc4_dev *vc4 = to_vc4_dev(dev); unsigned long irqflags; u32 i; int ret = 0; if (WARN_ON_ONCE(vc4->is_vc5)) return -ENODEV; if (!vc4->v3d) { DRM_DEBUG("VC4_GET_HANG_STATE with no VC4 V3D probed\n"); return -ENODEV; } spin_lock_irqsave(&vc4->job_lock, irqflags); kernel_state = vc4->hang_state; if (!kernel_state) { spin_unlock_irqrestore(&vc4->job_lock, irqflags); return -ENOENT; } state = &kernel_state->user_state; /* If the user's array isn't big enough, just return the * required array size. */ if (get_state->bo_count < state->bo_count) { get_state->bo_count = state->bo_count; spin_unlock_irqrestore(&vc4->job_lock, irqflags); return 0; } vc4->hang_state = NULL; spin_unlock_irqrestore(&vc4->job_lock, irqflags); /* Save the user's BO pointer, so we don't stomp it with the memcpy. 
*/ state->bo = get_state->bo; memcpy(get_state, state, sizeof(*state)); bo_state = kcalloc(state->bo_count, sizeof(*bo_state), GFP_KERNEL); if (!bo_state) { ret = -ENOMEM; goto err_free; } for (i = 0; i < state->bo_count; i++) { struct vc4_bo *vc4_bo = to_vc4_bo(kernel_state->bo[i]); u32 handle; ret = drm_gem_handle_create(file_priv, kernel_state->bo[i], &handle); if (ret) { state->bo_count = i; goto err_delete_handle; } bo_state[i].handle = handle; bo_state[i].paddr = vc4_bo->base.dma_addr; bo_state[i].size = vc4_bo->base.base.size; } if (copy_to_user(u64_to_user_ptr(get_state->bo), bo_state, state->bo_count * sizeof(*bo_state))) ret = -EFAULT; err_delete_handle: if (ret) { for (i = 0; i < state->bo_count; i++) drm_gem_handle_delete(file_priv, bo_state[i].handle); } err_free: vc4_free_hang_state(dev, kernel_state); kfree(bo_state); return ret; } static void vc4_save_hang_state(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_vc4_get_hang_state *state; struct vc4_hang_state *kernel_state; struct vc4_exec_info *exec[2]; struct vc4_bo *bo; unsigned long irqflags; unsigned int i, j, k, unref_list_count; kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL); if (!kernel_state) return; state = &kernel_state->user_state; spin_lock_irqsave(&vc4->job_lock, irqflags); exec[0] = vc4_first_bin_job(vc4); exec[1] = vc4_first_render_job(vc4); if (!exec[0] && !exec[1]) { spin_unlock_irqrestore(&vc4->job_lock, irqflags); return; } /* Get the bos from both binner and renderer into hang state. 
*/ state->bo_count = 0; for (i = 0; i < 2; i++) { if (!exec[i]) continue; unref_list_count = 0; list_for_each_entry(bo, &exec[i]->unref_list, unref_head) unref_list_count++; state->bo_count += exec[i]->bo_count + unref_list_count; } kernel_state->bo = kcalloc(state->bo_count, sizeof(*kernel_state->bo), GFP_ATOMIC); if (!kernel_state->bo) { spin_unlock_irqrestore(&vc4->job_lock, irqflags); return; } k = 0; for (i = 0; i < 2; i++) { if (!exec[i]) continue; for (j = 0; j < exec[i]->bo_count; j++) { bo = to_vc4_bo(exec[i]->bo[j]); /* Retain BOs just in case they were marked purgeable. * This prevents the BO from being purged before * someone had a chance to dump the hang state. */ WARN_ON(!refcount_read(&bo->usecnt)); refcount_inc(&bo->usecnt); drm_gem_object_get(exec[i]->bo[j]); kernel_state->bo[k++] = exec[i]->bo[j]; } list_for_each_entry(bo, &exec[i]->unref_list, unref_head) { /* No need to retain BOs coming from the ->unref_list * because they are naturally unpurgeable. */ drm_gem_object_get(&bo->base.base); kernel_state->bo[k++] = &bo->base.base; } } WARN_ON_ONCE(k != state->bo_count); if (exec[0]) state->start_bin = exec[0]->ct0ca; if (exec[1]) state->start_render = exec[1]->ct1ca; spin_unlock_irqrestore(&vc4->job_lock, irqflags); state->ct0ca = V3D_READ(V3D_CTNCA(0)); state->ct0ea = V3D_READ(V3D_CTNEA(0)); state->ct1ca = V3D_READ(V3D_CTNCA(1)); state->ct1ea = V3D_READ(V3D_CTNEA(1)); state->ct0cs = V3D_READ(V3D_CTNCS(0)); state->ct1cs = V3D_READ(V3D_CTNCS(1)); state->ct0ra0 = V3D_READ(V3D_CT00RA0); state->ct1ra0 = V3D_READ(V3D_CT01RA0); state->bpca = V3D_READ(V3D_BPCA); state->bpcs = V3D_READ(V3D_BPCS); state->bpoa = V3D_READ(V3D_BPOA); state->bpos = V3D_READ(V3D_BPOS); state->vpmbase = V3D_READ(V3D_VPMBASE); state->dbge = V3D_READ(V3D_DBGE); state->fdbgo = V3D_READ(V3D_FDBGO); state->fdbgb = V3D_READ(V3D_FDBGB); state->fdbgr = V3D_READ(V3D_FDBGR); state->fdbgs = V3D_READ(V3D_FDBGS); state->errstat = V3D_READ(V3D_ERRSTAT); /* We need to turn purgeable BOs into 
unpurgeable ones so that * userspace has a chance to dump the hang state before the kernel * decides to purge those BOs. * Note that BO consistency at dump time cannot be guaranteed. For * example, if the owner of these BOs decides to re-use them or mark * them purgeable again there's nothing we can do to prevent it. */ for (i = 0; i < kernel_state->user_state.bo_count; i++) { struct vc4_bo *bo = to_vc4_bo(kernel_state->bo[i]); if (bo->madv == __VC4_MADV_NOTSUPP) continue; mutex_lock(&bo->madv_lock); if (!WARN_ON(bo->madv == __VC4_MADV_PURGED)) bo->madv = VC4_MADV_WILLNEED; refcount_dec(&bo->usecnt); mutex_unlock(&bo->madv_lock); } spin_lock_irqsave(&vc4->job_lock, irqflags); if (vc4->hang_state) { spin_unlock_irqrestore(&vc4->job_lock, irqflags); vc4_free_hang_state(dev, kernel_state); } else { vc4->hang_state = kernel_state; spin_unlock_irqrestore(&vc4->job_lock, irqflags); } } static void vc4_reset(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); DRM_INFO("Resetting GPU.\n"); mutex_lock(&vc4->power_lock); if (vc4->power_refcount) { /* Power the device off and back on the by dropping the * reference on runtime PM. */ pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev); pm_runtime_get_sync(&vc4->v3d->pdev->dev); } mutex_unlock(&vc4->power_lock); vc4_irq_reset(dev); /* Rearm the hangcheck -- another job might have been waiting * for our hung one to get kicked off, and vc4_irq_reset() * would have started it. 
*/ vc4_queue_hangcheck(dev); } static void vc4_reset_work(struct work_struct *work) { struct vc4_dev *vc4 = container_of(work, struct vc4_dev, hangcheck.reset_work); vc4_save_hang_state(&vc4->base); vc4_reset(&vc4->base); } static void vc4_hangcheck_elapsed(struct timer_list *t) { struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer); struct drm_device *dev = &vc4->base; uint32_t ct0ca, ct1ca; unsigned long irqflags; struct vc4_exec_info *bin_exec, *render_exec; spin_lock_irqsave(&vc4->job_lock, irqflags); bin_exec = vc4_first_bin_job(vc4); render_exec = vc4_first_render_job(vc4); /* If idle, we can stop watching for hangs. */ if (!bin_exec && !render_exec) { spin_unlock_irqrestore(&vc4->job_lock, irqflags); return; } ct0ca = V3D_READ(V3D_CTNCA(0)); ct1ca = V3D_READ(V3D_CTNCA(1)); /* If we've made any progress in execution, rearm the timer * and wait. */ if ((bin_exec && ct0ca != bin_exec->last_ct0ca) || (render_exec && ct1ca != render_exec->last_ct1ca)) { if (bin_exec) bin_exec->last_ct0ca = ct0ca; if (render_exec) render_exec->last_ct1ca = ct1ca; spin_unlock_irqrestore(&vc4->job_lock, irqflags); vc4_queue_hangcheck(dev); return; } spin_unlock_irqrestore(&vc4->job_lock, irqflags); /* We've gone too long with no progress, reset. This has to * be done from a work struct, since resetting can sleep and * this timer hook isn't allowed to. */ schedule_work(&vc4->hangcheck.reset_work); } static void submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end) { struct vc4_dev *vc4 = to_vc4_dev(dev); /* Set the current and end address of the control list. * Writing the end register is what starts the job. 
*/ V3D_WRITE(V3D_CTNCA(thread), start); V3D_WRITE(V3D_CTNEA(thread), end); } int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns, bool interruptible) { struct vc4_dev *vc4 = to_vc4_dev(dev); int ret = 0; unsigned long timeout_expire; DEFINE_WAIT(wait); if (WARN_ON_ONCE(vc4->is_vc5)) return -ENODEV; if (vc4->finished_seqno >= seqno) return 0; if (timeout_ns == 0) return -ETIME; timeout_expire = jiffies + nsecs_to_jiffies(timeout_ns); trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns); for (;;) { prepare_to_wait(&vc4->job_wait_queue, &wait, interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); if (interruptible && signal_pending(current)) { ret = -ERESTARTSYS; break; } if (vc4->finished_seqno >= seqno) break; if (timeout_ns != ~0ull) { if (time_after_eq(jiffies, timeout_expire)) { ret = -ETIME; break; } schedule_timeout(timeout_expire - jiffies); } else { schedule(); } } finish_wait(&vc4->job_wait_queue, &wait); trace_vc4_wait_for_seqno_end(dev, seqno); return ret; } static void vc4_flush_caches(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); /* Flush the GPU L2 caches. These caches sit on top of system * L3 (the 128kb or so shared with the CPU), and are * non-allocating in the L3. */ V3D_WRITE(V3D_L2CACTL, V3D_L2CACTL_L2CCLR); V3D_WRITE(V3D_SLCACTL, VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) | VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC) | VC4_SET_FIELD(0xf, V3D_SLCACTL_UCC) | VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC)); } static void vc4_flush_texture_caches(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); V3D_WRITE(V3D_L2CACTL, V3D_L2CACTL_L2CCLR); V3D_WRITE(V3D_SLCACTL, VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) | VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC)); } /* Sets the registers for the next job to be actually be executed in * the hardware. * * The job_lock should be held during this. 
*/ void vc4_submit_next_bin_job(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_exec_info *exec; if (WARN_ON_ONCE(vc4->is_vc5)) return; again: exec = vc4_first_bin_job(vc4); if (!exec) return; vc4_flush_caches(dev); /* Only start the perfmon if it was not already started by a previous * job. */ if (exec->perfmon && vc4->active_perfmon != exec->perfmon) vc4_perfmon_start(vc4, exec->perfmon); /* Either put the job in the binner if it uses the binner, or * immediately move it to the to-be-rendered queue. */ if (exec->ct0ca != exec->ct0ea) { trace_vc4_submit_cl(dev, false, exec->seqno, exec->ct0ca, exec->ct0ea); submit_cl(dev, 0, exec->ct0ca, exec->ct0ea); } else { struct vc4_exec_info *next; vc4_move_job_to_render(dev, exec); next = vc4_first_bin_job(vc4); /* We can't start the next bin job if the previous job had a * different perfmon instance attached to it. The same goes * if one of them had a perfmon attached to it and the other * one doesn't. */ if (next && next->perfmon == exec->perfmon) goto again; } } void vc4_submit_next_render_job(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_exec_info *exec = vc4_first_render_job(vc4); if (!exec) return; if (WARN_ON_ONCE(vc4->is_vc5)) return; /* A previous RCL may have written to one of our textures, and * our full cache flush at bin time may have occurred before * that RCL completed. Flush the texture cache now, but not * the instructions or uniforms (since we don't write those * from an RCL). 
*/ vc4_flush_texture_caches(dev); trace_vc4_submit_cl(dev, true, exec->seqno, exec->ct1ca, exec->ct1ea); submit_cl(dev, 1, exec->ct1ca, exec->ct1ea); } void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec) { struct vc4_dev *vc4 = to_vc4_dev(dev); bool was_empty = list_empty(&vc4->render_job_list); if (WARN_ON_ONCE(vc4->is_vc5)) return; list_move_tail(&exec->head, &vc4->render_job_list); if (was_empty) vc4_submit_next_render_job(dev); } static void vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno) { struct vc4_bo *bo; unsigned i; for (i = 0; i < exec->bo_count; i++) { bo = to_vc4_bo(exec->bo[i]); bo->seqno = seqno; dma_resv_add_fence(bo->base.base.resv, exec->fence, DMA_RESV_USAGE_READ); } list_for_each_entry(bo, &exec->unref_list, unref_head) { bo->seqno = seqno; } for (i = 0; i < exec->rcl_write_bo_count; i++) { bo = to_vc4_bo(&exec->rcl_write_bo[i]->base); bo->write_seqno = seqno; dma_resv_add_fence(bo->base.base.resv, exec->fence, DMA_RESV_USAGE_WRITE); } } static void vc4_unlock_bo_reservations(struct drm_device *dev, struct vc4_exec_info *exec, struct ww_acquire_ctx *acquire_ctx) { int i; for (i = 0; i < exec->bo_count; i++) dma_resv_unlock(exec->bo[i]->resv); ww_acquire_fini(acquire_ctx); } /* Takes the reservation lock on all the BOs being referenced, so that * at queue submit time we can update the reservations. * * We don't lock the RCL the tile alloc/state BOs, or overflow memory * (all of which are on exec->unref_list). They're entirely private * to vc4, so we don't attach dma-buf fences to them. 
*/ static int vc4_lock_bo_reservations(struct drm_device *dev, struct vc4_exec_info *exec, struct ww_acquire_ctx *acquire_ctx) { int contended_lock = -1; int i, ret; struct drm_gem_object *bo; ww_acquire_init(acquire_ctx, &reservation_ww_class); retry: if (contended_lock != -1) { bo = exec->bo[contended_lock]; ret = dma_resv_lock_slow_interruptible(bo->resv, acquire_ctx); if (ret) { ww_acquire_done(acquire_ctx); return ret; } } for (i = 0; i < exec->bo_count; i++) { if (i == contended_lock) continue; bo = exec->bo[i]; ret = dma_resv_lock_interruptible(bo->resv, acquire_ctx); if (ret) { int j; for (j = 0; j < i; j++) { bo = exec->bo[j]; dma_resv_unlock(bo->resv); } if (contended_lock != -1 && contended_lock >= i) { bo = exec->bo[contended_lock]; dma_resv_unlock(bo->resv); } if (ret == -EDEADLK) { contended_lock = i; goto retry; } ww_acquire_done(acquire_ctx); return ret; } } ww_acquire_done(acquire_ctx); /* Reserve space for our shared (read-only) fence references, * before we commit the CL to the hardware. */ for (i = 0; i < exec->bo_count; i++) { bo = exec->bo[i]; ret = dma_resv_reserve_fences(bo->resv, 1); if (ret) { vc4_unlock_bo_reservations(dev, exec, acquire_ctx); return ret; } } return 0; } /* Queues a struct vc4_exec_info for execution. If no job is * currently executing, then submits it. * * Unlike most GPUs, our hardware only handles one command list at a * time. To queue multiple jobs at once, we'd need to edit the * previous command list to have a jump to the new one at the end, and * then bump the end address. That's a change for a later date, * though. 
*/ static int vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec, struct ww_acquire_ctx *acquire_ctx, struct drm_syncobj *out_sync) { struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_exec_info *renderjob; uint64_t seqno; unsigned long irqflags; struct vc4_fence *fence; fence = kzalloc(sizeof(*fence), GFP_KERNEL); if (!fence) return -ENOMEM; fence->dev = dev; spin_lock_irqsave(&vc4->job_lock, irqflags); seqno = ++vc4->emit_seqno; exec->seqno = seqno; dma_fence_init(&fence->base, &vc4_fence_ops, &vc4->job_lock, vc4->dma_fence_context, exec->seqno); fence->seqno = exec->seqno; exec->fence = &fence->base; if (out_sync) drm_syncobj_replace_fence(out_sync, exec->fence); vc4_update_bo_seqnos(exec, seqno); vc4_unlock_bo_reservations(dev, exec, acquire_ctx); list_add_tail(&exec->head, &vc4->bin_job_list); /* If no bin job was executing and if the render job (if any) has the * same perfmon as our job attached to it (or if both jobs don't have * perfmon activated), then kick ours off. Otherwise, it'll get * started when the previous job's flush/render done interrupt occurs. */ renderjob = vc4_first_render_job(vc4); if (vc4_first_bin_job(vc4) == exec && (!renderjob || renderjob->perfmon == exec->perfmon)) { vc4_submit_next_bin_job(dev); vc4_queue_hangcheck(dev); } spin_unlock_irqrestore(&vc4->job_lock, irqflags); return 0; } /** * vc4_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects * referenced by the job. * @dev: DRM device * @file_priv: DRM file for this fd * @exec: V3D job being set up * * The command validator needs to reference BOs by their index within * the submitted job's BO list. This does the validation of the job's * BO list and reference counting for the lifetime of the job. 
*/ static int vc4_cl_lookup_bos(struct drm_device *dev, struct drm_file *file_priv, struct vc4_exec_info *exec) { struct drm_vc4_submit_cl *args = exec->args; int ret = 0; int i; exec->bo_count = args->bo_handle_count; if (!exec->bo_count) { /* See comment on bo_index for why we have to check * this. */ DRM_DEBUG("Rendering requires BOs to validate\n"); return -EINVAL; } ret = drm_gem_objects_lookup(file_priv, u64_to_user_ptr(args->bo_handles), exec->bo_count, &exec->bo); if (ret) goto fail_put_bo; for (i = 0; i < exec->bo_count; i++) { ret = vc4_bo_inc_usecnt(to_vc4_bo(exec->bo[i])); if (ret) goto fail_dec_usecnt; } return 0; fail_dec_usecnt: /* Decrease usecnt on acquired objects. * We cannot rely on vc4_complete_exec() to release resources here, * because vc4_complete_exec() has no information about which BO has * had its ->usecnt incremented. * To make things easier we just free everything explicitly and set * exec->bo to NULL so that vc4_complete_exec() skips the 'BO release' * step. */ for (i-- ; i >= 0; i--) vc4_bo_dec_usecnt(to_vc4_bo(exec->bo[i])); fail_put_bo: /* Release any reference to acquired objects. 
*/ for (i = 0; i < exec->bo_count && exec->bo[i]; i++) drm_gem_object_put(exec->bo[i]); kvfree(exec->bo); exec->bo = NULL; return ret; } static int vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec) { struct drm_vc4_submit_cl *args = exec->args; struct vc4_dev *vc4 = to_vc4_dev(dev); void *temp = NULL; void *bin; int ret = 0; uint32_t bin_offset = 0; uint32_t shader_rec_offset = roundup(bin_offset + args->bin_cl_size, 16); uint32_t uniforms_offset = shader_rec_offset + args->shader_rec_size; uint32_t exec_size = uniforms_offset + args->uniforms_size; uint32_t temp_size = exec_size + (sizeof(struct vc4_shader_state) * args->shader_rec_count); struct vc4_bo *bo; if (shader_rec_offset < args->bin_cl_size || uniforms_offset < shader_rec_offset || exec_size < uniforms_offset || args->shader_rec_count >= (UINT_MAX / sizeof(struct vc4_shader_state)) || temp_size < exec_size) { DRM_DEBUG("overflow in exec arguments\n"); ret = -EINVAL; goto fail; } /* Allocate space where we'll store the copied in user command lists * and shader records. * * We don't just copy directly into the BOs because we need to * read the contents back for validation, and I think the * bo->vaddr is uncached access. 
*/ temp = kvmalloc_array(temp_size, 1, GFP_KERNEL); if (!temp) { DRM_ERROR("Failed to allocate storage for copying " "in bin/render CLs.\n"); ret = -ENOMEM; goto fail; } bin = temp + bin_offset; exec->shader_rec_u = temp + shader_rec_offset; exec->uniforms_u = temp + uniforms_offset; exec->shader_state = temp + exec_size; exec->shader_state_size = args->shader_rec_count; if (copy_from_user(bin, u64_to_user_ptr(args->bin_cl), args->bin_cl_size)) { ret = -EFAULT; goto fail; } if (copy_from_user(exec->shader_rec_u, u64_to_user_ptr(args->shader_rec), args->shader_rec_size)) { ret = -EFAULT; goto fail; } if (copy_from_user(exec->uniforms_u, u64_to_user_ptr(args->uniforms), args->uniforms_size)) { ret = -EFAULT; goto fail; } bo = vc4_bo_create(dev, exec_size, true, VC4_BO_TYPE_BCL); if (IS_ERR(bo)) { DRM_ERROR("Couldn't allocate BO for binning\n"); ret = PTR_ERR(bo); goto fail; } exec->exec_bo = &bo->base; list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head, &exec->unref_list); exec->ct0ca = exec->exec_bo->dma_addr + bin_offset; exec->bin_u = bin; exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset; exec->shader_rec_p = exec->exec_bo->dma_addr + shader_rec_offset; exec->shader_rec_size = args->shader_rec_size; exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset; exec->uniforms_p = exec->exec_bo->dma_addr + uniforms_offset; exec->uniforms_size = args->uniforms_size; ret = vc4_validate_bin_cl(dev, exec->exec_bo->vaddr + bin_offset, bin, exec); if (ret) goto fail; ret = vc4_validate_shader_recs(dev, exec); if (ret) goto fail; if (exec->found_tile_binning_mode_config_packet) { ret = vc4_v3d_bin_bo_get(vc4, &exec->bin_bo_used); if (ret) goto fail; } /* Block waiting on any previous rendering into the CS's VBO, * IB, or textures, so that pixels are actually written by the * time we try to read them. 
*/ ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true); fail: kvfree(temp); return ret; } static void vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec) { struct vc4_dev *vc4 = to_vc4_dev(dev); unsigned long irqflags; unsigned i; /* If we got force-completed because of GPU reset rather than * through our IRQ handler, signal the fence now. */ if (exec->fence) { dma_fence_signal(exec->fence); dma_fence_put(exec->fence); } if (exec->bo) { for (i = 0; i < exec->bo_count; i++) { struct vc4_bo *bo = to_vc4_bo(exec->bo[i]); vc4_bo_dec_usecnt(bo); drm_gem_object_put(exec->bo[i]); } kvfree(exec->bo); } while (!list_empty(&exec->unref_list)) { struct vc4_bo *bo = list_first_entry(&exec->unref_list, struct vc4_bo, unref_head); list_del(&bo->unref_head); drm_gem_object_put(&bo->base.base); } /* Free up the allocation of any bin slots we used. */ spin_lock_irqsave(&vc4->job_lock, irqflags); vc4->bin_alloc_used &= ~exec->bin_slots; spin_unlock_irqrestore(&vc4->job_lock, irqflags); /* Release the reference on the binner BO if needed. */ if (exec->bin_bo_used) vc4_v3d_bin_bo_put(vc4); /* Release the reference we had on the perf monitor. 
*/ vc4_perfmon_put(exec->perfmon); vc4_v3d_pm_put(vc4); kfree(exec); } void vc4_job_handle_completed(struct vc4_dev *vc4) { unsigned long irqflags; struct vc4_seqno_cb *cb, *cb_temp; if (WARN_ON_ONCE(vc4->is_vc5)) return; spin_lock_irqsave(&vc4->job_lock, irqflags); while (!list_empty(&vc4->job_done_list)) { struct vc4_exec_info *exec = list_first_entry(&vc4->job_done_list, struct vc4_exec_info, head); list_del(&exec->head); spin_unlock_irqrestore(&vc4->job_lock, irqflags); vc4_complete_exec(&vc4->base, exec); spin_lock_irqsave(&vc4->job_lock, irqflags); } list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) { if (cb->seqno <= vc4->finished_seqno) { list_del_init(&cb->work.entry); schedule_work(&cb->work); } } spin_unlock_irqrestore(&vc4->job_lock, irqflags); } static void vc4_seqno_cb_work(struct work_struct *work) { struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work); cb->func(cb); } int vc4_queue_seqno_cb(struct drm_device *dev, struct vc4_seqno_cb *cb, uint64_t seqno, void (*func)(struct vc4_seqno_cb *cb)) { struct vc4_dev *vc4 = to_vc4_dev(dev); unsigned long irqflags; if (WARN_ON_ONCE(vc4->is_vc5)) return -ENODEV; cb->func = func; INIT_WORK(&cb->work, vc4_seqno_cb_work); spin_lock_irqsave(&vc4->job_lock, irqflags); if (seqno > vc4->finished_seqno) { cb->seqno = seqno; list_add_tail(&cb->work.entry, &vc4->seqno_cb_list); } else { schedule_work(&cb->work); } spin_unlock_irqrestore(&vc4->job_lock, irqflags); return 0; } /* Scheduled when any job has been completed, this walks the list of * jobs that had completed and unrefs their BOs and frees their exec * structs. 
*/ static void vc4_job_done_work(struct work_struct *work) { struct vc4_dev *vc4 = container_of(work, struct vc4_dev, job_done_work); vc4_job_handle_completed(vc4); } static int vc4_wait_for_seqno_ioctl_helper(struct drm_device *dev, uint64_t seqno, uint64_t *timeout_ns) { unsigned long start = jiffies; int ret = vc4_wait_for_seqno(dev, seqno, *timeout_ns, true); if ((ret == -EINTR || ret == -ERESTARTSYS) && *timeout_ns != ~0ull) { uint64_t delta = jiffies_to_nsecs(jiffies - start); if (*timeout_ns >= delta) *timeout_ns -= delta; } return ret; } int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_vc4_wait_seqno *args = data; if (WARN_ON_ONCE(vc4->is_vc5)) return -ENODEV; return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno, &args->timeout_ns); } int vc4_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct vc4_dev *vc4 = to_vc4_dev(dev); int ret; struct drm_vc4_wait_bo *args = data; struct drm_gem_object *gem_obj; struct vc4_bo *bo; if (WARN_ON_ONCE(vc4->is_vc5)) return -ENODEV; if (args->pad != 0) return -EINVAL; gem_obj = drm_gem_object_lookup(file_priv, args->handle); if (!gem_obj) { DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); return -EINVAL; } bo = to_vc4_bo(gem_obj); ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno, &args->timeout_ns); drm_gem_object_put(gem_obj); return ret; } /** * vc4_submit_cl_ioctl() - Submits a job (frame) to the VC4. * @dev: DRM device * @data: ioctl argument * @file_priv: DRM file for this fd * * This is the main entrypoint for userspace to submit a 3D frame to * the GPU. Userspace provides the binner command list (if * applicable), and the kernel sets up the render command list to draw * to the framebuffer described in the ioctl, using the command lists * that the 3D engine's binner will produce. 
*/ int vc4_submit_cl_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_file *vc4file = file_priv->driver_priv; struct drm_vc4_submit_cl *args = data; struct drm_syncobj *out_sync = NULL; struct vc4_exec_info *exec; struct ww_acquire_ctx acquire_ctx; struct dma_fence *in_fence; int ret = 0; trace_vc4_submit_cl_ioctl(dev, args->bin_cl_size, args->shader_rec_size, args->bo_handle_count); if (WARN_ON_ONCE(vc4->is_vc5)) return -ENODEV; if (!vc4->v3d) { DRM_DEBUG("VC4_SUBMIT_CL with no VC4 V3D probed\n"); return -ENODEV; } if ((args->flags & ~(VC4_SUBMIT_CL_USE_CLEAR_COLOR | VC4_SUBMIT_CL_FIXED_RCL_ORDER | VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X | VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y)) != 0) { DRM_DEBUG("Unknown flags: 0x%02x\n", args->flags); return -EINVAL; } if (args->pad2 != 0) { DRM_DEBUG("Invalid pad: 0x%08x\n", args->pad2); return -EINVAL; } exec = kcalloc(1, sizeof(*exec), GFP_KERNEL); if (!exec) { DRM_ERROR("malloc failure on exec struct\n"); return -ENOMEM; } exec->dev = vc4; ret = vc4_v3d_pm_get(vc4); if (ret) { kfree(exec); return ret; } exec->args = args; INIT_LIST_HEAD(&exec->unref_list); ret = vc4_cl_lookup_bos(dev, file_priv, exec); if (ret) goto fail; if (args->perfmonid) { exec->perfmon = vc4_perfmon_find(vc4file, args->perfmonid); if (!exec->perfmon) { ret = -ENOENT; goto fail; } } if (args->in_sync) { ret = drm_syncobj_find_fence(file_priv, args->in_sync, 0, 0, &in_fence); if (ret) goto fail; /* When the fence (or fence array) is exclusively from our * context we can skip the wait since jobs are executed in * order of their submission through this ioctl and this can * only have fences from a prior job. 
*/ if (!dma_fence_match_context(in_fence, vc4->dma_fence_context)) { ret = dma_fence_wait(in_fence, true); if (ret) { dma_fence_put(in_fence); goto fail; } } dma_fence_put(in_fence); } if (exec->args->bin_cl_size != 0) { ret = vc4_get_bcl(dev, exec); if (ret) goto fail; } else { exec->ct0ca = 0; exec->ct0ea = 0; } ret = vc4_get_rcl(dev, exec); if (ret) goto fail; ret = vc4_lock_bo_reservations(dev, exec, &acquire_ctx); if (ret) goto fail; if (args->out_sync) { out_sync = drm_syncobj_find(file_priv, args->out_sync); if (!out_sync) { ret = -EINVAL; goto fail; } /* We replace the fence in out_sync in vc4_queue_submit since * the render job could execute immediately after that call. * If it finishes before our ioctl processing resumes the * render job fence could already have been freed. */ } /* Clear this out of the struct we'll be putting in the queue, * since it's part of our stack. */ exec->args = NULL; ret = vc4_queue_submit(dev, exec, &acquire_ctx, out_sync); /* The syncobj isn't part of the exec data and we need to free our * reference even if job submission failed. */ if (out_sync) drm_syncobj_put(out_sync); if (ret) goto fail; /* Return the seqno for our job. 
*/ args->seqno = vc4->emit_seqno; return 0; fail: vc4_complete_exec(&vc4->base, exec); return ret; } static void vc4_gem_destroy(struct drm_device *dev, void *unused); int vc4_gem_init(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); int ret; if (WARN_ON_ONCE(vc4->is_vc5)) return -ENODEV; vc4->dma_fence_context = dma_fence_context_alloc(1); INIT_LIST_HEAD(&vc4->bin_job_list); INIT_LIST_HEAD(&vc4->render_job_list); INIT_LIST_HEAD(&vc4->job_done_list); INIT_LIST_HEAD(&vc4->seqno_cb_list); spin_lock_init(&vc4->job_lock); INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work); timer_setup(&vc4->hangcheck.timer, vc4_hangcheck_elapsed, 0); INIT_WORK(&vc4->job_done_work, vc4_job_done_work); ret = drmm_mutex_init(dev, &vc4->power_lock); if (ret) return ret; INIT_LIST_HEAD(&vc4->purgeable.list); ret = drmm_mutex_init(dev, &vc4->purgeable.lock); if (ret) return ret; return drmm_add_action_or_reset(dev, vc4_gem_destroy, NULL); } static void vc4_gem_destroy(struct drm_device *dev, void *unused) { struct vc4_dev *vc4 = to_vc4_dev(dev); /* Waiting for exec to finish would need to be done before * unregistering V3D. */ WARN_ON(vc4->emit_seqno != vc4->finished_seqno); /* V3D should already have disabled its interrupt and cleared * the overflow allocation registers. Now free the object. 
*/ if (vc4->bin_bo) { drm_gem_object_put(&vc4->bin_bo->base.base); vc4->bin_bo = NULL; } if (vc4->hang_state) vc4_free_hang_state(dev, vc4->hang_state); } int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_vc4_gem_madvise *args = data; struct drm_gem_object *gem_obj; struct vc4_bo *bo; int ret; if (WARN_ON_ONCE(vc4->is_vc5)) return -ENODEV; switch (args->madv) { case VC4_MADV_DONTNEED: case VC4_MADV_WILLNEED: break; default: return -EINVAL; } if (args->pad != 0) return -EINVAL; gem_obj = drm_gem_object_lookup(file_priv, args->handle); if (!gem_obj) { DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); return -ENOENT; } bo = to_vc4_bo(gem_obj); /* Only BOs exposed to userspace can be purged. */ if (bo->madv == __VC4_MADV_NOTSUPP) { DRM_DEBUG("madvise not supported on this BO\n"); ret = -EINVAL; goto out_put_gem; } /* Not sure it's safe to purge imported BOs. Let's just assume it's * not until proven otherwise. */ if (gem_obj->import_attach) { DRM_DEBUG("madvise not supported on imported BOs\n"); ret = -EINVAL; goto out_put_gem; } mutex_lock(&bo->madv_lock); if (args->madv == VC4_MADV_DONTNEED && bo->madv == VC4_MADV_WILLNEED && !refcount_read(&bo->usecnt)) { /* If the BO is about to be marked as purgeable, is not used * and is not already purgeable or purged, add it to the * purgeable list. */ vc4_bo_add_to_purgeable_pool(bo); } else if (args->madv == VC4_MADV_WILLNEED && bo->madv == VC4_MADV_DONTNEED && !refcount_read(&bo->usecnt)) { /* The BO has not been purged yet, just remove it from * the purgeable list. */ vc4_bo_remove_from_purgeable_pool(bo); } /* Save the purged state. */ args->retained = bo->madv != __VC4_MADV_PURGED; /* Update internal madv state only if the bo was not purged. */ if (bo->madv != __VC4_MADV_PURGED) bo->madv = args->madv; mutex_unlock(&bo->madv_lock); ret = 0; out_put_gem: drm_gem_object_put(gem_obj); return ret; }
linux-master
drivers/gpu/drm/vc4/vc4_gem.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2014-2015 Broadcom * Copyright (C) 2013 Red Hat */ /** * DOC: Broadcom VC4 Graphics Driver * * The Broadcom VideoCore 4 (present in the Raspberry Pi) contains a * OpenGL ES 2.0-compatible 3D engine called V3D, and a highly * configurable display output pipeline that supports HDMI, DSI, DPI, * and Composite TV output. * * The 3D engine also has an interface for submitting arbitrary * compute shader-style jobs using the same shader processor as is * used for vertex and fragment shaders in GLES 2.0. However, given * that the hardware isn't able to expose any standard interfaces like * OpenGL compute shaders or OpenCL, it isn't supported by this * driver. */ #include <linux/clk.h> #include <linux/component.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_vblank.h> #include <soc/bcm2835/raspberrypi-firmware.h> #include "uapi/drm/vc4_drm.h" #include "vc4_drv.h" #include "vc4_regs.h" #define DRIVER_NAME "vc4" #define DRIVER_DESC "Broadcom VC4 graphics" #define DRIVER_DATE "20140616" #define DRIVER_MAJOR 0 #define DRIVER_MINOR 0 #define DRIVER_PATCHLEVEL 0 /* Helper function for mapping the regs on a platform device. 
 */
/* Map register resource @index of @pdev; returns the mapping or an ERR_PTR. */
void __iomem *vc4_ioremap_regs(struct platform_device *pdev, int index)
{
	void __iomem *map;

	map = devm_platform_ioremap_resource(pdev, index);
	if (IS_ERR(map))
		return map;

	/*
	 * NOTE(review): both branches return @map unchanged, so the IS_ERR()
	 * check above is redundant — callers must IS_ERR()-check the result
	 * either way.
	 */
	return map;
}

/*
 * Clamp the dumb-buffer pitch/size requested by userspace up to the
 * minimum the hardware needs for the given width/height/bpp.
 * Always returns 0.
 */
int vc4_dumb_fixup_args(struct drm_mode_create_dumb *args)
{
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	return 0;
}

/* Dumb-buffer creation for the VC5 (display-only) variant of the driver. */
static int vc5_dumb_create(struct drm_file *file_priv,
			   struct drm_device *dev,
			   struct drm_mode_create_dumb *args)
{
	int ret;

	ret = vc4_dumb_fixup_args(args);
	if (ret)
		return ret;

	return drm_gem_dma_dumb_create_internal(file_priv, dev, args);
}

/*
 * DRM_IOCTL_VC4_GET_PARAM: report V3D identity registers and feature
 * flags to userspace.  The IDENT reads require the V3D power domain to
 * be up, hence the pm_get/pm_put bracketing.
 */
static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_get_param *args = data;
	int ret;

	if (args->pad != 0)
		return -EINVAL;

	/* The 3D-engine params make no sense on the VC5 display-only driver. */
	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	if (!vc4->v3d)
		return -ENODEV;

	switch (args->param) {
	case DRM_VC4_PARAM_V3D_IDENT0:
		ret = vc4_v3d_pm_get(vc4);
		if (ret)
			return ret;
		args->value = V3D_READ(V3D_IDENT0);
		vc4_v3d_pm_put(vc4);
		break;
	case DRM_VC4_PARAM_V3D_IDENT1:
		ret = vc4_v3d_pm_get(vc4);
		if (ret)
			return ret;
		args->value = V3D_READ(V3D_IDENT1);
		vc4_v3d_pm_put(vc4);
		break;
	case DRM_VC4_PARAM_V3D_IDENT2:
		ret = vc4_v3d_pm_get(vc4);
		if (ret)
			return ret;
		args->value = V3D_READ(V3D_IDENT2);
		vc4_v3d_pm_put(vc4);
		break;
	case DRM_VC4_PARAM_SUPPORTS_BRANCHES:
	case DRM_VC4_PARAM_SUPPORTS_ETC1:
	case DRM_VC4_PARAM_SUPPORTS_THREADED_FS:
	case DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER:
	case DRM_VC4_PARAM_SUPPORTS_MADVISE:
	case DRM_VC4_PARAM_SUPPORTS_PERFMON:
		/* All feature flags are unconditionally supported on VC4. */
		args->value = true;
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", args->param);
		return -EINVAL;
	}

	return 0;
}

/* Per-open-file setup: allocate the vc4_file and its perfmon state. */
static int vc4_open(struct drm_device *dev, struct drm_file *file)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_file *vc4file;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	vc4file =
kzalloc(sizeof(*vc4file), GFP_KERNEL);
	if (!vc4file)
		return -ENOMEM;
	vc4file->dev = vc4;

	vc4_perfmon_open_file(vc4file);
	file->driver_priv = vc4file;

	return 0;
}

/* Per-open-file teardown: drop bin BO usage and free perfmon state. */
static void vc4_close(struct drm_device *dev, struct drm_file *file)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_file *vc4file = file->driver_priv;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	/* Balance the bin BO reference this file took during CL submission. */
	if (vc4file->bin_bo_used)
		vc4_v3d_bin_bo_put(vc4);

	vc4_perfmon_close_file(vc4file);
	kfree(vc4file);
}

DEFINE_DRM_GEM_FOPS(vc4_drm_fops);

/*
 * VC4 render-node ioctl table.  GET_HANG_STATE is root-only since it
 * exposes raw GPU state; everything else is allowed on render nodes.
 */
static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl, DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(VC4_GET_PARAM, vc4_get_param_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VC4_SET_TILING, vc4_set_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VC4_GET_TILING, vc4_get_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VC4_LABEL_BO, vc4_label_bo_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VC4_GEM_MADVISE, vc4_gem_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VC4_PERFMON_CREATE, vc4_perfmon_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VC4_PERFMON_DESTROY, vc4_perfmon_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VC4_PERFMON_GET_VALUES, vc4_perfmon_get_values_ioctl, DRM_RENDER_ALLOW),
};

/* Driver description for the original VC4 (render + display) hardware. */
const struct drm_driver vc4_drm_driver = {
	.driver_features = (DRIVER_MODESET |
			    DRIVER_ATOMIC |
			    DRIVER_GEM |
			    DRIVER_RENDER |
			    DRIVER_SYNCOBJ),
	.open = vc4_open,
	.postclose = vc4_close,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = vc4_debugfs_init,
#endif

	.gem_create_object =
vc4_create_object,

	DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(vc4_bo_dumb_create),

	.ioctls = vc4_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(vc4_drm_ioctls),
	.fops = &vc4_drm_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

/*
 * Driver description for the BCM2711 "VC5" variant: display only, no
 * render node, no VC4 ioctls.
 */
const struct drm_driver vc5_drm_driver = {
	.driver_features = (DRIVER_MODESET |
			    DRIVER_ATOMIC |
			    DRIVER_GEM),

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = vc4_debugfs_init,
#endif

	DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(vc5_dumb_create),

	.fops = &vc4_drm_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

/*
 * Add every bound platform device of each sub-driver in @drivers to the
 * component @match list, so the aggregate device only binds once all
 * components are present.
 */
static void vc4_match_add_drivers(struct device *dev,
				  struct component_match **match,
				  struct platform_driver *const *drivers,
				  int count)
{
	int i;

	for (i = 0; i < count; i++) {
		struct device_driver *drv = &drivers[i]->driver;
		struct device *p = NULL, *d;

		/* Walk all devices bound to this driver, keeping refcounts balanced. */
		while ((d = platform_find_device_by_driver(p, drv))) {
			put_device(p);
			component_match_add(dev, match, component_compare_dev, d);
			p = d;
		}

		put_device(p);
	}
}

/* devm action: unbind all components when the aggregate device goes away. */
static void vc4_component_unbind_all(void *ptr)
{
	struct vc4_dev *vc4 = ptr;

	component_unbind_all(vc4->dev, &vc4->base);
}

/* Nodes whose DT dma-ranges should configure DMA for the whole device. */
static const struct of_device_id vc4_dma_range_matches[] = {
	{ .compatible = "brcm,bcm2711-hvs" },
	{ .compatible = "brcm,bcm2835-hvs" },
	{ .compatible = "brcm,bcm2835-v3d" },
	{ .compatible = "brcm,cygnus-v3d" },
	{ .compatible = "brcm,vc4-v3d" },
	{}
};

/*
 * Master bind callback: picks the VC4 or VC5 driver flavour from the DT
 * compatible, allocates the drm_device, initializes GEM/KMS state, binds
 * all components, and registers the device.
 */
static int vc4_drm_bind(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	const struct drm_driver *driver;
	struct rpi_firmware *firmware = NULL;
	struct drm_device *drm;
	struct vc4_dev *vc4;
	struct device_node *node;
	struct drm_crtc *crtc;
	bool is_vc5;
	int ret = 0;

	dev->coherent_dma_mask = DMA_BIT_MASK(32);

	is_vc5 = of_device_is_compatible(dev->of_node, "brcm,bcm2711-vc5");
	if (is_vc5)
		driver = &vc5_drm_driver;
	else
		driver = &vc4_drm_driver;

	node =
of_find_matching_node_and_match(NULL, vc4_dma_range_matches,
					       NULL);
	if (node) {
		/* Inherit dma-ranges from the HVS/V3D node for the whole device. */
		ret = of_dma_configure(dev, node, true);
		of_node_put(node);

		if (ret)
			return ret;
	}

	vc4 = devm_drm_dev_alloc(dev, driver, struct vc4_dev, base);
	if (IS_ERR(vc4))
		return PTR_ERR(vc4);
	vc4->is_vc5 = is_vc5;
	vc4->dev = dev;

	drm = &vc4->base;
	platform_set_drvdata(pdev, drm);

	/* The BO cache and bin BO only exist on the VC4 render path. */
	if (!is_vc5) {
		ret = drmm_mutex_init(drm, &vc4->bin_bo_lock);
		if (ret)
			return ret;

		ret = vc4_bo_cache_init(drm);
		if (ret)
			return ret;
	}

	ret = drmm_mode_config_init(drm);
	if (ret)
		return ret;

	if (!is_vc5) {
		ret = vc4_gem_init(drm);
		if (ret)
			return ret;
	}

	node = of_find_compatible_node(NULL, NULL, "raspberrypi,bcm2835-firmware");
	if (node) {
		firmware = rpi_firmware_get(node);
		of_node_put(node);

		/* Firmware node exists but driver not ready yet: retry later. */
		if (!firmware)
			return -EPROBE_DEFER;
	}

	ret = drm_aperture_remove_framebuffers(driver);
	if (ret)
		return ret;

	if (firmware) {
		/* Ask the firmware to stop driving the display before we take over. */
		ret = rpi_firmware_property(firmware,
					    RPI_FIRMWARE_NOTIFY_DISPLAY_DONE,
					    NULL, 0);
		if (ret)
			drm_warn(drm, "Couldn't stop firmware display driver: %d\n", ret);

		rpi_firmware_put(firmware);
	}

	ret = component_bind_all(dev, drm);
	if (ret)
		return ret;

	/* From here on, component unbind is handled by the devm action. */
	ret = devm_add_action_or_reset(dev, vc4_component_unbind_all, vc4);
	if (ret)
		return ret;

	ret = vc4_plane_create_additional_planes(drm);
	if (ret)
		goto unbind_all;

	ret = vc4_kms_load(drm);
	if (ret < 0)
		goto unbind_all;

	drm_for_each_crtc(crtc, drm)
		vc4_crtc_disable_at_boot(crtc);

	ret = drm_dev_register(drm, 0);
	if (ret < 0)
		goto unbind_all;

	drm_fbdev_dma_setup(drm, 16);

	return 0;

unbind_all:
	return ret;
}

/* Master unbind callback: unplug and shut down the DRM device. */
static void vc4_drm_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	drm_dev_unplug(drm);
	drm_atomic_helper_shutdown(drm);
}

static const struct component_master_ops vc4_drm_ops = {
	.bind = vc4_drm_bind,
	.unbind = vc4_drm_unbind,
};

/*
 * This list determines the binding order of our components, and we have
 * a few constraints:
 *   - The TXP driver needs to be bound before the PixelValves (CRTC)
 *     but after the HVS to set the possible_crtc field
properly
 *   - The HDMI driver needs to be bound after the HVS so that we can
 *     lookup the HVS maximum core clock rate and figure out if we
 *     support 4kp60 or not.
 */
static struct platform_driver *const component_drivers[] = {
	&vc4_hvs_driver,
	&vc4_hdmi_driver,
	&vc4_vec_driver,
	&vc4_dpi_driver,
	&vc4_dsi_driver,
	&vc4_txp_driver,
	&vc4_crtc_driver,
	&vc4_v3d_driver,
};

/* Probe: build the component match list and register the aggregate device. */
static int vc4_platform_drm_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
	struct device *dev = &pdev->dev;

	vc4_match_add_drivers(dev, &match,
			      component_drivers, ARRAY_SIZE(component_drivers));

	return component_master_add_with_match(dev, &vc4_drm_ops, match);
}

static void vc4_platform_drm_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &vc4_drm_ops);
}

static const struct of_device_id vc4_of_match[] = {
	{ .compatible = "brcm,bcm2711-vc5", },
	{ .compatible = "brcm,bcm2835-vc4", },
	{ .compatible = "brcm,cygnus-vc4", },
	{},
};
MODULE_DEVICE_TABLE(of, vc4_of_match);

static struct platform_driver vc4_platform_driver = {
	.probe		= vc4_platform_drm_probe,
	.remove_new	= vc4_platform_drm_remove,
	.driver		= {
		.name	= "vc4-drm",
		.of_match_table = vc4_of_match,
	},
};

/*
 * Module init: register all component sub-drivers first, then the master
 * platform driver.  Unwind the sub-drivers if the master registration fails.
 */
static int __init vc4_drm_register(void)
{
	int ret;

	if (drm_firmware_drivers_only())
		return -ENODEV;

	ret = platform_register_drivers(component_drivers,
					ARRAY_SIZE(component_drivers));
	if (ret)
		return ret;

	ret = platform_driver_register(&vc4_platform_driver);
	if (ret)
		platform_unregister_drivers(component_drivers,
					    ARRAY_SIZE(component_drivers));

	return ret;
}

static void __exit vc4_drm_unregister(void)
{
	platform_unregister_drivers(component_drivers,
				    ARRAY_SIZE(component_drivers));
	platform_driver_unregister(&vc4_platform_driver);
}

module_init(vc4_drm_register);
module_exit(vc4_drm_unregister);

MODULE_ALIAS("platform:vc4-drm");
MODULE_SOFTDEP("pre: snd-soc-hdmi-codec");
MODULE_DESCRIPTION("Broadcom VC4 DRM Driver");
MODULE_AUTHOR("Eric Anholt <[email protected]>");
MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/vc4/vc4_drv.c
// SPDX-License-Identifier: GPL-2.0

#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_modeset_helper_vtables.h>

#include <kunit/test.h>

#include "vc4_mock.h"

/* No-op helpers: the mock connector needs no real mode handling. */
static const struct drm_connector_helper_funcs vc4_dummy_connector_helper_funcs = {
};

static const struct drm_connector_funcs vc4_dummy_connector_funcs = {
	.atomic_destroy_state	= drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state	= drm_atomic_helper_connector_duplicate_state,
	.reset			= drm_atomic_helper_connector_reset,
};

/*
 * Create a mock encoder + connector pair attached to @crtc for KUnit
 * tests.  Allocation is test-managed and init is drmm-managed, so no
 * explicit teardown is needed.
 */
struct vc4_dummy_output *vc4_dummy_output(struct kunit *test,
					  struct drm_device *drm,
					  struct drm_crtc *crtc,
					  enum vc4_encoder_type vc4_encoder_type,
					  unsigned int kms_encoder_type,
					  unsigned int connector_type)
{
	struct vc4_dummy_output *dummy_output;
	struct drm_connector *conn;
	struct drm_encoder *enc;
	int ret;

	dummy_output = kunit_kzalloc(test, sizeof(*dummy_output), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dummy_output);
	dummy_output->encoder.type = vc4_encoder_type;

	enc = &dummy_output->encoder.base;
	ret = drmm_encoder_init(drm, enc,
				NULL,
				kms_encoder_type,
				NULL);
	KUNIT_ASSERT_EQ(test, ret, 0);
	enc->possible_crtcs = drm_crtc_mask(crtc);

	conn = &dummy_output->connector;
	ret = drmm_connector_init(drm, conn,
				  &vc4_dummy_connector_funcs,
				  connector_type,
				  NULL);
	KUNIT_ASSERT_EQ(test, ret, 0);

	drm_connector_helper_add(conn, &vc4_dummy_connector_helper_funcs);
	drm_connector_attach_encoder(conn, enc);

	return dummy_output;
}

static const struct drm_display_mode default_mode = {
	DRM_SIMPLE_MODE(640, 480, 64, 48)
};

/*
 * In @state, enable the output driven by the encoder of @type: route the
 * connector to its CRTC, set a default mode, and mark the CRTC active.
 * Returns 0 (failures are reported through KUnit assertions).
 */
int vc4_mock_atomic_add_output(struct kunit *test,
			       struct drm_atomic_state *state,
			       enum vc4_encoder_type type)
{
	struct drm_device *drm = state->dev;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;
	struct vc4_dummy_output *output;
	struct drm_connector *conn;
	struct drm_encoder *encoder;
	struct drm_crtc *crtc;
	int ret;

	encoder = vc4_find_encoder_by_type(drm, type);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder);

	crtc = vc4_find_crtc_for_encoder(test, drm, encoder);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc);

	output = encoder_to_vc4_dummy_output(encoder);
	conn = &output->connector;
	conn_state = drm_atomic_get_connector_state(state, conn);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);

	ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
	KUNIT_EXPECT_EQ(test, ret, 0);

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);

	ret = drm_atomic_set_mode_for_crtc(crtc_state, &default_mode);
	KUNIT_EXPECT_EQ(test, ret, 0);

	crtc_state->active = true;

	return 0;
}

/*
 * Inverse of vc4_mock_atomic_add_output(): deactivate the CRTC, clear its
 * mode, and detach the connector for the encoder of @type.
 */
int vc4_mock_atomic_del_output(struct kunit *test,
			       struct drm_atomic_state *state,
			       enum vc4_encoder_type type)
{
	struct drm_device *drm = state->dev;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;
	struct vc4_dummy_output *output;
	struct drm_connector *conn;
	struct drm_encoder *encoder;
	struct drm_crtc *crtc;
	int ret;

	encoder = vc4_find_encoder_by_type(drm, type);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder);

	crtc = vc4_find_crtc_for_encoder(test, drm, encoder);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc);

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);

	crtc_state->active = false;

	ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
	KUNIT_ASSERT_EQ(test, ret, 0);

	output = encoder_to_vc4_dummy_output(encoder);
	conn = &output->connector;
	conn_state = drm_atomic_get_connector_state(state, conn);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);

	ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
	KUNIT_ASSERT_EQ(test, ret, 0);

	return 0;
}
linux-master
drivers/gpu/drm/vc4/tests/vc4_mock_output.c
// SPDX-License-Identifier: GPL-2.0 #include <drm/drm_atomic_state_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_plane.h> #include <kunit/test.h> #include "vc4_mock.h" static const struct drm_plane_helper_funcs vc4_dummy_plane_helper_funcs = { }; static const struct drm_plane_funcs vc4_dummy_plane_funcs = { .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, .reset = drm_atomic_helper_plane_reset, }; static const uint32_t vc4_dummy_plane_formats[] = { DRM_FORMAT_XRGB8888, }; struct vc4_dummy_plane *vc4_dummy_plane(struct kunit *test, struct drm_device *drm, enum drm_plane_type type) { struct vc4_dummy_plane *dummy_plane; struct drm_plane *plane; dummy_plane = drmm_universal_plane_alloc(drm, struct vc4_dummy_plane, plane.base, 0, &vc4_dummy_plane_funcs, vc4_dummy_plane_formats, ARRAY_SIZE(vc4_dummy_plane_formats), NULL, DRM_PLANE_TYPE_PRIMARY, NULL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dummy_plane); plane = &dummy_plane->plane.base; drm_plane_helper_add(plane, &vc4_dummy_plane_helper_funcs); return dummy_plane; }
linux-master
drivers/gpu/drm/vc4/tests/vc4_mock_plane.c
// SPDX-License-Identifier: GPL-2.0 #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_atomic_state_helper.h> #include <drm/drm_atomic_uapi.h> #include <drm/drm_crtc.h> #include <drm/drm_drv.h> #include <drm/drm_fourcc.h> #include <drm/drm_kunit_helpers.h> #include <drm/drm_mode.h> #include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_plane.h> #include <kunit/test.h> #include "../vc4_drv.h" #include "vc4_mock.h" struct pv_muxing_priv { struct vc4_dev *vc4; struct drm_atomic_state *state; }; static bool check_fifo_conflict(struct kunit *test, const struct drm_atomic_state *state) { struct vc4_hvs_state *hvs_state; unsigned int used_fifos = 0; unsigned int i; hvs_state = vc4_hvs_get_new_global_state(state); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hvs_state); for (i = 0; i < HVS_NUM_CHANNELS; i++) { if (!hvs_state->fifo_state[i].in_use) continue; KUNIT_EXPECT_FALSE(test, used_fifos & BIT(i)); used_fifos |= BIT(i); } return true; } struct encoder_constraint { enum vc4_encoder_type type; unsigned int *channels; size_t nchannels; }; #define ENCODER_CONSTRAINT(_type, ...) 
\ { \ .type = _type, \ .channels = (unsigned int[]) { __VA_ARGS__ }, \ .nchannels = sizeof((unsigned int[]) { __VA_ARGS__ }) / \ sizeof(unsigned int), \ } static bool __check_encoder_constraints(const struct encoder_constraint *constraints, size_t nconstraints, enum vc4_encoder_type type, unsigned int channel) { unsigned int i; for (i = 0; i < nconstraints; i++) { const struct encoder_constraint *constraint = &constraints[i]; unsigned int j; if (constraint->type != type) continue; for (j = 0; j < constraint->nchannels; j++) { unsigned int _channel = constraint->channels[j]; if (channel != _channel) continue; return true; } } return false; } static const struct encoder_constraint vc4_encoder_constraints[] = { ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DPI, 0), ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DSI0, 0), ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_HDMI0, 1), ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_VEC, 1), ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_TXP, 2), ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DSI1, 2), }; static const struct encoder_constraint vc5_encoder_constraints[] = { ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DPI, 0), ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DSI0, 0), ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_VEC, 1), ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_TXP, 0, 2), ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DSI1, 0, 1, 2), ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_HDMI0, 0, 1, 2), ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_HDMI1, 0, 1, 2), }; static bool check_vc4_encoder_constraints(enum vc4_encoder_type type, unsigned int channel) { return __check_encoder_constraints(vc4_encoder_constraints, ARRAY_SIZE(vc4_encoder_constraints), type, channel); } static bool check_vc5_encoder_constraints(enum vc4_encoder_type type, unsigned int channel) { return __check_encoder_constraints(vc5_encoder_constraints, ARRAY_SIZE(vc5_encoder_constraints), type, channel); } static struct vc4_crtc_state * get_vc4_crtc_state_for_encoder(struct kunit *test, const struct drm_atomic_state *state, enum vc4_encoder_type type) { struct drm_device *drm 
= state->dev; struct drm_crtc_state *new_crtc_state; struct drm_encoder *encoder; struct drm_crtc *crtc; encoder = vc4_find_encoder_by_type(drm, type); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder); crtc = vc4_find_crtc_for_encoder(test, drm, encoder); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc); new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); if (!new_crtc_state) return NULL; return to_vc4_crtc_state(new_crtc_state); } static bool check_channel_for_encoder(struct kunit *test, const struct drm_atomic_state *state, enum vc4_encoder_type type, bool (*check_fn)(enum vc4_encoder_type type, unsigned int channel)) { struct vc4_crtc_state *new_vc4_crtc_state; struct vc4_hvs_state *new_hvs_state; unsigned int channel; new_hvs_state = vc4_hvs_get_new_global_state(state); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_hvs_state); new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state, type); KUNIT_ASSERT_NOT_NULL(test, new_vc4_crtc_state); channel = new_vc4_crtc_state->assigned_channel; KUNIT_EXPECT_NE(test, channel, VC4_HVS_CHANNEL_DISABLED); KUNIT_EXPECT_TRUE(test, new_hvs_state->fifo_state[channel].in_use); KUNIT_EXPECT_TRUE(test, check_fn(type, channel)); return true; } struct pv_muxing_param { const char *name; struct vc4_dev *(*mock_fn)(struct kunit *test); bool (*check_fn)(enum vc4_encoder_type type, unsigned int channel); enum vc4_encoder_type *encoders; size_t nencoders; }; static void vc4_test_pv_muxing_desc(const struct pv_muxing_param *t, char *desc) { strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE); } #define PV_MUXING_TEST(_name, _mock_fn, _check_fn, ...) \ { \ .name = _name, \ .mock_fn = &_mock_fn, \ .check_fn = &_check_fn, \ .encoders = (enum vc4_encoder_type[]) { __VA_ARGS__ }, \ .nencoders = sizeof((enum vc4_encoder_type[]) { __VA_ARGS__ }) / \ sizeof(enum vc4_encoder_type), \ } #define VC4_PV_MUXING_TEST(_name, ...) \ PV_MUXING_TEST(_name, vc4_mock_device, check_vc4_encoder_constraints, __VA_ARGS__) #define VC5_PV_MUXING_TEST(_name, ...) 
\ PV_MUXING_TEST(_name, vc5_mock_device, check_vc5_encoder_constraints, __VA_ARGS__) static const struct pv_muxing_param vc4_test_pv_muxing_params[] = { VC4_PV_MUXING_TEST("1 output: DSI0", VC4_ENCODER_TYPE_DSI0), VC4_PV_MUXING_TEST("1 output: DPI", VC4_ENCODER_TYPE_DPI), VC4_PV_MUXING_TEST("1 output: HDMI0", VC4_ENCODER_TYPE_HDMI0), VC4_PV_MUXING_TEST("1 output: VEC", VC4_ENCODER_TYPE_VEC), VC4_PV_MUXING_TEST("1 output: DSI1", VC4_ENCODER_TYPE_DSI1), VC4_PV_MUXING_TEST("1 output: TXP", VC4_ENCODER_TYPE_TXP), VC4_PV_MUXING_TEST("2 outputs: DSI0, HDMI0", VC4_ENCODER_TYPE_DSI0, VC4_ENCODER_TYPE_HDMI0), VC4_PV_MUXING_TEST("2 outputs: DSI0, VEC", VC4_ENCODER_TYPE_DSI0, VC4_ENCODER_TYPE_VEC), VC4_PV_MUXING_TEST("2 outputs: DSI0, DSI1", VC4_ENCODER_TYPE_DSI0, VC4_ENCODER_TYPE_DSI1), VC4_PV_MUXING_TEST("2 outputs: DSI0, TXP", VC4_ENCODER_TYPE_DSI0, VC4_ENCODER_TYPE_TXP), VC4_PV_MUXING_TEST("2 outputs: DPI, HDMI0", VC4_ENCODER_TYPE_DPI, VC4_ENCODER_TYPE_HDMI0), VC4_PV_MUXING_TEST("2 outputs: DPI, VEC", VC4_ENCODER_TYPE_DPI, VC4_ENCODER_TYPE_VEC), VC4_PV_MUXING_TEST("2 outputs: DPI, DSI1", VC4_ENCODER_TYPE_DPI, VC4_ENCODER_TYPE_DSI1), VC4_PV_MUXING_TEST("2 outputs: DPI, TXP", VC4_ENCODER_TYPE_DPI, VC4_ENCODER_TYPE_TXP), VC4_PV_MUXING_TEST("2 outputs: HDMI0, DSI1", VC4_ENCODER_TYPE_HDMI0, VC4_ENCODER_TYPE_DSI1), VC4_PV_MUXING_TEST("2 outputs: HDMI0, TXP", VC4_ENCODER_TYPE_HDMI0, VC4_ENCODER_TYPE_TXP), VC4_PV_MUXING_TEST("2 outputs: VEC, DSI1", VC4_ENCODER_TYPE_VEC, VC4_ENCODER_TYPE_DSI1), VC4_PV_MUXING_TEST("2 outputs: VEC, TXP", VC4_ENCODER_TYPE_VEC, VC4_ENCODER_TYPE_TXP), VC4_PV_MUXING_TEST("3 outputs: DSI0, HDMI0, DSI1", VC4_ENCODER_TYPE_DSI0, VC4_ENCODER_TYPE_HDMI0, VC4_ENCODER_TYPE_DSI1), VC4_PV_MUXING_TEST("3 outputs: DSI0, HDMI0, TXP", VC4_ENCODER_TYPE_DSI0, VC4_ENCODER_TYPE_HDMI0, VC4_ENCODER_TYPE_TXP), VC4_PV_MUXING_TEST("3 outputs: DSI0, VEC, DSI1", VC4_ENCODER_TYPE_DSI0, VC4_ENCODER_TYPE_VEC, VC4_ENCODER_TYPE_DSI1), VC4_PV_MUXING_TEST("3 outputs: DSI0, VEC, TXP", 
VC4_ENCODER_TYPE_DSI0, VC4_ENCODER_TYPE_VEC, VC4_ENCODER_TYPE_TXP), VC4_PV_MUXING_TEST("3 outputs: DPI, HDMI0, DSI1", VC4_ENCODER_TYPE_DPI, VC4_ENCODER_TYPE_HDMI0, VC4_ENCODER_TYPE_DSI1), VC4_PV_MUXING_TEST("3 outputs: DPI, HDMI0, TXP", VC4_ENCODER_TYPE_DPI, VC4_ENCODER_TYPE_HDMI0, VC4_ENCODER_TYPE_TXP), VC4_PV_MUXING_TEST("3 outputs: DPI, VEC, DSI1", VC4_ENCODER_TYPE_DPI, VC4_ENCODER_TYPE_VEC, VC4_ENCODER_TYPE_DSI1), VC4_PV_MUXING_TEST("3 outputs: DPI, VEC, TXP", VC4_ENCODER_TYPE_DPI, VC4_ENCODER_TYPE_VEC, VC4_ENCODER_TYPE_TXP), }; KUNIT_ARRAY_PARAM(vc4_test_pv_muxing, vc4_test_pv_muxing_params, vc4_test_pv_muxing_desc); static const struct pv_muxing_param vc4_test_pv_muxing_invalid_params[] = { VC4_PV_MUXING_TEST("DPI/DSI0 Conflict", VC4_ENCODER_TYPE_DPI, VC4_ENCODER_TYPE_DSI0), VC4_PV_MUXING_TEST("TXP/DSI1 Conflict", VC4_ENCODER_TYPE_TXP, VC4_ENCODER_TYPE_DSI1), VC4_PV_MUXING_TEST("HDMI0/VEC Conflict", VC4_ENCODER_TYPE_HDMI0, VC4_ENCODER_TYPE_VEC), VC4_PV_MUXING_TEST("More than 3 outputs: DSI0, HDMI0, DSI1, TXP", VC4_ENCODER_TYPE_DSI0, VC4_ENCODER_TYPE_HDMI0, VC4_ENCODER_TYPE_DSI1, VC4_ENCODER_TYPE_TXP), VC4_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, DSI1, TXP", VC4_ENCODER_TYPE_DSI0, VC4_ENCODER_TYPE_VEC, VC4_ENCODER_TYPE_DSI1, VC4_ENCODER_TYPE_TXP), VC4_PV_MUXING_TEST("More than 3 outputs: DPI, HDMI0, DSI1, TXP", VC4_ENCODER_TYPE_DPI, VC4_ENCODER_TYPE_HDMI0, VC4_ENCODER_TYPE_DSI1, VC4_ENCODER_TYPE_TXP), VC4_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, DSI1, TXP", VC4_ENCODER_TYPE_DPI, VC4_ENCODER_TYPE_VEC, VC4_ENCODER_TYPE_DSI1, VC4_ENCODER_TYPE_TXP), }; KUNIT_ARRAY_PARAM(vc4_test_pv_muxing_invalid, vc4_test_pv_muxing_invalid_params, vc4_test_pv_muxing_desc); static const struct pv_muxing_param vc5_test_pv_muxing_params[] = { VC5_PV_MUXING_TEST("1 output: DPI", VC4_ENCODER_TYPE_DPI), VC5_PV_MUXING_TEST("1 output: DSI0", VC4_ENCODER_TYPE_DSI0), VC5_PV_MUXING_TEST("1 output: DSI1", VC4_ENCODER_TYPE_DSI1), VC5_PV_MUXING_TEST("1 output: HDMI0", 
VC4_ENCODER_TYPE_HDMI0), VC5_PV_MUXING_TEST("1 output: HDMI1", VC4_ENCODER_TYPE_HDMI1), VC5_PV_MUXING_TEST("1 output: VEC", VC4_ENCODER_TYPE_VEC), VC5_PV_MUXING_TEST("2 outputs: DPI, DSI1", VC4_ENCODER_TYPE_DPI, VC4_ENCODER_TYPE_DSI1), VC5_PV_MUXING_TEST("2 outputs: DPI, HDMI0", VC4_ENCODER_TYPE_DPI, VC4_ENCODER_TYPE_HDMI0), VC5_PV_MUXING_TEST("2 outputs: DPI, HDMI1", VC4_ENCODER_TYPE_DPI, VC4_ENCODER_TYPE_HDMI1), VC5_PV_MUXING_TEST("2 outputs: DPI, TXP", VC4_ENCODER_TYPE_DPI, VC4_ENCODER_TYPE_TXP), VC5_PV_MUXING_TEST("2 outputs: DPI, VEC", VC4_ENCODER_TYPE_DPI, VC4_ENCODER_TYPE_VEC), VC5_PV_MUXING_TEST("2 outputs: DPI, DSI1", VC4_ENCODER_TYPE_DPI, VC4_ENCODER_TYPE_DSI1), VC5_PV_MUXING_TEST("2 outputs: DSI0, DSI1", VC4_ENCODER_TYPE_DSI0, VC4_ENCODER_TYPE_DSI1), VC5_PV_MUXING_TEST("2 outputs: DSI0, HDMI0", VC4_ENCODER_TYPE_DSI0, VC4_ENCODER_TYPE_HDMI0), VC5_PV_MUXING_TEST("2 outputs: DSI0, HDMI1", VC4_ENCODER_TYPE_DSI0, VC4_ENCODER_TYPE_HDMI1), VC5_PV_MUXING_TEST("2 outputs: DSI0, TXP", VC4_ENCODER_TYPE_DSI0, VC4_ENCODER_TYPE_TXP), VC5_PV_MUXING_TEST("2 outputs: DSI0, VEC", VC4_ENCODER_TYPE_DSI0, VC4_ENCODER_TYPE_VEC), VC5_PV_MUXING_TEST("2 outputs: DSI0, DSI1", VC4_ENCODER_TYPE_DSI0, VC4_ENCODER_TYPE_DSI1), VC5_PV_MUXING_TEST("2 outputs: DSI1, VEC", VC4_ENCODER_TYPE_DSI1, VC4_ENCODER_TYPE_VEC), VC5_PV_MUXING_TEST("2 outputs: DSI1, TXP", VC4_ENCODER_TYPE_DSI1, VC4_ENCODER_TYPE_TXP), VC5_PV_MUXING_TEST("2 outputs: DSI1, HDMI0", VC4_ENCODER_TYPE_DSI1, VC4_ENCODER_TYPE_HDMI0), VC5_PV_MUXING_TEST("2 outputs: DSI1, HDMI1", VC4_ENCODER_TYPE_DSI1, VC4_ENCODER_TYPE_HDMI1), VC5_PV_MUXING_TEST("2 outputs: HDMI0, VEC", VC4_ENCODER_TYPE_HDMI0, VC4_ENCODER_TYPE_VEC), VC5_PV_MUXING_TEST("2 outputs: HDMI0, TXP", VC4_ENCODER_TYPE_HDMI0, VC4_ENCODER_TYPE_TXP), VC5_PV_MUXING_TEST("2 outputs: HDMI0, HDMI1", VC4_ENCODER_TYPE_HDMI0, VC4_ENCODER_TYPE_HDMI1), VC5_PV_MUXING_TEST("2 outputs: HDMI1, VEC", VC4_ENCODER_TYPE_HDMI1, VC4_ENCODER_TYPE_VEC), VC5_PV_MUXING_TEST("2 outputs: HDMI1, 
TXP", VC4_ENCODER_TYPE_HDMI1, VC4_ENCODER_TYPE_TXP), VC5_PV_MUXING_TEST("2 outputs: TXP, VEC", VC4_ENCODER_TYPE_TXP, VC4_ENCODER_TYPE_VEC), VC5_PV_MUXING_TEST("3 outputs: DPI, VEC, TXP", VC4_ENCODER_TYPE_DPI, VC4_ENCODER_TYPE_VEC, VC4_ENCODER_TYPE_TXP), VC5_PV_MUXING_TEST("3 outputs: DPI, VEC, DSI1", VC4_ENCODER_TYPE_DPI, VC4_ENCODER_TYPE_VEC, VC4_ENCODER_TYPE_DSI1), VC5_PV_MUXING_TEST("3 outputs: DPI, VEC, HDMI0", VC4_ENCODER_TYPE_DPI, VC4_ENCODER_TYPE_VEC, VC4_ENCODER_TYPE_HDMI0), VC5_PV_MUXING_TEST("3 outputs: DPI, VEC, HDMI1", VC4_ENCODER_TYPE_DPI, VC4_ENCODER_TYPE_VEC, VC4_ENCODER_TYPE_HDMI1), VC5_PV_MUXING_TEST("3 outputs: DPI, TXP, DSI1", VC4_ENCODER_TYPE_DPI, VC4_ENCODER_TYPE_TXP, VC4_ENCODER_TYPE_DSI1), VC5_PV_MUXING_TEST("3 outputs: DPI, TXP, HDMI0", VC4_ENCODER_TYPE_DPI, VC4_ENCODER_TYPE_TXP, VC4_ENCODER_TYPE_HDMI0), VC5_PV_MUXING_TEST("3 outputs: DPI, TXP, HDMI1", VC4_ENCODER_TYPE_DPI, VC4_ENCODER_TYPE_TXP, VC4_ENCODER_TYPE_HDMI1), VC5_PV_MUXING_TEST("3 outputs: DPI, DSI1, HDMI0", VC4_ENCODER_TYPE_DPI, VC4_ENCODER_TYPE_DSI1, VC4_ENCODER_TYPE_HDMI0), VC5_PV_MUXING_TEST("3 outputs: DPI, DSI1, HDMI1", VC4_ENCODER_TYPE_DPI, VC4_ENCODER_TYPE_DSI1, VC4_ENCODER_TYPE_HDMI1), VC5_PV_MUXING_TEST("3 outputs: DPI, HDMI0, HDMI1", VC4_ENCODER_TYPE_DPI, VC4_ENCODER_TYPE_HDMI0, VC4_ENCODER_TYPE_HDMI1), VC5_PV_MUXING_TEST("3 outputs: DSI0, VEC, TXP", VC4_ENCODER_TYPE_DSI0, VC4_ENCODER_TYPE_VEC, VC4_ENCODER_TYPE_TXP), VC5_PV_MUXING_TEST("3 outputs: DSI0, VEC, DSI1", VC4_ENCODER_TYPE_DSI0, VC4_ENCODER_TYPE_VEC, VC4_ENCODER_TYPE_DSI1), VC5_PV_MUXING_TEST("3 outputs: DSI0, VEC, HDMI0", VC4_ENCODER_TYPE_DSI0, VC4_ENCODER_TYPE_VEC, VC4_ENCODER_TYPE_HDMI0), VC5_PV_MUXING_TEST("3 outputs: DSI0, VEC, HDMI1", VC4_ENCODER_TYPE_DSI0, VC4_ENCODER_TYPE_VEC, VC4_ENCODER_TYPE_HDMI1), VC5_PV_MUXING_TEST("3 outputs: DSI0, TXP, DSI1", VC4_ENCODER_TYPE_DSI0, VC4_ENCODER_TYPE_TXP, VC4_ENCODER_TYPE_DSI1), VC5_PV_MUXING_TEST("3 outputs: DSI0, TXP, HDMI0", VC4_ENCODER_TYPE_DSI0, 
VC4_ENCODER_TYPE_TXP,
			   VC4_ENCODER_TYPE_HDMI0),
	VC5_PV_MUXING_TEST("3 outputs: DSI0, TXP, HDMI1",
			   VC4_ENCODER_TYPE_DSI0,
			   VC4_ENCODER_TYPE_TXP,
			   VC4_ENCODER_TYPE_HDMI1),
	VC5_PV_MUXING_TEST("3 outputs: DSI0, DSI1, HDMI0",
			   VC4_ENCODER_TYPE_DSI0,
			   VC4_ENCODER_TYPE_DSI1,
			   VC4_ENCODER_TYPE_HDMI0),
	VC5_PV_MUXING_TEST("3 outputs: DSI0, DSI1, HDMI1",
			   VC4_ENCODER_TYPE_DSI0,
			   VC4_ENCODER_TYPE_DSI1,
			   VC4_ENCODER_TYPE_HDMI1),
	VC5_PV_MUXING_TEST("3 outputs: DSI0, HDMI0, HDMI1",
			   VC4_ENCODER_TYPE_DSI0,
			   VC4_ENCODER_TYPE_HDMI0,
			   VC4_ENCODER_TYPE_HDMI1),
};

KUNIT_ARRAY_PARAM(vc5_test_pv_muxing,
		  vc5_test_pv_muxing_params,
		  vc4_test_pv_muxing_desc);

/*
 * Output combinations that the VC5 HVS muxing code is expected to
 * reject: either a direct PV conflict (DPI and DSI0 share a
 * pixelvalve) or more simultaneous outputs than there are HVS FIFOs.
 */
static const struct pv_muxing_param vc5_test_pv_muxing_invalid_params[] = {
	VC5_PV_MUXING_TEST("DPI/DSI0 Conflict",
			   VC4_ENCODER_TYPE_DPI,
			   VC4_ENCODER_TYPE_DSI0),
	VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, DSI1",
			   VC4_ENCODER_TYPE_DPI,
			   VC4_ENCODER_TYPE_VEC,
			   VC4_ENCODER_TYPE_TXP,
			   VC4_ENCODER_TYPE_DSI1),
	VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, HDMI0",
			   VC4_ENCODER_TYPE_DPI,
			   VC4_ENCODER_TYPE_VEC,
			   VC4_ENCODER_TYPE_TXP,
			   VC4_ENCODER_TYPE_HDMI0),
	VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, HDMI1",
			   VC4_ENCODER_TYPE_DPI,
			   VC4_ENCODER_TYPE_VEC,
			   VC4_ENCODER_TYPE_TXP,
			   VC4_ENCODER_TYPE_HDMI1),
	VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, DSI1, HDMI0",
			   VC4_ENCODER_TYPE_DPI,
			   VC4_ENCODER_TYPE_VEC,
			   VC4_ENCODER_TYPE_DSI1,
			   VC4_ENCODER_TYPE_HDMI0),
	VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, DSI1, HDMI1",
			   VC4_ENCODER_TYPE_DPI,
			   VC4_ENCODER_TYPE_VEC,
			   VC4_ENCODER_TYPE_DSI1,
			   VC4_ENCODER_TYPE_HDMI1),
	VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, HDMI0, HDMI1",
			   VC4_ENCODER_TYPE_DPI,
			   VC4_ENCODER_TYPE_VEC,
			   VC4_ENCODER_TYPE_HDMI0,
			   VC4_ENCODER_TYPE_HDMI1),
	VC5_PV_MUXING_TEST("More than 3 outputs: DPI, TXP, DSI1, HDMI0",
			   VC4_ENCODER_TYPE_DPI,
			   VC4_ENCODER_TYPE_TXP,
			   VC4_ENCODER_TYPE_DSI1,
			   VC4_ENCODER_TYPE_HDMI0),
	VC5_PV_MUXING_TEST("More than 3 outputs: DPI, TXP, DSI1, HDMI1",
			   VC4_ENCODER_TYPE_DPI,
			   VC4_ENCODER_TYPE_TXP,
			   VC4_ENCODER_TYPE_DSI1,
			   VC4_ENCODER_TYPE_HDMI1),
	VC5_PV_MUXING_TEST("More than 3 outputs: DPI, TXP, HDMI0, HDMI1",
			   VC4_ENCODER_TYPE_DPI,
			   VC4_ENCODER_TYPE_TXP,
			   VC4_ENCODER_TYPE_HDMI0,
			   VC4_ENCODER_TYPE_HDMI1),
	VC5_PV_MUXING_TEST("More than 3 outputs: DPI, DSI1, HDMI0, HDMI1",
			   VC4_ENCODER_TYPE_DPI,
			   VC4_ENCODER_TYPE_DSI1,
			   VC4_ENCODER_TYPE_HDMI0,
			   VC4_ENCODER_TYPE_HDMI1),
	VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, DSI1, HDMI0",
			   VC4_ENCODER_TYPE_DPI,
			   VC4_ENCODER_TYPE_VEC,
			   VC4_ENCODER_TYPE_TXP,
			   VC4_ENCODER_TYPE_DSI1,
			   VC4_ENCODER_TYPE_HDMI0),
	VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, DSI1, HDMI1",
			   VC4_ENCODER_TYPE_DPI,
			   VC4_ENCODER_TYPE_VEC,
			   VC4_ENCODER_TYPE_TXP,
			   VC4_ENCODER_TYPE_DSI1,
			   VC4_ENCODER_TYPE_HDMI1),
	VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, HDMI0, HDMI1",
			   VC4_ENCODER_TYPE_DPI,
			   VC4_ENCODER_TYPE_VEC,
			   VC4_ENCODER_TYPE_TXP,
			   VC4_ENCODER_TYPE_HDMI0,
			   VC4_ENCODER_TYPE_HDMI1),
	VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, DSI1, HDMI0, HDMI1",
			   VC4_ENCODER_TYPE_DPI,
			   VC4_ENCODER_TYPE_VEC,
			   VC4_ENCODER_TYPE_DSI1,
			   VC4_ENCODER_TYPE_HDMI0,
			   VC4_ENCODER_TYPE_HDMI1),
	VC5_PV_MUXING_TEST("More than 3 outputs: DPI, TXP, DSI1, HDMI0, HDMI1",
			   VC4_ENCODER_TYPE_DPI,
			   VC4_ENCODER_TYPE_TXP,
			   VC4_ENCODER_TYPE_DSI1,
			   VC4_ENCODER_TYPE_HDMI0,
			   VC4_ENCODER_TYPE_HDMI1),
	VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, DSI1",
			   VC4_ENCODER_TYPE_DSI0,
			   VC4_ENCODER_TYPE_VEC,
			   VC4_ENCODER_TYPE_TXP,
			   VC4_ENCODER_TYPE_DSI1),
	VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, HDMI0",
			   VC4_ENCODER_TYPE_DSI0,
			   VC4_ENCODER_TYPE_VEC,
			   VC4_ENCODER_TYPE_TXP,
			   VC4_ENCODER_TYPE_HDMI0),
	VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, HDMI1",
			   VC4_ENCODER_TYPE_DSI0,
			   VC4_ENCODER_TYPE_VEC,
			   VC4_ENCODER_TYPE_TXP,
			   VC4_ENCODER_TYPE_HDMI1),
	VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, DSI1, HDMI0",
			   VC4_ENCODER_TYPE_DSI0,
			   VC4_ENCODER_TYPE_VEC,
			   VC4_ENCODER_TYPE_DSI1,
			   VC4_ENCODER_TYPE_HDMI0),
	VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, DSI1, HDMI1",
			   VC4_ENCODER_TYPE_DSI0,
			   VC4_ENCODER_TYPE_VEC,
			   VC4_ENCODER_TYPE_DSI1,
			   VC4_ENCODER_TYPE_HDMI1),
	VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, HDMI0, HDMI1",
			   VC4_ENCODER_TYPE_DSI0,
			   VC4_ENCODER_TYPE_VEC,
			   VC4_ENCODER_TYPE_HDMI0,
			   VC4_ENCODER_TYPE_HDMI1),
	VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, TXP, DSI1, HDMI0",
			   VC4_ENCODER_TYPE_DSI0,
			   VC4_ENCODER_TYPE_TXP,
			   VC4_ENCODER_TYPE_DSI1,
			   VC4_ENCODER_TYPE_HDMI0),
	VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, TXP, DSI1, HDMI1",
			   VC4_ENCODER_TYPE_DSI0,
			   VC4_ENCODER_TYPE_TXP,
			   VC4_ENCODER_TYPE_DSI1,
			   VC4_ENCODER_TYPE_HDMI1),
	VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, TXP, HDMI0, HDMI1",
			   VC4_ENCODER_TYPE_DSI0,
			   VC4_ENCODER_TYPE_TXP,
			   VC4_ENCODER_TYPE_HDMI0,
			   VC4_ENCODER_TYPE_HDMI1),
	VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, DSI1, HDMI0, HDMI1",
			   VC4_ENCODER_TYPE_DSI0,
			   VC4_ENCODER_TYPE_DSI1,
			   VC4_ENCODER_TYPE_HDMI0,
			   VC4_ENCODER_TYPE_HDMI1),
	VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, DSI1, HDMI0",
			   VC4_ENCODER_TYPE_DSI0,
			   VC4_ENCODER_TYPE_VEC,
			   VC4_ENCODER_TYPE_TXP,
			   VC4_ENCODER_TYPE_DSI1,
			   VC4_ENCODER_TYPE_HDMI0),
	VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, DSI1, HDMI1",
			   VC4_ENCODER_TYPE_DSI0,
			   VC4_ENCODER_TYPE_VEC,
			   VC4_ENCODER_TYPE_TXP,
			   VC4_ENCODER_TYPE_DSI1,
			   VC4_ENCODER_TYPE_HDMI1),
	VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, HDMI0, HDMI1",
			   VC4_ENCODER_TYPE_DSI0,
			   VC4_ENCODER_TYPE_VEC,
			   VC4_ENCODER_TYPE_TXP,
			   VC4_ENCODER_TYPE_HDMI0,
			   VC4_ENCODER_TYPE_HDMI1),
	VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, DSI1, HDMI0, HDMI1",
			   VC4_ENCODER_TYPE_DSI0,
			   VC4_ENCODER_TYPE_VEC,
			   VC4_ENCODER_TYPE_DSI1,
			   VC4_ENCODER_TYPE_HDMI0,
			   VC4_ENCODER_TYPE_HDMI1),
	VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, TXP, DSI1, HDMI0, HDMI1",
			   VC4_ENCODER_TYPE_DSI0,
			   VC4_ENCODER_TYPE_TXP,
			   VC4_ENCODER_TYPE_DSI1,
			   VC4_ENCODER_TYPE_HDMI0,
			   VC4_ENCODER_TYPE_HDMI1),
	VC5_PV_MUXING_TEST("More than 3 outputs: VEC, TXP, DSI1, HDMI0, HDMI1",
			   VC4_ENCODER_TYPE_VEC,
			   VC4_ENCODER_TYPE_TXP,
			   VC4_ENCODER_TYPE_DSI1,
			   VC4_ENCODER_TYPE_HDMI0,
			   VC4_ENCODER_TYPE_HDMI1),
	VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, DSI1, HDMI0, HDMI1",
			   VC4_ENCODER_TYPE_DPI,
			   VC4_ENCODER_TYPE_VEC,
			   VC4_ENCODER_TYPE_TXP,
			   VC4_ENCODER_TYPE_DSI1,
			   VC4_ENCODER_TYPE_HDMI0,
			   VC4_ENCODER_TYPE_HDMI1),
	VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, DSI1, HDMI0, HDMI1",
			   VC4_ENCODER_TYPE_DSI0,
			   VC4_ENCODER_TYPE_VEC,
			   VC4_ENCODER_TYPE_TXP,
			   VC4_ENCODER_TYPE_DSI1,
			   VC4_ENCODER_TYPE_HDMI0,
			   VC4_ENCODER_TYPE_HDMI1),
};

KUNIT_ARRAY_PARAM(vc5_test_pv_muxing_invalid,
		  vc5_test_pv_muxing_invalid_params,
		  vc4_test_pv_muxing_desc);

/*
 * Enable every output listed in the test parameters, run the atomic
 * check (which performs the HVS channel assignment), and verify that
 * no two CRTCs share a FIFO and that each encoder ended up on a
 * channel accepted by the parameter's check_fn.
 */
static void drm_vc4_test_pv_muxing(struct kunit *test)
{
	const struct pv_muxing_param *params = test->param_value;
	const struct pv_muxing_priv *priv = test->priv;
	struct drm_atomic_state *state = priv->state;
	unsigned int i;
	int ret;

	/* Pull every requested encoder/CRTC pair into the atomic state. */
	for (i = 0; i < params->nencoders; i++) {
		enum vc4_encoder_type enc_type = params->encoders[i];

		ret = vc4_mock_atomic_add_output(test, state, enc_type);
		KUNIT_ASSERT_EQ(test, ret, 0);
	}

	/* A valid combination must pass the atomic check. */
	ret = drm_atomic_check_only(state);
	KUNIT_EXPECT_EQ(test, ret, 0);

	/* No two enabled CRTCs may have been muxed to the same FIFO. */
	KUNIT_EXPECT_TRUE(test, check_fifo_conflict(test, state));

	for (i = 0; i < params->nencoders; i++) {
		enum vc4_encoder_type enc_type = params->encoders[i];

		KUNIT_EXPECT_TRUE(test,
				  check_channel_for_encoder(test, state, enc_type,
							    params->check_fn));
	}
}

/*
 * Enable an invalid combination of outputs and verify that the atomic
 * check rejects it with a negative error code.
 */
static void drm_vc4_test_pv_muxing_invalid(struct kunit *test)
{
	const struct pv_muxing_param *params = test->param_value;
	const struct pv_muxing_priv *priv = test->priv;
	struct drm_atomic_state *state = priv->state;
	unsigned int i;
	int ret;

	for (i = 0; i < params->nencoders; i++) {
		enum vc4_encoder_type enc_type = params->encoders[i];

		ret = vc4_mock_atomic_add_output(test, state, enc_type);
		KUNIT_ASSERT_EQ(test, ret, 0);
	}

	ret = drm_atomic_check_only(state);
	KUNIT_EXPECT_LT(test, ret, 0);
}

/*
 * Common init for both muxing suites: allocate the per-test private
 * data, build the mock device chosen by the parameter's mock_fn, and
 * allocate an atomic state (with its acquire context) for the test to
 * populate. All allocations are KUnit-managed, so no exit hook is
 * needed.
 */
static int vc4_pv_muxing_test_init(struct kunit *test)
{
	const struct pv_muxing_param *params = test->param_value;
	struct drm_modeset_acquire_ctx *ctx;
	struct pv_muxing_priv *priv;
	struct drm_device *drm;
	struct vc4_dev *vc4;

	priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, priv);
	test->priv = priv;

	vc4 = params->mock_fn(test);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
	priv->vc4 = vc4;

	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);

	drm = &vc4->base;
	priv->state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->state);

	return 0;
}

static struct kunit_case vc4_pv_muxing_tests[] = {
	KUNIT_CASE_PARAM(drm_vc4_test_pv_muxing,
			 vc4_test_pv_muxing_gen_params),
	KUNIT_CASE_PARAM(drm_vc4_test_pv_muxing_invalid,
			 vc4_test_pv_muxing_invalid_gen_params),
	{}
};

static struct kunit_suite vc4_pv_muxing_test_suite = {
	.name = "vc4-pv-muxing-combinations",
	.init = vc4_pv_muxing_test_init,
	.test_cases = vc4_pv_muxing_tests,
};

static struct kunit_case vc5_pv_muxing_tests[] = {
	KUNIT_CASE_PARAM(drm_vc4_test_pv_muxing,
			 vc5_test_pv_muxing_gen_params),
	KUNIT_CASE_PARAM(drm_vc4_test_pv_muxing_invalid,
			 vc5_test_pv_muxing_invalid_gen_params),
	{}
};

static struct kunit_suite vc5_pv_muxing_test_suite = {
	.name = "vc5-pv-muxing-combinations",
	.init = vc4_pv_muxing_test_init,
	.test_cases = vc5_pv_muxing_tests,
};

/* Regression test: enabling a second CRTC in a separate, subsequent
 * commit must give it a channel distinct from the already-enabled one.
 *
 * See
 * https://lore.kernel.org/all/[email protected]/
 * and
 * https://lore.kernel.org/dri-devel/[email protected]/
 */
static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *test)
{
	struct drm_modeset_acquire_ctx *ctx;
	struct drm_atomic_state *state;
	struct vc4_crtc_state *new_vc4_crtc_state;
	struct vc4_hvs_state *new_hvs_state;
	unsigned int hdmi0_channel;
	unsigned int hdmi1_channel;
	struct drm_device *drm;
	struct vc4_dev *vc4;
	int ret;

	vc4 = vc5_mock_device(test);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);

	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);

	drm = &vc4->base;

	/* First commit: enable HDMI0 only and record its channel. */
	state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);

	ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = drm_atomic_check_only(state);
	KUNIT_ASSERT_EQ(test, ret, 0);

	new_hvs_state = vc4_hvs_get_new_global_state(state);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_hvs_state);

	new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state,
							    VC4_ENCODER_TYPE_HDMI0);
	KUNIT_ASSERT_NOT_NULL(test, new_vc4_crtc_state);

	hdmi0_channel = new_vc4_crtc_state->assigned_channel;
	KUNIT_ASSERT_NE(test, hdmi0_channel, VC4_HVS_CHANNEL_DISABLED);
	KUNIT_ASSERT_TRUE(test, new_hvs_state->fifo_state[hdmi0_channel].in_use);

	/* Make the first commit "current" so the second one sees it. */
	ret = drm_atomic_helper_swap_state(state, false);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* Second commit: enable HDMI1 on top of the committed HDMI0. */
	state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);

	ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = drm_atomic_check_only(state);
	KUNIT_ASSERT_EQ(test, ret, 0);

	new_hvs_state = vc4_hvs_get_new_global_state(state);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_hvs_state);

	new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state,
							    VC4_ENCODER_TYPE_HDMI1);
	KUNIT_ASSERT_NOT_NULL(test, new_vc4_crtc_state);

	hdmi1_channel = new_vc4_crtc_state->assigned_channel;
	KUNIT_ASSERT_NE(test, hdmi1_channel, VC4_HVS_CHANNEL_DISABLED);
	KUNIT_ASSERT_TRUE(test, new_hvs_state->fifo_state[hdmi1_channel].in_use);

	/* The two outputs must not have been muxed to the same channel. */
	KUNIT_EXPECT_NE(test, hdmi0_channel, hdmi1_channel);
}

/*
 * This test makes sure that we never change the FIFO of an active HVS
 * channel if we disable a FIFO with a lower index.
 *
 * Doing so would result in a FIFO stall and would disrupt an output
 * supposed to be unaffected by the commit.
 */
static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
{
	struct drm_modeset_acquire_ctx *ctx;
	struct drm_atomic_state *state;
	struct vc4_crtc_state *new_vc4_crtc_state;
	struct vc4_hvs_state *new_hvs_state;
	unsigned int old_hdmi0_channel;
	unsigned int old_hdmi1_channel;
	struct drm_device *drm;
	struct vc4_dev *vc4;
	int ret;

	vc4 = vc5_mock_device(test);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);

	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);

	drm = &vc4->base;

	/* First commit: enable both HDMI outputs and record channels. */
	state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);

	ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = drm_atomic_check_only(state);
	KUNIT_ASSERT_EQ(test, ret, 0);

	new_hvs_state = vc4_hvs_get_new_global_state(state);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_hvs_state);

	new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state,
							    VC4_ENCODER_TYPE_HDMI0);
	KUNIT_ASSERT_NOT_NULL(test, new_vc4_crtc_state);

	old_hdmi0_channel = new_vc4_crtc_state->assigned_channel;
	KUNIT_ASSERT_NE(test, old_hdmi0_channel, VC4_HVS_CHANNEL_DISABLED);
	KUNIT_ASSERT_TRUE(test, new_hvs_state->fifo_state[old_hdmi0_channel].in_use);

	new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state,
							    VC4_ENCODER_TYPE_HDMI1);
	KUNIT_ASSERT_NOT_NULL(test, new_vc4_crtc_state);

	old_hdmi1_channel = new_vc4_crtc_state->assigned_channel;
	KUNIT_ASSERT_NE(test, old_hdmi1_channel, VC4_HVS_CHANNEL_DISABLED);
	KUNIT_ASSERT_TRUE(test, new_hvs_state->fifo_state[old_hdmi1_channel].in_use);

	ret = drm_atomic_helper_swap_state(state, false);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* Second commit: disable HDMI0 and check HDMI1 stays put. */
	state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);

	ret = vc4_mock_atomic_del_output(test, state, VC4_ENCODER_TYPE_HDMI0);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = drm_atomic_check_only(state);
	KUNIT_ASSERT_EQ(test, ret, 0);

	new_hvs_state = vc4_hvs_get_new_global_state(state);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_hvs_state);

	/*
	 * HDMI1's CRTC may legitimately be absent from this commit; if
	 * it was pulled in, its channel assignment must be unchanged.
	 */
	new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state,
							    VC4_ENCODER_TYPE_HDMI1);
	if (new_vc4_crtc_state) {
		unsigned int hdmi1_channel;

		hdmi1_channel = new_vc4_crtc_state->assigned_channel;
		KUNIT_ASSERT_NE(test, hdmi1_channel, VC4_HVS_CHANNEL_DISABLED);
		KUNIT_ASSERT_TRUE(test, new_hvs_state->fifo_state[hdmi1_channel].in_use);

		KUNIT_EXPECT_EQ(test, old_hdmi1_channel, hdmi1_channel);
	}
}

/*
 * Test that if we affect a single output, only the CRTC state of that
 * output will be pulled in the global atomic state.
 *
 * This is relevant for two things:
 *
 *   - If we don't have that state at all, we are unlikely to affect the
 *     FIFO muxing. This is somewhat redundant with
 *     drm_test_vc5_pv_muxing_bugs_stable_fifo()
 *
 *   - KMS waits for page flips to occur on all the CRTC found in the
 *     CRTC state. Since the CRTC is unaffected, we would over-wait, but
 *     most importantly run into corner cases like waiting on an
 *     inactive CRTC that never completes.
 */
static void
drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct kunit *test)
{
	struct drm_modeset_acquire_ctx *ctx;
	struct drm_atomic_state *state;
	struct vc4_crtc_state *new_vc4_crtc_state;
	struct drm_device *drm;
	struct vc4_dev *vc4;
	int ret;

	vc4 = vc5_mock_device(test);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);

	ctx = drm_kunit_helper_acquire_ctx_alloc(test);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);

	drm = &vc4->base;

	/* First commit: enable HDMI0. */
	state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);

	ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = drm_atomic_check_only(state);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = drm_atomic_helper_swap_state(state, false);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* Second commit: enable HDMI1 only. */
	state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);

	ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = drm_atomic_check_only(state);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* HDMI0 is untouched, so its CRTC state must not be pulled in. */
	new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state,
							    VC4_ENCODER_TYPE_HDMI0);
	KUNIT_EXPECT_NULL(test, new_vc4_crtc_state);
}

static struct kunit_case vc5_pv_muxing_bugs_tests[] = {
	KUNIT_CASE(drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable),
	KUNIT_CASE(drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state),
	KUNIT_CASE(drm_test_vc5_pv_muxing_bugs_stable_fifo),
	{}
};

static struct kunit_suite vc5_pv_muxing_bugs_test_suite = {
	.name = "vc5-pv-muxing-bugs",
	.test_cases = vc5_pv_muxing_bugs_tests,
};

kunit_test_suites(
	&vc4_pv_muxing_test_suite,
	&vc5_pv_muxing_test_suite,
	&vc5_pv_muxing_bugs_test_suite
);
linux-master
drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
// SPDX-License-Identifier: GPL-2.0 #include <drm/drm_atomic_state_helper.h> #include <drm/drm_modeset_helper_vtables.h> #include <kunit/test.h> #include "vc4_mock.h" static const struct drm_crtc_helper_funcs vc4_dummy_crtc_helper_funcs = { .atomic_check = vc4_crtc_atomic_check, }; static const struct drm_crtc_funcs vc4_dummy_crtc_funcs = { .atomic_destroy_state = vc4_crtc_destroy_state, .atomic_duplicate_state = vc4_crtc_duplicate_state, .reset = vc4_crtc_reset, }; struct vc4_dummy_crtc *vc4_mock_pv(struct kunit *test, struct drm_device *drm, struct drm_plane *plane, const struct vc4_crtc_data *data) { struct vc4_dummy_crtc *dummy_crtc; struct vc4_crtc *vc4_crtc; int ret; dummy_crtc = kunit_kzalloc(test, sizeof(*dummy_crtc), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, dummy_crtc); vc4_crtc = &dummy_crtc->crtc; ret = __vc4_crtc_init(drm, NULL, vc4_crtc, data, plane, &vc4_dummy_crtc_funcs, &vc4_dummy_crtc_helper_funcs, false); KUNIT_ASSERT_EQ(test, ret, 0); return dummy_crtc; }
linux-master
drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c
// SPDX-License-Identifier: GPL-2.0

#include <drm/drm_drv.h>
#include <drm/drm_kunit_helpers.h>

#include <kunit/test.h>

#include "vc4_mock.h"

/* One encoder/connector pair hanging off a mock CRTC. */
struct vc4_mock_output_desc {
	enum vc4_encoder_type	vc4_encoder_type;
	unsigned int		encoder_type;
	unsigned int		connector_type;
};

#define VC4_MOCK_OUTPUT_DESC(_vc4_type, _etype, _ctype) { .vc4_encoder_type = _vc4_type, .encoder_type = _etype, .connector_type = _ctype, }

/* A CRTC together with every output that can be attached to it. */
struct vc4_mock_pipe_desc {
	const struct vc4_crtc_data *data;
	const struct vc4_mock_output_desc *outputs;
	unsigned int noutputs;
};

/* noutputs is derived from the compound-literal array so the count
 * can never drift from the initializer list.
 */
#define VC4_MOCK_CRTC_DESC(_data, ...) { .data = _data, .outputs = (struct vc4_mock_output_desc[]) { __VA_ARGS__ }, .noutputs = sizeof((struct vc4_mock_output_desc[]) { __VA_ARGS__ }) / sizeof(struct vc4_mock_output_desc), }

#define VC4_MOCK_PIXELVALVE_DESC(_data, ...) VC4_MOCK_CRTC_DESC(&(_data)->base, __VA_ARGS__)

/* Full description of a mock device: all of its display pipes. */
struct vc4_mock_desc {
	const struct vc4_mock_pipe_desc *pipes;
	unsigned int npipes;
};

#define VC4_MOCK_DESC(...) { .pipes = (struct vc4_mock_pipe_desc[]) { __VA_ARGS__ }, .npipes = sizeof((struct vc4_mock_pipe_desc[]) { __VA_ARGS__ }) / sizeof(struct vc4_mock_pipe_desc), }

/* Pipeline layout mimicking a BCM2835 (original vc4) platform. */
static const struct vc4_mock_desc vc4_mock =
	VC4_MOCK_DESC(
		VC4_MOCK_CRTC_DESC(&vc4_txp_crtc_data,
				   VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_TXP,
							DRM_MODE_ENCODER_VIRTUAL,
							DRM_MODE_CONNECTOR_WRITEBACK)),
		VC4_MOCK_PIXELVALVE_DESC(&bcm2835_pv0_data,
					 VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_DSI0,
							      DRM_MODE_ENCODER_DSI,
							      DRM_MODE_CONNECTOR_DSI),
					 VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_DPI,
							      DRM_MODE_ENCODER_DPI,
							      DRM_MODE_CONNECTOR_DPI)),
		VC4_MOCK_PIXELVALVE_DESC(&bcm2835_pv1_data,
					 VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_DSI1,
							      DRM_MODE_ENCODER_DSI,
							      DRM_MODE_CONNECTOR_DSI)),
		VC4_MOCK_PIXELVALVE_DESC(&bcm2835_pv2_data,
					 VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_HDMI0,
							      DRM_MODE_ENCODER_TMDS,
							      DRM_MODE_CONNECTOR_HDMIA),
					 VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_VEC,
							      DRM_MODE_ENCODER_TVDAC,
							      DRM_MODE_CONNECTOR_Composite)),
);

/* Pipeline layout mimicking a BCM2711 (vc5 / Pi4) platform. */
static const struct vc4_mock_desc vc5_mock =
	VC4_MOCK_DESC(
		VC4_MOCK_CRTC_DESC(&vc4_txp_crtc_data,
				   VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_TXP,
							DRM_MODE_ENCODER_VIRTUAL,
							DRM_MODE_CONNECTOR_WRITEBACK)),
		VC4_MOCK_PIXELVALVE_DESC(&bcm2711_pv0_data,
					 VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_DSI0,
							      DRM_MODE_ENCODER_DSI,
							      DRM_MODE_CONNECTOR_DSI),
					 VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_DPI,
							      DRM_MODE_ENCODER_DPI,
							      DRM_MODE_CONNECTOR_DPI)),
		VC4_MOCK_PIXELVALVE_DESC(&bcm2711_pv1_data,
					 VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_DSI1,
							      DRM_MODE_ENCODER_DSI,
							      DRM_MODE_CONNECTOR_DSI)),
		VC4_MOCK_PIXELVALVE_DESC(&bcm2711_pv2_data,
					 VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_HDMI0,
							      DRM_MODE_ENCODER_TMDS,
							      DRM_MODE_CONNECTOR_HDMIA)),
		VC4_MOCK_PIXELVALVE_DESC(&bcm2711_pv3_data,
					 VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_VEC,
							      DRM_MODE_ENCODER_TVDAC,
							      DRM_MODE_CONNECTOR_Composite)),
		VC4_MOCK_PIXELVALVE_DESC(&bcm2711_pv4_data,
					 VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_HDMI1,
							      DRM_MODE_ENCODER_TMDS,
							      DRM_MODE_CONNECTOR_HDMIA)),
);

/*
 * Instantiate one pipe: a primary plane, the mock CRTC on top of it,
 * and every encoder/connector pair the descriptor lists. Failures are
 * reported through KUnit assertions.
 */
static int __build_one_pipe(struct kunit *test, struct drm_device *drm,
			    const struct vc4_mock_pipe_desc *pipe)
{
	struct vc4_dummy_plane *dummy_plane;
	struct drm_plane *plane;
	struct vc4_dummy_crtc *dummy_crtc;
	struct drm_crtc *crtc;
	unsigned int i;

	dummy_plane = vc4_dummy_plane(test, drm, DRM_PLANE_TYPE_PRIMARY);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dummy_plane);

	plane = &dummy_plane->plane.base;
	dummy_crtc = vc4_mock_pv(test, drm, plane, pipe->data);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dummy_crtc);

	crtc = &dummy_crtc->crtc.base;
	for (i = 0; i < pipe->noutputs; i++) {
		const struct vc4_mock_output_desc *mock_output = &pipe->outputs[i];
		struct vc4_dummy_output *dummy_output;

		dummy_output = vc4_dummy_output(test, drm, crtc,
						mock_output->vc4_encoder_type,
						mock_output->encoder_type,
						mock_output->connector_type);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dummy_output);
	}

	return 0;
}

/* Build every pipe described by @mock on @drm. */
static int __build_mock(struct kunit *test, struct drm_device *drm,
			const struct vc4_mock_desc *mock)
{
	unsigned int i;

	for (i = 0; i < mock->npipes; i++) {
		const struct vc4_mock_pipe_desc *pipe = &mock->pipes[i];
		int ret;

		ret = __build_one_pipe(test, drm, pipe);
		KUNIT_ASSERT_EQ(test, ret, 0);
	}

	return 0;
}

/* KUnit deferred action: unregister the DRM device at test teardown. */
static void kunit_action_drm_dev_unregister(void *ptr)
{
	struct drm_device *drm = ptr;

	drm_dev_unregister(drm);
}

/*
 * Build a fully registered mock vc4/vc5 device: allocate the device
 * and DRM driver instance, allocate the HVS, build all mock pipes,
 * load the KMS state and register the device. Unregistration is queued
 * as a KUnit action, and all allocations are KUnit-managed.
 */
static struct vc4_dev *__mock_device(struct kunit *test, bool is_vc5)
{
	struct drm_device *drm;
	const struct drm_driver *drv = is_vc5 ? &vc5_drm_driver : &vc4_drm_driver;
	const struct vc4_mock_desc *desc = is_vc5 ? &vc5_mock : &vc4_mock;
	struct vc4_dev *vc4;
	struct device *dev;
	int ret;

	dev = drm_kunit_helper_alloc_device(test);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);

	vc4 = drm_kunit_helper_alloc_drm_device_with_driver(test, dev,
							    struct vc4_dev, base,
							    drv);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);

	vc4->dev = dev;
	vc4->is_vc5 = is_vc5;

	/* The HVS must exist before the pipes and KMS state are built. */
	vc4->hvs = __vc4_hvs_alloc(vc4, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4->hvs);

	drm = &vc4->base;
	ret = __build_mock(test, drm, desc);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = vc4_kms_load(drm);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = drm_dev_register(drm, 0);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = kunit_add_action_or_reset(test,
					kunit_action_drm_dev_unregister,
					drm);
	KUNIT_ASSERT_EQ(test, ret, 0);

	return vc4;
}

/* Public constructor: mock BCM2835 (vc4) device. */
struct vc4_dev *vc4_mock_device(struct kunit *test)
{
	return __mock_device(test, false);
}

/* Public constructor: mock BCM2711 (vc5) device. */
struct vc4_dev *vc5_mock_device(struct kunit *test)
{
	return __mock_device(test, true);
}
linux-master
drivers/gpu/drm/vc4/tests/vc4_mock.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright 2019 NXP. * * Scaling algorithms were contributed by Dzung Hoang <[email protected]> */ #include <linux/device.h> #include <linux/slab.h> #include "dcss-dev.h" #define DCSS_SCALER_CTRL 0x00 #define SCALER_EN BIT(0) #define REPEAT_EN BIT(4) #define SCALE2MEM_EN BIT(8) #define MEM2OFIFO_EN BIT(12) #define DCSS_SCALER_OFIFO_CTRL 0x04 #define OFIFO_LOW_THRES_POS 0 #define OFIFO_LOW_THRES_MASK GENMASK(9, 0) #define OFIFO_HIGH_THRES_POS 16 #define OFIFO_HIGH_THRES_MASK GENMASK(25, 16) #define UNDERRUN_DETECT_CLR BIT(26) #define LOW_THRES_DETECT_CLR BIT(27) #define HIGH_THRES_DETECT_CLR BIT(28) #define UNDERRUN_DETECT_EN BIT(29) #define LOW_THRES_DETECT_EN BIT(30) #define HIGH_THRES_DETECT_EN BIT(31) #define DCSS_SCALER_SDATA_CTRL 0x08 #define YUV_EN BIT(0) #define RTRAM_8LINES BIT(1) #define Y_UV_BYTE_SWAP BIT(4) #define A2R10G10B10_FORMAT_POS 8 #define A2R10G10B10_FORMAT_MASK GENMASK(11, 8) #define DCSS_SCALER_BIT_DEPTH 0x0C #define LUM_BIT_DEPTH_POS 0 #define LUM_BIT_DEPTH_MASK GENMASK(1, 0) #define CHR_BIT_DEPTH_POS 4 #define CHR_BIT_DEPTH_MASK GENMASK(5, 4) #define DCSS_SCALER_SRC_FORMAT 0x10 #define DCSS_SCALER_DST_FORMAT 0x14 #define FORMAT_MASK GENMASK(1, 0) #define DCSS_SCALER_SRC_LUM_RES 0x18 #define DCSS_SCALER_SRC_CHR_RES 0x1C #define DCSS_SCALER_DST_LUM_RES 0x20 #define DCSS_SCALER_DST_CHR_RES 0x24 #define WIDTH_POS 0 #define WIDTH_MASK GENMASK(11, 0) #define HEIGHT_POS 16 #define HEIGHT_MASK GENMASK(27, 16) #define DCSS_SCALER_V_LUM_START 0x48 #define V_START_MASK GENMASK(15, 0) #define DCSS_SCALER_V_LUM_INC 0x4C #define V_INC_MASK GENMASK(15, 0) #define DCSS_SCALER_H_LUM_START 0x50 #define H_START_MASK GENMASK(18, 0) #define DCSS_SCALER_H_LUM_INC 0x54 #define H_INC_MASK GENMASK(15, 0) #define DCSS_SCALER_V_CHR_START 0x58 #define DCSS_SCALER_V_CHR_INC 0x5C #define DCSS_SCALER_H_CHR_START 0x60 #define DCSS_SCALER_H_CHR_INC 0x64 #define DCSS_SCALER_COEF_VLUM 0x80 #define DCSS_SCALER_COEF_HLUM 0x140 #define 
DCSS_SCALER_COEF_VCHR 0x200 #define DCSS_SCALER_COEF_HCHR 0x300 struct dcss_scaler_ch { void __iomem *base_reg; u32 base_ofs; struct dcss_scaler *scl; u32 sdata_ctrl; u32 scaler_ctrl; bool scaler_ctrl_chgd; u32 c_vstart; u32 c_hstart; bool use_nn_interpolation; }; struct dcss_scaler { struct device *dev; struct dcss_ctxld *ctxld; u32 ctx_id; struct dcss_scaler_ch ch[3]; }; /* scaler coefficients generator */ #define PSC_FRAC_BITS 30 #define PSC_FRAC_SCALE BIT(PSC_FRAC_BITS) #define PSC_BITS_FOR_PHASE 4 #define PSC_NUM_PHASES 16 #define PSC_STORED_PHASES (PSC_NUM_PHASES / 2 + 1) #define PSC_NUM_TAPS 7 #define PSC_NUM_TAPS_RGBA 5 #define PSC_COEFF_PRECISION 10 #define PSC_PHASE_FRACTION_BITS 13 #define PSC_PHASE_MASK (PSC_NUM_PHASES - 1) #define PSC_Q_FRACTION 19 #define PSC_Q_ROUND_OFFSET (1 << (PSC_Q_FRACTION - 1)) /** * mult_q() - Performs fixed-point multiplication. * @A: multiplier * @B: multiplicand */ static int mult_q(int A, int B) { int result; s64 temp; temp = (int64_t)A * (int64_t)B; temp += PSC_Q_ROUND_OFFSET; result = (int)(temp >> PSC_Q_FRACTION); return result; } /** * div_q() - Performs fixed-point division. * @A: dividend * @B: divisor */ static int div_q(int A, int B) { int result; s64 temp; temp = (int64_t)A << PSC_Q_FRACTION; if ((temp >= 0 && B >= 0) || (temp < 0 && B < 0)) temp += B / 2; else temp -= B / 2; result = (int)(temp / B); return result; } /** * exp_approx_q() - Compute approximation to exp(x) function using Taylor * series. * @x: fixed-point argument of exp function */ static int exp_approx_q(int x) { int sum = 1 << PSC_Q_FRACTION; int term = 1 << PSC_Q_FRACTION; term = mult_q(term, div_q(x, 1 << PSC_Q_FRACTION)); sum += term; term = mult_q(term, div_q(x, 2 << PSC_Q_FRACTION)); sum += term; term = mult_q(term, div_q(x, 3 << PSC_Q_FRACTION)); sum += term; term = mult_q(term, div_q(x, 4 << PSC_Q_FRACTION)); sum += term; return sum; } /** * dcss_scaler_gaussian_filter() - Generate gaussian prototype filter. 
* @fc_q: fixed-point cutoff frequency normalized to range [0, 1] * @use_5_taps: indicates whether to use 5 taps or 7 taps * @coef: output filter coefficients */ static void dcss_scaler_gaussian_filter(int fc_q, bool use_5_taps, bool phase0_identity, int coef[][PSC_NUM_TAPS]) { int sigma_q, g0_q, g1_q, g2_q; int tap_cnt1, tap_cnt2, tap_idx, phase_cnt; int mid; int phase; int i; int taps; if (use_5_taps) for (phase = 0; phase < PSC_STORED_PHASES; phase++) { coef[phase][0] = 0; coef[phase][PSC_NUM_TAPS - 1] = 0; } /* seed coefficient scanner */ taps = use_5_taps ? PSC_NUM_TAPS_RGBA : PSC_NUM_TAPS; mid = (PSC_NUM_PHASES * taps) / 2 - 1; phase_cnt = (PSC_NUM_PHASES * (PSC_NUM_TAPS + 1)) / 2; tap_cnt1 = (PSC_NUM_PHASES * PSC_NUM_TAPS) / 2; tap_cnt2 = (PSC_NUM_PHASES * PSC_NUM_TAPS) / 2; /* seed gaussian filter generator */ sigma_q = div_q(PSC_Q_ROUND_OFFSET, fc_q); g0_q = 1 << PSC_Q_FRACTION; g1_q = exp_approx_q(div_q(-PSC_Q_ROUND_OFFSET, mult_q(sigma_q, sigma_q))); g2_q = mult_q(g1_q, g1_q); coef[phase_cnt & PSC_PHASE_MASK][tap_cnt1 >> PSC_BITS_FOR_PHASE] = g0_q; for (i = 0; i < mid; i++) { phase_cnt++; tap_cnt1--; tap_cnt2++; g0_q = mult_q(g0_q, g1_q); g1_q = mult_q(g1_q, g2_q); if ((phase_cnt & PSC_PHASE_MASK) <= 8) { tap_idx = tap_cnt1 >> PSC_BITS_FOR_PHASE; coef[phase_cnt & PSC_PHASE_MASK][tap_idx] = g0_q; } if (((-phase_cnt) & PSC_PHASE_MASK) <= 8) { tap_idx = tap_cnt2 >> PSC_BITS_FOR_PHASE; coef[(-phase_cnt) & PSC_PHASE_MASK][tap_idx] = g0_q; } } phase_cnt++; tap_cnt1--; coef[phase_cnt & PSC_PHASE_MASK][tap_cnt1 >> PSC_BITS_FOR_PHASE] = 0; /* override phase 0 with identity filter if specified */ if (phase0_identity) for (i = 0; i < PSC_NUM_TAPS; i++) coef[0][i] = i == (PSC_NUM_TAPS >> 1) ? 
(1 << PSC_COEFF_PRECISION) : 0; /* normalize coef */ for (phase = 0; phase < PSC_STORED_PHASES; phase++) { int sum = 0; s64 ll_temp; for (i = 0; i < PSC_NUM_TAPS; i++) sum += coef[phase][i]; for (i = 0; i < PSC_NUM_TAPS; i++) { ll_temp = coef[phase][i]; ll_temp <<= PSC_COEFF_PRECISION; ll_temp += sum >> 1; ll_temp /= sum; coef[phase][i] = (int)ll_temp; } } } static void dcss_scaler_nearest_neighbor_filter(bool use_5_taps, int coef[][PSC_NUM_TAPS]) { int i, j; for (i = 0; i < PSC_STORED_PHASES; i++) for (j = 0; j < PSC_NUM_TAPS; j++) coef[i][j] = j == PSC_NUM_TAPS >> 1 ? (1 << PSC_COEFF_PRECISION) : 0; } /** * dcss_scaler_filter_design() - Compute filter coefficients using * Gaussian filter. * @src_length: length of input * @dst_length: length of output * @use_5_taps: 0 for 7 taps per phase, 1 for 5 taps * @coef: output coefficients */ static void dcss_scaler_filter_design(int src_length, int dst_length, bool use_5_taps, bool phase0_identity, int coef[][PSC_NUM_TAPS], bool nn_interpolation) { int fc_q; /* compute cutoff frequency */ if (dst_length >= src_length) fc_q = div_q(1, PSC_NUM_PHASES); else fc_q = div_q(dst_length, src_length * PSC_NUM_PHASES); if (nn_interpolation) dcss_scaler_nearest_neighbor_filter(use_5_taps, coef); else /* compute gaussian filter coefficients */ dcss_scaler_gaussian_filter(fc_q, use_5_taps, phase0_identity, coef); } static void dcss_scaler_write(struct dcss_scaler_ch *ch, u32 val, u32 ofs) { struct dcss_scaler *scl = ch->scl; dcss_ctxld_write(scl->ctxld, scl->ctx_id, val, ch->base_ofs + ofs); } static int dcss_scaler_ch_init_all(struct dcss_scaler *scl, unsigned long scaler_base) { struct dcss_scaler_ch *ch; int i; for (i = 0; i < 3; i++) { ch = &scl->ch[i]; ch->base_ofs = scaler_base + i * 0x400; ch->base_reg = ioremap(ch->base_ofs, SZ_4K); if (!ch->base_reg) { dev_err(scl->dev, "scaler: unable to remap ch base\n"); return -ENOMEM; } ch->scl = scl; } return 0; } int dcss_scaler_init(struct dcss_dev *dcss, unsigned long scaler_base) { 
struct dcss_scaler *scaler; scaler = kzalloc(sizeof(*scaler), GFP_KERNEL); if (!scaler) return -ENOMEM; dcss->scaler = scaler; scaler->dev = dcss->dev; scaler->ctxld = dcss->ctxld; scaler->ctx_id = CTX_SB_HP; if (dcss_scaler_ch_init_all(scaler, scaler_base)) { int i; for (i = 0; i < 3; i++) { if (scaler->ch[i].base_reg) iounmap(scaler->ch[i].base_reg); } kfree(scaler); return -ENOMEM; } return 0; } void dcss_scaler_exit(struct dcss_scaler *scl) { int ch_no; for (ch_no = 0; ch_no < 3; ch_no++) { struct dcss_scaler_ch *ch = &scl->ch[ch_no]; dcss_writel(0, ch->base_reg + DCSS_SCALER_CTRL); if (ch->base_reg) iounmap(ch->base_reg); } kfree(scl); } void dcss_scaler_ch_enable(struct dcss_scaler *scl, int ch_num, bool en) { struct dcss_scaler_ch *ch = &scl->ch[ch_num]; u32 scaler_ctrl; scaler_ctrl = en ? SCALER_EN | REPEAT_EN : 0; if (en) dcss_scaler_write(ch, ch->sdata_ctrl, DCSS_SCALER_SDATA_CTRL); if (ch->scaler_ctrl != scaler_ctrl) ch->scaler_ctrl_chgd = true; ch->scaler_ctrl = scaler_ctrl; } static void dcss_scaler_yuv_enable(struct dcss_scaler_ch *ch, bool en) { ch->sdata_ctrl &= ~YUV_EN; ch->sdata_ctrl |= en ? YUV_EN : 0; } static void dcss_scaler_rtr_8lines_enable(struct dcss_scaler_ch *ch, bool en) { ch->sdata_ctrl &= ~RTRAM_8LINES; ch->sdata_ctrl |= en ? RTRAM_8LINES : 0; } static void dcss_scaler_bit_depth_set(struct dcss_scaler_ch *ch, int depth) { u32 val; val = depth == 30 ? 
2 : 0; dcss_scaler_write(ch, ((val << CHR_BIT_DEPTH_POS) & CHR_BIT_DEPTH_MASK) | ((val << LUM_BIT_DEPTH_POS) & LUM_BIT_DEPTH_MASK), DCSS_SCALER_BIT_DEPTH); } enum buffer_format { BUF_FMT_YUV420, BUF_FMT_YUV422, BUF_FMT_ARGB8888_YUV444, }; enum chroma_location { PSC_LOC_HORZ_0_VERT_1_OVER_4 = 0, PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_4 = 1, PSC_LOC_HORZ_0_VERT_0 = 2, PSC_LOC_HORZ_1_OVER_4_VERT_0 = 3, PSC_LOC_HORZ_0_VERT_1_OVER_2 = 4, PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_2 = 5 }; static void dcss_scaler_format_set(struct dcss_scaler_ch *ch, enum buffer_format src_fmt, enum buffer_format dst_fmt) { dcss_scaler_write(ch, src_fmt, DCSS_SCALER_SRC_FORMAT); dcss_scaler_write(ch, dst_fmt, DCSS_SCALER_DST_FORMAT); } static void dcss_scaler_res_set(struct dcss_scaler_ch *ch, int src_xres, int src_yres, int dst_xres, int dst_yres, u32 pix_format, enum buffer_format dst_format) { u32 lsrc_xres, lsrc_yres, csrc_xres, csrc_yres; u32 ldst_xres, ldst_yres, cdst_xres, cdst_yres; bool src_is_444 = true; lsrc_xres = src_xres; csrc_xres = src_xres; lsrc_yres = src_yres; csrc_yres = src_yres; ldst_xres = dst_xres; cdst_xres = dst_xres; ldst_yres = dst_yres; cdst_yres = dst_yres; if (pix_format == DRM_FORMAT_UYVY || pix_format == DRM_FORMAT_VYUY || pix_format == DRM_FORMAT_YUYV || pix_format == DRM_FORMAT_YVYU) { csrc_xres >>= 1; src_is_444 = false; } else if (pix_format == DRM_FORMAT_NV12 || pix_format == DRM_FORMAT_NV21) { csrc_xres >>= 1; csrc_yres >>= 1; src_is_444 = false; } if (dst_format == BUF_FMT_YUV422) cdst_xres >>= 1; /* for 4:4:4 to 4:2:2 conversion, source height should be 1 less */ if (src_is_444 && dst_format == BUF_FMT_YUV422) { lsrc_yres--; csrc_yres--; } dcss_scaler_write(ch, (((lsrc_yres - 1) << HEIGHT_POS) & HEIGHT_MASK) | (((lsrc_xres - 1) << WIDTH_POS) & WIDTH_MASK), DCSS_SCALER_SRC_LUM_RES); dcss_scaler_write(ch, (((csrc_yres - 1) << HEIGHT_POS) & HEIGHT_MASK) | (((csrc_xres - 1) << WIDTH_POS) & WIDTH_MASK), DCSS_SCALER_SRC_CHR_RES); dcss_scaler_write(ch, (((ldst_yres - 
1) << HEIGHT_POS) & HEIGHT_MASK) | (((ldst_xres - 1) << WIDTH_POS) & WIDTH_MASK), DCSS_SCALER_DST_LUM_RES); dcss_scaler_write(ch, (((cdst_yres - 1) << HEIGHT_POS) & HEIGHT_MASK) | (((cdst_xres - 1) << WIDTH_POS) & WIDTH_MASK), DCSS_SCALER_DST_CHR_RES); } #define downscale_fp(factor, fp_pos) ((factor) << (fp_pos)) #define upscale_fp(factor, fp_pos) ((1 << (fp_pos)) / (factor)) struct dcss_scaler_factors { int downscale; int upscale; }; static const struct dcss_scaler_factors dcss_scaler_factors[] = { {3, 8}, {5, 8}, {5, 8}, }; static void dcss_scaler_fractions_set(struct dcss_scaler_ch *ch, int src_xres, int src_yres, int dst_xres, int dst_yres, u32 src_format, u32 dst_format, enum chroma_location src_chroma_loc) { int src_c_xres, src_c_yres, dst_c_xres, dst_c_yres; u32 l_vinc, l_hinc, c_vinc, c_hinc; u32 c_vstart, c_hstart; src_c_xres = src_xres; src_c_yres = src_yres; dst_c_xres = dst_xres; dst_c_yres = dst_yres; c_vstart = 0; c_hstart = 0; /* adjustments for source chroma location */ if (src_format == BUF_FMT_YUV420) { /* vertical input chroma position adjustment */ switch (src_chroma_loc) { case PSC_LOC_HORZ_0_VERT_1_OVER_4: case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_4: /* * move chroma up to first luma line * (1/4 chroma input line spacing) */ c_vstart -= (1 << (PSC_PHASE_FRACTION_BITS - 2)); break; case PSC_LOC_HORZ_0_VERT_1_OVER_2: case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_2: /* * move chroma up to first luma line * (1/2 chroma input line spacing) */ c_vstart -= (1 << (PSC_PHASE_FRACTION_BITS - 1)); break; default: break; } /* horizontal input chroma position adjustment */ switch (src_chroma_loc) { case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_4: case PSC_LOC_HORZ_1_OVER_4_VERT_0: case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_2: /* move chroma left 1/4 chroma input sample spacing */ c_hstart -= (1 << (PSC_PHASE_FRACTION_BITS - 2)); break; default: break; } } /* adjustments to chroma resolution */ if (src_format == BUF_FMT_YUV420) { src_c_xres >>= 1; src_c_yres >>= 1; } else if 
(src_format == BUF_FMT_YUV422) { src_c_xres >>= 1; } if (dst_format == BUF_FMT_YUV422) dst_c_xres >>= 1; l_vinc = ((src_yres << 13) + (dst_yres >> 1)) / dst_yres; c_vinc = ((src_c_yres << 13) + (dst_c_yres >> 1)) / dst_c_yres; l_hinc = ((src_xres << 13) + (dst_xres >> 1)) / dst_xres; c_hinc = ((src_c_xres << 13) + (dst_c_xres >> 1)) / dst_c_xres; /* save chroma start phase */ ch->c_vstart = c_vstart; ch->c_hstart = c_hstart; dcss_scaler_write(ch, 0, DCSS_SCALER_V_LUM_START); dcss_scaler_write(ch, l_vinc, DCSS_SCALER_V_LUM_INC); dcss_scaler_write(ch, 0, DCSS_SCALER_H_LUM_START); dcss_scaler_write(ch, l_hinc, DCSS_SCALER_H_LUM_INC); dcss_scaler_write(ch, c_vstart, DCSS_SCALER_V_CHR_START); dcss_scaler_write(ch, c_vinc, DCSS_SCALER_V_CHR_INC); dcss_scaler_write(ch, c_hstart, DCSS_SCALER_H_CHR_START); dcss_scaler_write(ch, c_hinc, DCSS_SCALER_H_CHR_INC); } int dcss_scaler_get_min_max_ratios(struct dcss_scaler *scl, int ch_num, int *min, int *max) { *min = upscale_fp(dcss_scaler_factors[ch_num].upscale, 16); *max = downscale_fp(dcss_scaler_factors[ch_num].downscale, 16); return 0; } static void dcss_scaler_program_5_coef_set(struct dcss_scaler_ch *ch, int base_addr, int coef[][PSC_NUM_TAPS]) { int i, phase; for (i = 0; i < PSC_STORED_PHASES; i++) { dcss_scaler_write(ch, ((coef[i][1] & 0xfff) << 16 | (coef[i][2] & 0xfff) << 4 | (coef[i][3] & 0xf00) >> 8), base_addr + i * sizeof(u32)); dcss_scaler_write(ch, ((coef[i][3] & 0x0ff) << 20 | (coef[i][4] & 0xfff) << 8 | (coef[i][5] & 0xff0) >> 4), base_addr + 0x40 + i * sizeof(u32)); dcss_scaler_write(ch, ((coef[i][5] & 0x00f) << 24), base_addr + 0x80 + i * sizeof(u32)); } /* reverse both phase and tap orderings */ for (phase = (PSC_NUM_PHASES >> 1) - 1; i < PSC_NUM_PHASES; i++, phase--) { dcss_scaler_write(ch, ((coef[phase][5] & 0xfff) << 16 | (coef[phase][4] & 0xfff) << 4 | (coef[phase][3] & 0xf00) >> 8), base_addr + i * sizeof(u32)); dcss_scaler_write(ch, ((coef[phase][3] & 0x0ff) << 20 | (coef[phase][2] & 0xfff) << 8 | 
(coef[phase][1] & 0xff0) >> 4), base_addr + 0x40 + i * sizeof(u32)); dcss_scaler_write(ch, ((coef[phase][1] & 0x00f) << 24), base_addr + 0x80 + i * sizeof(u32)); } } static void dcss_scaler_program_7_coef_set(struct dcss_scaler_ch *ch, int base_addr, int coef[][PSC_NUM_TAPS]) { int i, phase; for (i = 0; i < PSC_STORED_PHASES; i++) { dcss_scaler_write(ch, ((coef[i][0] & 0xfff) << 16 | (coef[i][1] & 0xfff) << 4 | (coef[i][2] & 0xf00) >> 8), base_addr + i * sizeof(u32)); dcss_scaler_write(ch, ((coef[i][2] & 0x0ff) << 20 | (coef[i][3] & 0xfff) << 8 | (coef[i][4] & 0xff0) >> 4), base_addr + 0x40 + i * sizeof(u32)); dcss_scaler_write(ch, ((coef[i][4] & 0x00f) << 24 | (coef[i][5] & 0xfff) << 12 | (coef[i][6] & 0xfff)), base_addr + 0x80 + i * sizeof(u32)); } /* reverse both phase and tap orderings */ for (phase = (PSC_NUM_PHASES >> 1) - 1; i < PSC_NUM_PHASES; i++, phase--) { dcss_scaler_write(ch, ((coef[phase][6] & 0xfff) << 16 | (coef[phase][5] & 0xfff) << 4 | (coef[phase][4] & 0xf00) >> 8), base_addr + i * sizeof(u32)); dcss_scaler_write(ch, ((coef[phase][4] & 0x0ff) << 20 | (coef[phase][3] & 0xfff) << 8 | (coef[phase][2] & 0xff0) >> 4), base_addr + 0x40 + i * sizeof(u32)); dcss_scaler_write(ch, ((coef[phase][2] & 0x00f) << 24 | (coef[phase][1] & 0xfff) << 12 | (coef[phase][0] & 0xfff)), base_addr + 0x80 + i * sizeof(u32)); } } static void dcss_scaler_yuv_coef_set(struct dcss_scaler_ch *ch, enum buffer_format src_format, enum buffer_format dst_format, bool use_5_taps, int src_xres, int src_yres, int dst_xres, int dst_yres) { int coef[PSC_STORED_PHASES][PSC_NUM_TAPS]; bool program_5_taps = use_5_taps || (dst_format == BUF_FMT_YUV422 && src_format == BUF_FMT_ARGB8888_YUV444); /* horizontal luma */ dcss_scaler_filter_design(src_xres, dst_xres, false, src_xres == dst_xres, coef, ch->use_nn_interpolation); dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HLUM, coef); /* vertical luma */ dcss_scaler_filter_design(src_yres, dst_yres, program_5_taps, src_yres == dst_yres, 
coef, ch->use_nn_interpolation); if (program_5_taps) dcss_scaler_program_5_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef); else dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef); /* adjust chroma resolution */ if (src_format != BUF_FMT_ARGB8888_YUV444) src_xres >>= 1; if (src_format == BUF_FMT_YUV420) src_yres >>= 1; if (dst_format != BUF_FMT_ARGB8888_YUV444) dst_xres >>= 1; if (dst_format == BUF_FMT_YUV420) /* should not happen */ dst_yres >>= 1; /* horizontal chroma */ dcss_scaler_filter_design(src_xres, dst_xres, false, (src_xres == dst_xres) && (ch->c_hstart == 0), coef, ch->use_nn_interpolation); dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HCHR, coef); /* vertical chroma */ dcss_scaler_filter_design(src_yres, dst_yres, program_5_taps, (src_yres == dst_yres) && (ch->c_vstart == 0), coef, ch->use_nn_interpolation); if (program_5_taps) dcss_scaler_program_5_coef_set(ch, DCSS_SCALER_COEF_VCHR, coef); else dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_VCHR, coef); } static void dcss_scaler_rgb_coef_set(struct dcss_scaler_ch *ch, int src_xres, int src_yres, int dst_xres, int dst_yres) { int coef[PSC_STORED_PHASES][PSC_NUM_TAPS]; /* horizontal RGB */ dcss_scaler_filter_design(src_xres, dst_xres, false, src_xres == dst_xres, coef, ch->use_nn_interpolation); dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HLUM, coef); /* vertical RGB */ dcss_scaler_filter_design(src_yres, dst_yres, false, src_yres == dst_yres, coef, ch->use_nn_interpolation); dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef); } static void dcss_scaler_set_rgb10_order(struct dcss_scaler_ch *ch, const struct drm_format_info *format) { u32 a2r10g10b10_format; if (format->is_yuv) return; ch->sdata_ctrl &= ~A2R10G10B10_FORMAT_MASK; if (format->depth != 30) return; switch (format->format) { case DRM_FORMAT_ARGB2101010: case DRM_FORMAT_XRGB2101010: a2r10g10b10_format = 0; break; case DRM_FORMAT_ABGR2101010: case DRM_FORMAT_XBGR2101010: a2r10g10b10_format = 5; break; 
case DRM_FORMAT_RGBA1010102: case DRM_FORMAT_RGBX1010102: a2r10g10b10_format = 6; break; case DRM_FORMAT_BGRA1010102: case DRM_FORMAT_BGRX1010102: a2r10g10b10_format = 11; break; default: a2r10g10b10_format = 0; break; } ch->sdata_ctrl |= a2r10g10b10_format << A2R10G10B10_FORMAT_POS; } void dcss_scaler_set_filter(struct dcss_scaler *scl, int ch_num, enum drm_scaling_filter scaling_filter) { struct dcss_scaler_ch *ch = &scl->ch[ch_num]; ch->use_nn_interpolation = scaling_filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR; } void dcss_scaler_setup(struct dcss_scaler *scl, int ch_num, const struct drm_format_info *format, int src_xres, int src_yres, int dst_xres, int dst_yres, u32 vrefresh_hz) { struct dcss_scaler_ch *ch = &scl->ch[ch_num]; unsigned int pixel_depth = 0; bool rtr_8line_en = false; bool use_5_taps = false; enum buffer_format src_format = BUF_FMT_ARGB8888_YUV444; enum buffer_format dst_format = BUF_FMT_ARGB8888_YUV444; u32 pix_format = format->format; if (format->is_yuv) { dcss_scaler_yuv_enable(ch, true); if (pix_format == DRM_FORMAT_NV12 || pix_format == DRM_FORMAT_NV21) { rtr_8line_en = true; src_format = BUF_FMT_YUV420; } else if (pix_format == DRM_FORMAT_UYVY || pix_format == DRM_FORMAT_VYUY || pix_format == DRM_FORMAT_YUYV || pix_format == DRM_FORMAT_YVYU) { src_format = BUF_FMT_YUV422; } use_5_taps = !rtr_8line_en; } else { dcss_scaler_yuv_enable(ch, false); pixel_depth = format->depth; } dcss_scaler_fractions_set(ch, src_xres, src_yres, dst_xres, dst_yres, src_format, dst_format, PSC_LOC_HORZ_0_VERT_1_OVER_4); if (format->is_yuv) dcss_scaler_yuv_coef_set(ch, src_format, dst_format, use_5_taps, src_xres, src_yres, dst_xres, dst_yres); else dcss_scaler_rgb_coef_set(ch, src_xres, src_yres, dst_xres, dst_yres); dcss_scaler_rtr_8lines_enable(ch, rtr_8line_en); dcss_scaler_bit_depth_set(ch, pixel_depth); dcss_scaler_set_rgb10_order(ch, format); dcss_scaler_format_set(ch, src_format, dst_format); dcss_scaler_res_set(ch, src_xres, src_yres, dst_xres, 
dst_yres, pix_format, dst_format); } /* This function will be called from interrupt context. */ void dcss_scaler_write_sclctrl(struct dcss_scaler *scl) { int chnum; dcss_ctxld_assert_locked(scl->ctxld); for (chnum = 0; chnum < 3; chnum++) { struct dcss_scaler_ch *ch = &scl->ch[chnum]; if (ch->scaler_ctrl_chgd) { dcss_ctxld_write_irqsafe(scl->ctxld, scl->ctx_id, ch->scaler_ctrl, ch->base_ofs + DCSS_SCALER_CTRL); ch->scaler_ctrl_chgd = false; } } }
linux-master
drivers/gpu/drm/imx/dcss/dcss-scaler.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright 2019 NXP. */ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge_connector.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_of.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> #include "dcss-dev.h" #include "dcss-kms.h" DEFINE_DRM_GEM_DMA_FOPS(dcss_cma_fops); static const struct drm_mode_config_funcs dcss_drm_mode_config_funcs = { .fb_create = drm_gem_fb_create, .atomic_check = drm_atomic_helper_check, .atomic_commit = drm_atomic_helper_commit, }; static const struct drm_driver dcss_kms_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, DRM_GEM_DMA_DRIVER_OPS, .fops = &dcss_cma_fops, .name = "imx-dcss", .desc = "i.MX8MQ Display Subsystem", .date = "20190917", .major = 1, .minor = 0, .patchlevel = 0, }; static const struct drm_mode_config_helper_funcs dcss_mode_config_helpers = { .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, }; static void dcss_kms_mode_config_init(struct dcss_kms_dev *kms) { struct drm_mode_config *config = &kms->base.mode_config; drm_mode_config_init(&kms->base); config->min_width = 1; config->min_height = 1; config->max_width = 4096; config->max_height = 4096; config->normalize_zpos = true; config->funcs = &dcss_drm_mode_config_funcs; config->helper_private = &dcss_mode_config_helpers; } static const struct drm_encoder_funcs dcss_kms_simple_encoder_funcs = { .destroy = drm_encoder_cleanup, }; static int dcss_kms_bridge_connector_init(struct dcss_kms_dev *kms) { struct drm_device *ddev = &kms->base; struct drm_encoder *encoder = &kms->encoder; struct drm_crtc *crtc = (struct drm_crtc *)&kms->crtc; struct drm_panel *panel; struct drm_bridge *bridge; int ret; ret = drm_of_find_panel_or_bridge(ddev->dev->of_node, 0, 0, &panel, &bridge); if (ret) return ret; if (!bridge) { dev_err(ddev->dev, "No bridge found 
%d.\n", ret); return -ENODEV; } encoder->possible_crtcs = drm_crtc_mask(crtc); ret = drm_encoder_init(&kms->base, encoder, &dcss_kms_simple_encoder_funcs, DRM_MODE_ENCODER_NONE, NULL); if (ret) { dev_err(ddev->dev, "Failed initializing encoder %d.\n", ret); return ret; } ret = drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); if (ret < 0) return ret; kms->connector = drm_bridge_connector_init(ddev, encoder); if (IS_ERR(kms->connector)) { dev_err(ddev->dev, "Unable to create bridge connector.\n"); return PTR_ERR(kms->connector); } drm_connector_attach_encoder(kms->connector, encoder); return 0; } struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss) { struct dcss_kms_dev *kms; struct drm_device *drm; struct dcss_crtc *crtc; int ret; kms = devm_drm_dev_alloc(dcss->dev, &dcss_kms_driver, struct dcss_kms_dev, base); if (IS_ERR(kms)) return kms; drm = &kms->base; crtc = &kms->crtc; drm->dev_private = dcss; dcss_kms_mode_config_init(kms); ret = drm_vblank_init(drm, 1); if (ret) goto cleanup_mode_config; ret = dcss_kms_bridge_connector_init(kms); if (ret) goto cleanup_mode_config; ret = dcss_crtc_init(crtc, drm); if (ret) goto cleanup_mode_config; drm_mode_config_reset(drm); drm_kms_helper_poll_init(drm); ret = drm_dev_register(drm, 0); if (ret) goto cleanup_crtc; drm_fbdev_dma_setup(drm, 32); return kms; cleanup_crtc: drm_kms_helper_poll_fini(drm); dcss_crtc_deinit(crtc, drm); cleanup_mode_config: drm_mode_config_cleanup(drm); drm->dev_private = NULL; return ERR_PTR(ret); } void dcss_kms_detach(struct dcss_kms_dev *kms) { struct drm_device *drm = &kms->base; drm_dev_unregister(drm); drm_kms_helper_poll_fini(drm); drm_atomic_helper_shutdown(drm); drm_crtc_vblank_off(&kms->crtc.base); drm_mode_config_cleanup(drm); dcss_crtc_deinit(&kms->crtc, drm); drm->dev_private = NULL; }
linux-master
drivers/gpu/drm/imx/dcss/dcss-kms.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright 2019 NXP. */ #include <linux/device.h> #include <linux/slab.h> #include "dcss-dev.h" #define DCSS_DPR_SYSTEM_CTRL0 0x000 #define RUN_EN BIT(0) #define SOFT_RESET BIT(1) #define REPEAT_EN BIT(2) #define SHADOW_LOAD_EN BIT(3) #define SW_SHADOW_LOAD_SEL BIT(4) #define BCMD2AXI_MSTR_ID_CTRL BIT(16) #define DCSS_DPR_IRQ_MASK 0x020 #define DCSS_DPR_IRQ_MASK_STATUS 0x030 #define DCSS_DPR_IRQ_NONMASK_STATUS 0x040 #define IRQ_DPR_CTRL_DONE BIT(0) #define IRQ_DPR_RUN BIT(1) #define IRQ_DPR_SHADOW_LOADED BIT(2) #define IRQ_AXI_READ_ERR BIT(3) #define DPR2RTR_YRGB_FIFO_OVFL BIT(4) #define DPR2RTR_UV_FIFO_OVFL BIT(5) #define DPR2RTR_FIFO_LD_BUF_RDY_YRGB_ERR BIT(6) #define DPR2RTR_FIFO_LD_BUF_RDY_UV_ERR BIT(7) #define DCSS_DPR_MODE_CTRL0 0x050 #define RTR_3BUF_EN BIT(0) #define RTR_4LINE_BUF_EN BIT(1) #define TILE_TYPE_POS 2 #define TILE_TYPE_MASK GENMASK(4, 2) #define YUV_EN BIT(6) #define COMP_2PLANE_EN BIT(7) #define PIX_SIZE_POS 8 #define PIX_SIZE_MASK GENMASK(9, 8) #define PIX_LUMA_UV_SWAP BIT(10) #define PIX_UV_SWAP BIT(11) #define B_COMP_SEL_POS 12 #define B_COMP_SEL_MASK GENMASK(13, 12) #define G_COMP_SEL_POS 14 #define G_COMP_SEL_MASK GENMASK(15, 14) #define R_COMP_SEL_POS 16 #define R_COMP_SEL_MASK GENMASK(17, 16) #define A_COMP_SEL_POS 18 #define A_COMP_SEL_MASK GENMASK(19, 18) #define DCSS_DPR_FRAME_CTRL0 0x070 #define HFLIP_EN BIT(0) #define VFLIP_EN BIT(1) #define ROT_ENC_POS 2 #define ROT_ENC_MASK GENMASK(3, 2) #define ROT_FLIP_ORDER_EN BIT(4) #define PITCH_POS 16 #define PITCH_MASK GENMASK(31, 16) #define DCSS_DPR_FRAME_1P_CTRL0 0x090 #define DCSS_DPR_FRAME_1P_PIX_X_CTRL 0x0A0 #define DCSS_DPR_FRAME_1P_PIX_Y_CTRL 0x0B0 #define DCSS_DPR_FRAME_1P_BASE_ADDR 0x0C0 #define DCSS_DPR_FRAME_2P_CTRL0 0x0E0 #define DCSS_DPR_FRAME_2P_PIX_X_CTRL 0x0F0 #define DCSS_DPR_FRAME_2P_PIX_Y_CTRL 0x100 #define DCSS_DPR_FRAME_2P_BASE_ADDR 0x110 #define DCSS_DPR_STATUS_CTRL0 0x130 #define STATUS_MUX_SEL_MASK GENMASK(2, 0) #define 
STATUS_SRC_SEL_POS 16 #define STATUS_SRC_SEL_MASK GENMASK(18, 16) #define DCSS_DPR_STATUS_CTRL1 0x140 #define DCSS_DPR_RTRAM_CTRL0 0x200 #define NUM_ROWS_ACTIVE BIT(0) #define THRES_HIGH_POS 1 #define THRES_HIGH_MASK GENMASK(3, 1) #define THRES_LOW_POS 4 #define THRES_LOW_MASK GENMASK(6, 4) #define ABORT_SEL BIT(7) enum dcss_tile_type { TILE_LINEAR = 0, TILE_GPU_STANDARD, TILE_GPU_SUPER, TILE_VPU_YUV420, TILE_VPU_VP9, }; enum dcss_pix_size { PIX_SIZE_8, PIX_SIZE_16, PIX_SIZE_32, }; struct dcss_dpr_ch { struct dcss_dpr *dpr; void __iomem *base_reg; u32 base_ofs; struct drm_format_info format; enum dcss_pix_size pix_size; enum dcss_tile_type tile; bool rtram_4line_en; bool rtram_3buf_en; u32 frame_ctrl; u32 mode_ctrl; u32 sys_ctrl; u32 rtram_ctrl; bool sys_ctrl_chgd; int ch_num; int irq; }; struct dcss_dpr { struct device *dev; struct dcss_ctxld *ctxld; u32 ctx_id; struct dcss_dpr_ch ch[3]; }; static void dcss_dpr_write(struct dcss_dpr_ch *ch, u32 val, u32 ofs) { struct dcss_dpr *dpr = ch->dpr; dcss_ctxld_write(dpr->ctxld, dpr->ctx_id, val, ch->base_ofs + ofs); } static int dcss_dpr_ch_init_all(struct dcss_dpr *dpr, unsigned long dpr_base) { struct dcss_dpr_ch *ch; int i; for (i = 0; i < 3; i++) { ch = &dpr->ch[i]; ch->base_ofs = dpr_base + i * 0x1000; ch->base_reg = ioremap(ch->base_ofs, SZ_4K); if (!ch->base_reg) { dev_err(dpr->dev, "dpr: unable to remap ch %d base\n", i); return -ENOMEM; } ch->dpr = dpr; ch->ch_num = i; dcss_writel(0xff, ch->base_reg + DCSS_DPR_IRQ_MASK); } return 0; } int dcss_dpr_init(struct dcss_dev *dcss, unsigned long dpr_base) { struct dcss_dpr *dpr; dpr = kzalloc(sizeof(*dpr), GFP_KERNEL); if (!dpr) return -ENOMEM; dcss->dpr = dpr; dpr->dev = dcss->dev; dpr->ctxld = dcss->ctxld; dpr->ctx_id = CTX_SB_HP; if (dcss_dpr_ch_init_all(dpr, dpr_base)) { int i; for (i = 0; i < 3; i++) { if (dpr->ch[i].base_reg) iounmap(dpr->ch[i].base_reg); } kfree(dpr); return -ENOMEM; } return 0; } void dcss_dpr_exit(struct dcss_dpr *dpr) { int ch_no; /* stop DPR 
on all channels */ for (ch_no = 0; ch_no < 3; ch_no++) { struct dcss_dpr_ch *ch = &dpr->ch[ch_no]; dcss_writel(0, ch->base_reg + DCSS_DPR_SYSTEM_CTRL0); if (ch->base_reg) iounmap(ch->base_reg); } kfree(dpr); } static u32 dcss_dpr_x_pix_wide_adjust(struct dcss_dpr_ch *ch, u32 pix_wide, u32 pix_format) { u8 pix_in_64byte_map[3][5] = { /* LIN, GPU_STD, GPU_SUP, VPU_YUV420, VPU_VP9 */ { 64, 8, 8, 8, 16}, /* PIX_SIZE_8 */ { 32, 8, 8, 8, 8}, /* PIX_SIZE_16 */ { 16, 4, 4, 8, 8}, /* PIX_SIZE_32 */ }; u32 offset; u32 div_64byte_mod, pix_in_64byte; pix_in_64byte = pix_in_64byte_map[ch->pix_size][ch->tile]; div_64byte_mod = pix_wide % pix_in_64byte; offset = (div_64byte_mod == 0) ? 0 : (pix_in_64byte - div_64byte_mod); return pix_wide + offset; } static u32 dcss_dpr_y_pix_high_adjust(struct dcss_dpr_ch *ch, u32 pix_high, u32 pix_format) { u8 num_rows_buf = ch->rtram_4line_en ? 4 : 8; u32 offset, pix_y_mod; pix_y_mod = pix_high % num_rows_buf; offset = pix_y_mod ? (num_rows_buf - pix_y_mod) : 0; return pix_high + offset; } void dcss_dpr_set_res(struct dcss_dpr *dpr, int ch_num, u32 xres, u32 yres) { struct dcss_dpr_ch *ch = &dpr->ch[ch_num]; u32 pix_format = ch->format.format; u32 gap = DCSS_DPR_FRAME_2P_BASE_ADDR - DCSS_DPR_FRAME_1P_BASE_ADDR; int plane, max_planes = 1; u32 pix_x_wide, pix_y_high; if (pix_format == DRM_FORMAT_NV12 || pix_format == DRM_FORMAT_NV21) max_planes = 2; for (plane = 0; plane < max_planes; plane++) { yres = plane == 1 ? 
yres >> 1 : yres; pix_x_wide = dcss_dpr_x_pix_wide_adjust(ch, xres, pix_format); pix_y_high = dcss_dpr_y_pix_high_adjust(ch, yres, pix_format); dcss_dpr_write(ch, pix_x_wide, DCSS_DPR_FRAME_1P_PIX_X_CTRL + plane * gap); dcss_dpr_write(ch, pix_y_high, DCSS_DPR_FRAME_1P_PIX_Y_CTRL + plane * gap); dcss_dpr_write(ch, 2, DCSS_DPR_FRAME_1P_CTRL0 + plane * gap); } } void dcss_dpr_addr_set(struct dcss_dpr *dpr, int ch_num, u32 luma_base_addr, u32 chroma_base_addr, u16 pitch) { struct dcss_dpr_ch *ch = &dpr->ch[ch_num]; dcss_dpr_write(ch, luma_base_addr, DCSS_DPR_FRAME_1P_BASE_ADDR); dcss_dpr_write(ch, chroma_base_addr, DCSS_DPR_FRAME_2P_BASE_ADDR); ch->frame_ctrl &= ~PITCH_MASK; ch->frame_ctrl |= (((u32)pitch << PITCH_POS) & PITCH_MASK); } static void dcss_dpr_argb_comp_sel(struct dcss_dpr_ch *ch, int a_sel, int r_sel, int g_sel, int b_sel) { u32 sel; sel = ((a_sel << A_COMP_SEL_POS) & A_COMP_SEL_MASK) | ((r_sel << R_COMP_SEL_POS) & R_COMP_SEL_MASK) | ((g_sel << G_COMP_SEL_POS) & G_COMP_SEL_MASK) | ((b_sel << B_COMP_SEL_POS) & B_COMP_SEL_MASK); ch->mode_ctrl &= ~(A_COMP_SEL_MASK | R_COMP_SEL_MASK | G_COMP_SEL_MASK | B_COMP_SEL_MASK); ch->mode_ctrl |= sel; } static void dcss_dpr_pix_size_set(struct dcss_dpr_ch *ch, const struct drm_format_info *format) { u32 val; switch (format->format) { case DRM_FORMAT_NV12: case DRM_FORMAT_NV21: val = PIX_SIZE_8; break; case DRM_FORMAT_UYVY: case DRM_FORMAT_VYUY: case DRM_FORMAT_YUYV: case DRM_FORMAT_YVYU: val = PIX_SIZE_16; break; default: val = PIX_SIZE_32; break; } ch->pix_size = val; ch->mode_ctrl &= ~PIX_SIZE_MASK; ch->mode_ctrl |= ((val << PIX_SIZE_POS) & PIX_SIZE_MASK); } static void dcss_dpr_uv_swap(struct dcss_dpr_ch *ch, bool swap) { ch->mode_ctrl &= ~PIX_UV_SWAP; ch->mode_ctrl |= (swap ? PIX_UV_SWAP : 0); } static void dcss_dpr_y_uv_swap(struct dcss_dpr_ch *ch, bool swap) { ch->mode_ctrl &= ~PIX_LUMA_UV_SWAP; ch->mode_ctrl |= (swap ? 
PIX_LUMA_UV_SWAP : 0); } static void dcss_dpr_2plane_en(struct dcss_dpr_ch *ch, bool en) { ch->mode_ctrl &= ~COMP_2PLANE_EN; ch->mode_ctrl |= (en ? COMP_2PLANE_EN : 0); } static void dcss_dpr_yuv_en(struct dcss_dpr_ch *ch, bool en) { ch->mode_ctrl &= ~YUV_EN; ch->mode_ctrl |= (en ? YUV_EN : 0); } void dcss_dpr_enable(struct dcss_dpr *dpr, int ch_num, bool en) { struct dcss_dpr_ch *ch = &dpr->ch[ch_num]; u32 sys_ctrl; sys_ctrl = (en ? REPEAT_EN | RUN_EN : 0); if (en) { dcss_dpr_write(ch, ch->mode_ctrl, DCSS_DPR_MODE_CTRL0); dcss_dpr_write(ch, ch->frame_ctrl, DCSS_DPR_FRAME_CTRL0); dcss_dpr_write(ch, ch->rtram_ctrl, DCSS_DPR_RTRAM_CTRL0); } if (ch->sys_ctrl != sys_ctrl) ch->sys_ctrl_chgd = true; ch->sys_ctrl = sys_ctrl; } struct rgb_comp_sel { u32 drm_format; int a_sel; int r_sel; int g_sel; int b_sel; }; static struct rgb_comp_sel comp_sel_map[] = { {DRM_FORMAT_ARGB8888, 3, 2, 1, 0}, {DRM_FORMAT_XRGB8888, 3, 2, 1, 0}, {DRM_FORMAT_ABGR8888, 3, 0, 1, 2}, {DRM_FORMAT_XBGR8888, 3, 0, 1, 2}, {DRM_FORMAT_RGBA8888, 0, 3, 2, 1}, {DRM_FORMAT_RGBX8888, 0, 3, 2, 1}, {DRM_FORMAT_BGRA8888, 0, 1, 2, 3}, {DRM_FORMAT_BGRX8888, 0, 1, 2, 3}, }; static int to_comp_sel(u32 pix_fmt, int *a_sel, int *r_sel, int *g_sel, int *b_sel) { int i; for (i = 0; i < ARRAY_SIZE(comp_sel_map); i++) { if (comp_sel_map[i].drm_format == pix_fmt) { *a_sel = comp_sel_map[i].a_sel; *r_sel = comp_sel_map[i].r_sel; *g_sel = comp_sel_map[i].g_sel; *b_sel = comp_sel_map[i].b_sel; return 0; } } return -1; } static void dcss_dpr_rtram_set(struct dcss_dpr_ch *ch, u32 pix_format) { u32 val, mask; switch (pix_format) { case DRM_FORMAT_NV21: case DRM_FORMAT_NV12: ch->rtram_3buf_en = true; ch->rtram_4line_en = false; break; default: ch->rtram_3buf_en = true; ch->rtram_4line_en = true; break; } val = (ch->rtram_4line_en ? RTR_4LINE_BUF_EN : 0); val |= (ch->rtram_3buf_en ? RTR_3BUF_EN : 0); mask = RTR_4LINE_BUF_EN | RTR_3BUF_EN; ch->mode_ctrl &= ~mask; ch->mode_ctrl |= (val & mask); val = (ch->rtram_4line_en ? 
0 : NUM_ROWS_ACTIVE); val |= (3 << THRES_LOW_POS) & THRES_LOW_MASK; val |= (4 << THRES_HIGH_POS) & THRES_HIGH_MASK; mask = THRES_LOW_MASK | THRES_HIGH_MASK | NUM_ROWS_ACTIVE; ch->rtram_ctrl &= ~mask; ch->rtram_ctrl |= (val & mask); } static void dcss_dpr_setup_components(struct dcss_dpr_ch *ch, const struct drm_format_info *format) { int a_sel, r_sel, g_sel, b_sel; bool uv_swap, y_uv_swap; switch (format->format) { case DRM_FORMAT_YVYU: uv_swap = true; y_uv_swap = true; break; case DRM_FORMAT_VYUY: case DRM_FORMAT_NV21: uv_swap = true; y_uv_swap = false; break; case DRM_FORMAT_YUYV: uv_swap = false; y_uv_swap = true; break; default: uv_swap = false; y_uv_swap = false; break; } dcss_dpr_uv_swap(ch, uv_swap); dcss_dpr_y_uv_swap(ch, y_uv_swap); if (!format->is_yuv) { if (!to_comp_sel(format->format, &a_sel, &r_sel, &g_sel, &b_sel)) { dcss_dpr_argb_comp_sel(ch, a_sel, r_sel, g_sel, b_sel); } else { dcss_dpr_argb_comp_sel(ch, 3, 2, 1, 0); } } else { dcss_dpr_argb_comp_sel(ch, 0, 0, 0, 0); } } static void dcss_dpr_tile_set(struct dcss_dpr_ch *ch, uint64_t modifier) { switch (ch->ch_num) { case 0: switch (modifier) { case DRM_FORMAT_MOD_LINEAR: ch->tile = TILE_LINEAR; break; case DRM_FORMAT_MOD_VIVANTE_TILED: ch->tile = TILE_GPU_STANDARD; break; case DRM_FORMAT_MOD_VIVANTE_SUPER_TILED: ch->tile = TILE_GPU_SUPER; break; default: WARN_ON(1); break; } break; case 1: case 2: ch->tile = TILE_LINEAR; break; default: WARN_ON(1); return; } ch->mode_ctrl &= ~TILE_TYPE_MASK; ch->mode_ctrl |= ((ch->tile << TILE_TYPE_POS) & TILE_TYPE_MASK); } void dcss_dpr_format_set(struct dcss_dpr *dpr, int ch_num, const struct drm_format_info *format, u64 modifier) { struct dcss_dpr_ch *ch = &dpr->ch[ch_num]; ch->format = *format; dcss_dpr_yuv_en(ch, format->is_yuv); dcss_dpr_pix_size_set(ch, format); dcss_dpr_setup_components(ch, format); dcss_dpr_2plane_en(ch, format->num_planes == 2); dcss_dpr_rtram_set(ch, format->format); dcss_dpr_tile_set(ch, modifier); } /* This function will be called from 
interrupt context. */ void dcss_dpr_write_sysctrl(struct dcss_dpr *dpr) { int chnum; dcss_ctxld_assert_locked(dpr->ctxld); for (chnum = 0; chnum < 3; chnum++) { struct dcss_dpr_ch *ch = &dpr->ch[chnum]; if (ch->sys_ctrl_chgd) { dcss_ctxld_write_irqsafe(dpr->ctxld, dpr->ctx_id, ch->sys_ctrl, ch->base_ofs + DCSS_DPR_SYSTEM_CTRL0); ch->sys_ctrl_chgd = false; } } } void dcss_dpr_set_rotation(struct dcss_dpr *dpr, int ch_num, u32 rotation) { struct dcss_dpr_ch *ch = &dpr->ch[ch_num]; ch->frame_ctrl &= ~(HFLIP_EN | VFLIP_EN | ROT_ENC_MASK); ch->frame_ctrl |= rotation & DRM_MODE_REFLECT_X ? HFLIP_EN : 0; ch->frame_ctrl |= rotation & DRM_MODE_REFLECT_Y ? VFLIP_EN : 0; if (rotation & DRM_MODE_ROTATE_90) ch->frame_ctrl |= 1 << ROT_ENC_POS; else if (rotation & DRM_MODE_ROTATE_180) ch->frame_ctrl |= 2 << ROT_ENC_POS; else if (rotation & DRM_MODE_ROTATE_270) ch->frame_ctrl |= 3 << ROT_ENC_POS; }
linux-master
drivers/gpu/drm/imx/dcss/dcss-dpr.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 NXP.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dcss-dev.h"

/* DTG (Display Timing Generator) register offsets and bit fields. */
#define DCSS_DTG_TC_CONTROL_STATUS		0x00
#define   CH3_EN				BIT(0)
#define   CH2_EN				BIT(1)
#define   CH1_EN				BIT(2)
#define   OVL_DATA_MODE				BIT(3)
#define   BLENDER_VIDEO_ALPHA_SEL		BIT(7)
#define   DTG_START				BIT(8)
#define   DBY_MODE_EN				BIT(9)
#define   CH1_ALPHA_SEL				BIT(10)
#define   CSS_PIX_COMP_SWAP_POS			12
#define   CSS_PIX_COMP_SWAP_MASK		GENMASK(14, 12)
#define   DEFAULT_FG_ALPHA_POS			24
#define   DEFAULT_FG_ALPHA_MASK			GENMASK(31, 24)
#define DCSS_DTG_TC_DTG				0x04
#define DCSS_DTG_TC_DISP_TOP			0x08
#define DCSS_DTG_TC_DISP_BOT			0x0C
#define DCSS_DTG_TC_CH1_TOP			0x10
#define DCSS_DTG_TC_CH1_BOT			0x14
#define DCSS_DTG_TC_CH2_TOP			0x18
#define DCSS_DTG_TC_CH2_BOT			0x1C
#define DCSS_DTG_TC_CH3_TOP			0x20
#define DCSS_DTG_TC_CH3_BOT			0x24
/* X/Y coordinate fields shared by the TOP/BOT window registers above. */
#define   TC_X_POS				0
#define   TC_X_MASK				GENMASK(12, 0)
#define   TC_Y_POS				16
#define   TC_Y_MASK				GENMASK(28, 16)
#define DCSS_DTG_TC_CTXLD			0x28
#define   TC_CTXLD_DB_Y_POS			0
#define   TC_CTXLD_DB_Y_MASK			GENMASK(12, 0)
#define   TC_CTXLD_SB_Y_POS			16
#define   TC_CTXLD_SB_Y_MASK			GENMASK(28, 16)
#define DCSS_DTG_TC_CH1_BKRND			0x2C
#define DCSS_DTG_TC_CH2_BKRND			0x30
#define   BKRND_R_Y_COMP_POS			20
#define   BKRND_R_Y_COMP_MASK			GENMASK(29, 20)
#define   BKRND_G_U_COMP_POS			10
#define   BKRND_G_U_COMP_MASK			GENMASK(19, 10)
#define   BKRND_B_V_COMP_POS			0
#define   BKRND_B_V_COMP_MASK			GENMASK(9, 0)
#define DCSS_DTG_BLENDER_DBY_RANGEINV		0x38
#define DCSS_DTG_BLENDER_DBY_RANGEMIN		0x3C
#define DCSS_DTG_BLENDER_DBY_BDP		0x40
#define DCSS_DTG_BLENDER_BKRND_I		0x44
#define DCSS_DTG_BLENDER_BKRND_P		0x48
#define DCSS_DTG_BLENDER_BKRND_T		0x4C
#define DCSS_DTG_LINE0_INT			0x50
#define DCSS_DTG_LINE1_INT			0x54
#define DCSS_DTG_BG_ALPHA_DEFAULT		0x58
#define DCSS_DTG_INT_STATUS			0x5C
#define DCSS_DTG_INT_CONTROL			0x60
#define DCSS_DTG_TC_CH3_BKRND			0x64
#define DCSS_DTG_INT_MASK			0x68
#define   LINE0_IRQ				BIT(0)
#define   LINE1_IRQ				BIT(1)
#define   LINE2_IRQ				BIT(2)
#define   LINE3_IRQ				BIT(3)
#define DCSS_DTG_LINE2_INT			0x6C
#define DCSS_DTG_LINE3_INT			0x70
#define DCSS_DTG_DBY_OL				0x74
#define DCSS_DTG_DBY_BL				0x78
#define DCSS_DTG_DBY_EL				0x7C

/* Per-instance state for the DTG submodule. */
struct dcss_dtg {
	struct device *dev;
	struct dcss_ctxld *ctxld;
	void __iomem *base_reg;		/* ioremapped MMIO base */
	u32 base_ofs;			/* physical base, used for CTXLD entries */

	u32 ctx_id;			/* context-loader context this DTG writes into */

	bool in_use;			/* true once the DTG has been started */

	/* upper-left corner of the active display area, cached for plane positioning */
	u32 dis_ulc_x;
	u32 dis_ulc_y;

	u32 control_status;		/* shadow of DCSS_DTG_TC_CONTROL_STATUS */
	u32 alpha;			/* current global alpha (0..255) for channel 0 */
	u32 alpha_cfg;			/* alpha-related bits to merge into control_status */

	int ctxld_kick_irq;
	bool ctxld_kick_irq_en;
};

/*
 * Write a DTG register. Before the DTG is running, the value is also written
 * directly to the hardware; once in use, only the CTXLD entry is queued so
 * the change takes effect atomically at the next context load.
 */
static void dcss_dtg_write(struct dcss_dtg *dtg, u32 val, u32 ofs)
{
	if (!dtg->in_use)
		dcss_writel(val, dtg->base_reg + ofs);

	dcss_ctxld_write(dtg->ctxld, dtg->ctx_id,
			 val, dtg->base_ofs + ofs);
}

/*
 * LINE0 interrupt handler: kicks the context loader, then acks the
 * interrupt by writing the status bit back to INT_CONTROL.
 */
static irqreturn_t dcss_dtg_irq_handler(int irq, void *data)
{
	struct dcss_dtg *dtg = data;
	u32 status;

	status = dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS);

	if (!(status & LINE0_IRQ))
		return IRQ_NONE;

	dcss_ctxld_kick(dtg->ctxld);

	dcss_writel(status & LINE0_IRQ, dtg->base_reg + DCSS_DTG_INT_CONTROL);

	return IRQ_HANDLED;
}

/*
 * Request the "ctxld_kick" IRQ and leave it masked/disabled; it is enabled
 * on demand by dcss_dtg_ctxld_kick_irq_enable().
 */
static int dcss_dtg_irq_config(struct dcss_dtg *dtg,
			       struct platform_device *pdev)
{
	int ret;

	dtg->ctxld_kick_irq = platform_get_irq_byname(pdev, "ctxld_kick");
	if (dtg->ctxld_kick_irq < 0)
		return dtg->ctxld_kick_irq;

	/* mask both line interrupts before installing the handler */
	dcss_update(0, LINE0_IRQ | LINE1_IRQ,
		    dtg->base_reg + DCSS_DTG_INT_MASK);

	ret = request_irq(dtg->ctxld_kick_irq, dcss_dtg_irq_handler,
			  0, "dcss_ctxld_kick", dtg);
	if (ret) {
		dev_err(dtg->dev, "dtg: irq request failed.\n");
		return ret;
	}

	disable_irq(dtg->ctxld_kick_irq);

	dtg->ctxld_kick_irq_en = false;

	return 0;
}

/*
 * Allocate and map the DTG submodule and configure its kick IRQ.
 * Returns 0 on success or a negative errno.
 */
int dcss_dtg_init(struct dcss_dev *dcss, unsigned long dtg_base)
{
	int ret = 0;
	struct dcss_dtg *dtg;

	dtg = kzalloc(sizeof(*dtg), GFP_KERNEL);
	if (!dtg)
		return -ENOMEM;

	/*
	 * NOTE(review): dcss->dtg is set before init completes and is not
	 * cleared on the error paths below — callers are expected not to use
	 * it if this function fails.
	 */
	dcss->dtg = dtg;
	dtg->dev = dcss->dev;
	dtg->ctxld = dcss->ctxld;

	dtg->base_reg = ioremap(dtg_base, SZ_4K);
	if (!dtg->base_reg) {
		dev_err(dcss->dev, "dtg: unable to remap dtg base\n");
		ret = -ENOMEM;
		goto err_ioremap;
	}

	dtg->base_ofs = dtg_base;
	dtg->ctx_id = CTX_DB;

	dtg->alpha = 255;

	/* default: overlay data mode, video alpha select, fully opaque FG alpha */
	dtg->control_status |= OVL_DATA_MODE | BLENDER_VIDEO_ALPHA_SEL |
		((dtg->alpha << DEFAULT_FG_ALPHA_POS) & DEFAULT_FG_ALPHA_MASK);

	ret = dcss_dtg_irq_config(dtg, to_platform_device(dcss->dev));
	if (ret)
		goto err_irq;

	return 0;

err_irq:
	iounmap(dtg->base_reg);

err_ioremap:
	kfree(dtg);

	return ret;
}

/* Tear down the DTG: release the IRQ, unmap registers and free state. */
void dcss_dtg_exit(struct dcss_dtg *dtg)
{
	free_irq(dtg->ctxld_kick_irq, dtg);

	if (dtg->base_reg)
		iounmap(dtg->base_reg);

	kfree(dtg);
}

/*
 * Program the DTG timing from @vm: total frame size, active display window,
 * context-load trigger lines and the pixel clock. The pixel clock is
 * reprogrammed with the clock gated; if the resulting rate differs from the
 * requested one, that is logged.
 */
void dcss_dtg_sync_set(struct dcss_dtg *dtg, struct videomode *vm)
{
	struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dtg->dev);
	u16 dtg_lrc_x, dtg_lrc_y;
	u16 dis_ulc_x, dis_ulc_y;
	u16 dis_lrc_x, dis_lrc_y;
	u32 sb_ctxld_trig, db_ctxld_trig;
	u32 pixclock = vm->pixelclock;
	u32 actual_clk;

	/* lower-right corner of the total frame (inclusive, hence -1) */
	dtg_lrc_x = vm->hfront_porch + vm->hback_porch + vm->hsync_len +
		    vm->hactive - 1;
	dtg_lrc_y = vm->vfront_porch + vm->vback_porch + vm->vsync_len +
		    vm->vactive - 1;
	/* upper-left and lower-right corners of the active display area */
	dis_ulc_x = vm->hsync_len + vm->hback_porch - 1;
	dis_ulc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch - 1;
	dis_lrc_x = vm->hsync_len + vm->hback_porch + vm->hactive - 1;
	dis_lrc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch +
		    vm->vactive - 1;

	clk_disable_unprepare(dcss->pix_clk);
	clk_set_rate(dcss->pix_clk, vm->pixelclock);
	clk_prepare_enable(dcss->pix_clk);

	actual_clk = clk_get_rate(dcss->pix_clk);
	if (pixclock != actual_clk) {
		dev_info(dtg->dev,
			 "Pixel clock set to %u kHz instead of %u kHz.\n",
			 (actual_clk / 1000), (pixclock / 1000));
	}

	dcss_dtg_write(dtg, ((dtg_lrc_y << TC_Y_POS) | dtg_lrc_x),
		       DCSS_DTG_TC_DTG);
	dcss_dtg_write(dtg, ((dis_ulc_y << TC_Y_POS) | dis_ulc_x),
		       DCSS_DTG_TC_DISP_TOP);
	dcss_dtg_write(dtg, ((dis_lrc_y << TC_Y_POS) | dis_lrc_x),
		       DCSS_DTG_TC_DISP_BOT);

	dtg->dis_ulc_x = dis_ulc_x;
	dtg->dis_ulc_y = dis_ulc_y;

	/* single-buffer trigger at line 0, double-buffer trigger at 99% of frame */
	sb_ctxld_trig = ((0 * dis_lrc_y / 100) << TC_CTXLD_SB_Y_POS) &
							TC_CTXLD_SB_Y_MASK;
	db_ctxld_trig = ((99 * dis_lrc_y / 100) << TC_CTXLD_DB_Y_POS) &
							TC_CTXLD_DB_Y_MASK;

	dcss_dtg_write(dtg, sb_ctxld_trig | db_ctxld_trig,
		       DCSS_DTG_TC_CTXLD);

	/* vblank trigger */
	dcss_dtg_write(dtg, 0, DCSS_DTG_LINE1_INT);

	/* CTXLD trigger */
	dcss_dtg_write(dtg, ((90 * dis_lrc_y) / 100) << 16, DCSS_DTG_LINE0_INT);
}

/*
 * Position channel @ch_num's window. Coordinates are relative to the active
 * display area; an all-zero rectangle clears the channel's window registers.
 */
void dcss_dtg_plane_pos_set(struct dcss_dtg *dtg, int ch_num,
			    int px, int py, int pw, int ph)
{
	u16 p_ulc_x, p_ulc_y;
	u16 p_lrc_x, p_lrc_y;

	p_ulc_x = dtg->dis_ulc_x + px;
	p_ulc_y = dtg->dis_ulc_y + py;
	p_lrc_x = p_ulc_x + pw;
	p_lrc_y = p_ulc_y + ph;

	if (!px && !py && !pw && !ph) {
		dcss_dtg_write(dtg, 0, DCSS_DTG_TC_CH1_TOP + 0x8 * ch_num);
		dcss_dtg_write(dtg, 0, DCSS_DTG_TC_CH1_BOT + 0x8 * ch_num);
	} else {
		dcss_dtg_write(dtg, ((p_ulc_y << TC_Y_POS) | p_ulc_x),
			       DCSS_DTG_TC_CH1_TOP + 0x8 * ch_num);
		dcss_dtg_write(dtg, ((p_lrc_y << TC_Y_POS) | p_lrc_x),
			       DCSS_DTG_TC_CH1_BOT + 0x8 * ch_num);
	}
}

/*
 * Return true if @alpha differs from the cached global alpha. Only channel 0
 * carries a global alpha, so other channels always report "unchanged".
 */
bool dcss_dtg_global_alpha_changed(struct dcss_dtg *dtg, int ch_num, int alpha)
{
	if (ch_num)
		return false;

	return alpha != dtg->alpha;
}

/*
 * Select the alpha mode for channel 0: global alpha when the format has no
 * alpha channel or a non-opaque global alpha was requested, per-pixel alpha
 * otherwise. Takes effect at the next dcss_dtg_enable()/ch_enable().
 */
void dcss_dtg_plane_alpha_set(struct dcss_dtg *dtg, int ch_num,
			      const struct drm_format_info *format, int alpha)
{
	/* we care about alpha only when channel 0 is concerned */
	if (ch_num)
		return;

	/*
	 * Use global alpha if pixel format does not have alpha channel or the
	 * user explicitly chose to use global alpha (i.e. alpha is not OPAQUE).
	 */
	if (!format->has_alpha || alpha != 255)
		dtg->alpha_cfg = (alpha << DEFAULT_FG_ALPHA_POS) &
				 DEFAULT_FG_ALPHA_MASK;
	else /* use per-pixel alpha otherwise */
		dtg->alpha_cfg = CH1_ALPHA_SEL;

	dtg->alpha = alpha;
}

/* Set the pixel-component swap configuration (value 0x5) in the shadow CSR. */
void dcss_dtg_css_set(struct dcss_dtg *dtg)
{
	dtg->control_status |=
			(0x5 << CSS_PIX_COMP_SWAP_POS) & CSS_PIX_COMP_SWAP_MASK;
}

/*
 * Start the DTG, merging the currently selected alpha configuration into the
 * control/status register. Marks the DTG as in use so further register writes
 * go through the context loader only.
 */
void dcss_dtg_enable(struct dcss_dtg *dtg)
{
	dtg->control_status |= DTG_START;

	dtg->control_status &= ~(CH1_ALPHA_SEL | DEFAULT_FG_ALPHA_MASK);
	dtg->control_status |= dtg->alpha_cfg;

	dcss_dtg_write(dtg, dtg->control_status, DCSS_DTG_TC_CONTROL_STATUS);

	dtg->in_use = true;
}

/*
 * Stop the DTG with a direct register write (bypassing CTXLD) and mark it
 * as no longer in use.
 */
void dcss_dtg_shutoff(struct dcss_dtg *dtg)
{
	dtg->control_status &= ~DTG_START;

	dcss_writel(dtg->control_status,
		    dtg->base_reg + DCSS_DTG_TC_CONTROL_STATUS);

	dtg->in_use = false;
}

/* Report whether the DTG has been started. */
bool dcss_dtg_is_enabled(struct dcss_dtg *dtg)
{
	return dtg->in_use;
}

/*
 * Enable or disable channel @ch_num, re-applying the alpha configuration.
 * The register is only written when the resulting value actually changes.
 */
void dcss_dtg_ch_enable(struct dcss_dtg *dtg, int ch_num, bool en)
{
	u32 ch_en_map[] = {CH1_EN, CH2_EN, CH3_EN};
	u32 control_status;

	control_status = dtg->control_status & ~ch_en_map[ch_num];
	control_status |= en ? ch_en_map[ch_num] : 0;

	control_status &= ~(CH1_ALPHA_SEL | DEFAULT_FG_ALPHA_MASK);
	control_status |= dtg->alpha_cfg;

	if (dtg->control_status != control_status)
		dcss_dtg_write(dtg, control_status,
			       DCSS_DTG_TC_CONTROL_STATUS);

	dtg->control_status = control_status;
}

/*
 * Enable/disable the LINE1 (vblank) interrupt. A pending status bit is
 * acknowledged before unmasking so a stale event does not fire immediately.
 */
void dcss_dtg_vblank_irq_enable(struct dcss_dtg *dtg, bool en)
{
	u32 status;
	u32 mask = en ? LINE1_IRQ : 0;

	if (en) {
		status = dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS);
		dcss_writel(status & LINE1_IRQ,
			    dtg->base_reg + DCSS_DTG_INT_CONTROL);
	}

	dcss_update(mask, LINE1_IRQ, dtg->base_reg + DCSS_DTG_INT_MASK);
}

/*
 * Enable/disable the LINE0 (CTXLD kick) interrupt together with its
 * upstream IRQ line. Redundant enables/disables are no-ops; on enable a
 * pending status bit is acked first.
 */
void dcss_dtg_ctxld_kick_irq_enable(struct dcss_dtg *dtg, bool en)
{
	u32 status;
	u32 mask = en ? LINE0_IRQ : 0;

	if (en) {
		status = dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS);

		if (!dtg->ctxld_kick_irq_en) {
			dcss_writel(status & LINE0_IRQ,
				    dtg->base_reg + DCSS_DTG_INT_CONTROL);
			enable_irq(dtg->ctxld_kick_irq);
			dtg->ctxld_kick_irq_en = true;
			dcss_update(mask, LINE0_IRQ,
				    dtg->base_reg + DCSS_DTG_INT_MASK);
		}

		return;
	}

	if (!dtg->ctxld_kick_irq_en)
		return;

	disable_irq_nosync(dtg->ctxld_kick_irq);
	dtg->ctxld_kick_irq_en = false;

	dcss_update(mask, LINE0_IRQ, dtg->base_reg + DCSS_DTG_INT_MASK);
}

/* Acknowledge a pending vblank (LINE1) interrupt. */
void dcss_dtg_vblank_irq_clear(struct dcss_dtg *dtg)
{
	dcss_update(LINE1_IRQ, LINE1_IRQ, dtg->base_reg + DCSS_DTG_INT_CONTROL);
}

/* Return true if a vblank (LINE1) interrupt is pending in the status register. */
bool dcss_dtg_vblank_irq_valid(struct dcss_dtg *dtg)
{
	return !!(dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS) & LINE1_IRQ);
}
linux-master
drivers/gpu/drm/imx/dcss/dcss-dev.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 NXP.
 */

#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_device.h>
#include <drm/drm_modeset_helper.h>

#include "dcss-dev.h"
#include "dcss-kms.h"

/* Enable all DCSS clocks; the pixel clock must come last is not implied here —
 * order simply mirrors dcss_clocks_disable() in reverse. */
static void dcss_clocks_enable(struct dcss_dev *dcss)
{
	clk_prepare_enable(dcss->axi_clk);
	clk_prepare_enable(dcss->apb_clk);
	clk_prepare_enable(dcss->rtrm_clk);
	clk_prepare_enable(dcss->dtrc_clk);
	clk_prepare_enable(dcss->pix_clk);
}

/* Disable all DCSS clocks in reverse order of dcss_clocks_enable(). */
static void dcss_clocks_disable(struct dcss_dev *dcss)
{
	clk_disable_unprepare(dcss->pix_clk);
	clk_disable_unprepare(dcss->dtrc_clk);
	clk_disable_unprepare(dcss->rtrm_clk);
	clk_disable_unprepare(dcss->apb_clk);
	clk_disable_unprepare(dcss->axi_clk);
}

/*
 * Deferred shutoff callback: stops the subsampler and DTG, then signals
 * completion so dcss_crtc_atomic_disable() can stop waiting.
 */
static void dcss_disable_dtg_and_ss_cb(void *data)
{
	struct dcss_dev *dcss = data;

	dcss->disable_callback = NULL;

	dcss_ss_shutoff(dcss->ss);
	dcss_dtg_shutoff(dcss->dtg);

	complete(&dcss->disable_completion);
}

/* Arm the deferred DTG/SS shutoff; the callback fires later (from CTXLD). */
void dcss_disable_dtg_and_ss(struct dcss_dev *dcss)
{
	dcss->disable_callback = dcss_disable_dtg_and_ss_cb;
}

/* Enable the DTG and subsampler, cancelling any pending shutoff callback. */
void dcss_enable_dtg_and_ss(struct dcss_dev *dcss)
{
	if (dcss->disable_callback)
		dcss->disable_callback = NULL;

	dcss_dtg_enable(dcss->dtg);
	dcss_ss_enable(dcss->ss);
}

/*
 * Initialize all DCSS submodules in dependency order (BLKCTL first, then
 * CTXLD, DTG, SS, DPR, scaler). On failure, already-initialized submodules
 * are torn down in reverse order. Clocks are held only for the duration of
 * the initialization.
 */
static int dcss_submodules_init(struct dcss_dev *dcss)
{
	int ret = 0;
	u32 base_addr = dcss->start_addr;
	const struct dcss_type_data *devtype = dcss->devtype;

	dcss_clocks_enable(dcss);

	ret = dcss_blkctl_init(dcss, base_addr + devtype->blkctl_ofs);
	if (ret)
		return ret;

	ret = dcss_ctxld_init(dcss, base_addr + devtype->ctxld_ofs);
	if (ret)
		goto ctxld_err;

	ret = dcss_dtg_init(dcss, base_addr + devtype->dtg_ofs);
	if (ret)
		goto dtg_err;

	ret = dcss_ss_init(dcss, base_addr + devtype->ss_ofs);
	if (ret)
		goto ss_err;

	ret = dcss_dpr_init(dcss, base_addr + devtype->dpr_ofs);
	if (ret)
		goto dpr_err;

	ret = dcss_scaler_init(dcss, base_addr + devtype->scaler_ofs);
	if (ret)
		goto scaler_err;

	dcss_clocks_disable(dcss);

	return 0;

scaler_err:
	dcss_dpr_exit(dcss->dpr);

dpr_err:
	dcss_ss_exit(dcss->ss);

ss_err:
	dcss_dtg_exit(dcss->dtg);

dtg_err:
	dcss_ctxld_exit(dcss->ctxld);

ctxld_err:
	dcss_blkctl_exit(dcss->blkctl);

	dcss_clocks_disable(dcss);

	return ret;
}

/* Tear down all submodules in reverse init order, with clocks held. */
static void dcss_submodules_stop(struct dcss_dev *dcss)
{
	dcss_clocks_enable(dcss);
	dcss_scaler_exit(dcss->scaler);
	dcss_dpr_exit(dcss->dpr);
	dcss_ss_exit(dcss->ss);
	dcss_dtg_exit(dcss->dtg);
	dcss_ctxld_exit(dcss->ctxld);
	dcss_blkctl_exit(dcss->blkctl);
	dcss_clocks_disable(dcss);
}

/*
 * Look up all DCSS clocks by name via devm_clk_get().
 * Returns 0 on success, or the first lookup error.
 */
static int dcss_clks_init(struct dcss_dev *dcss)
{
	int i;
	struct {
		const char *id;
		struct clk **clk;
	} clks[] = {
		{"apb", &dcss->apb_clk},
		{"axi", &dcss->axi_clk},
		{"pix", &dcss->pix_clk},
		{"rtrm", &dcss->rtrm_clk},
		{"dtrc", &dcss->dtrc_clk},
	};

	for (i = 0; i < ARRAY_SIZE(clks); i++) {
		*clks[i].clk = devm_clk_get(dcss->dev, clks[i].id);
		if (IS_ERR(*clks[i].clk)) {
			dev_err(dcss->dev, "failed to get %s clock\n",
				clks[i].id);
			return PTR_ERR(*clks[i].clk);
		}
	}

	return 0;
}

/*
 * Release the clock references early (devm would also release them at
 * device detach, but dcss_dev lifetime is managed manually here).
 */
static void dcss_clks_release(struct dcss_dev *dcss)
{
	devm_clk_put(dcss->dev, dcss->dtrc_clk);
	devm_clk_put(dcss->dev, dcss->rtrm_clk);
	devm_clk_put(dcss->dev, dcss->pix_clk);
	devm_clk_put(dcss->dev, dcss->axi_clk);
	devm_clk_put(dcss->dev, dcss->apb_clk);
}

/*
 * Create and initialize the DCSS device: match data, MMIO resource, clocks,
 * OF graph port, submodules and runtime PM. Returns the new dcss_dev or an
 * ERR_PTR on failure.
 */
struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output)
{
	struct platform_device *pdev = to_platform_device(dev);
	int ret;
	struct resource *res;
	struct dcss_dev *dcss;
	const struct dcss_type_data *devtype;

	devtype = of_device_get_match_data(dev);
	if (!devtype) {
		dev_err(dev, "no device match found\n");
		return ERR_PTR(-ENODEV);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "cannot get memory resource\n");
		return ERR_PTR(-EINVAL);
	}

	dcss = kzalloc(sizeof(*dcss), GFP_KERNEL);
	if (!dcss)
		return ERR_PTR(-ENOMEM);

	dcss->dev = dev;
	dcss->devtype = devtype;
	dcss->hdmi_output = hdmi_output;

	ret = dcss_clks_init(dcss);
	if (ret) {
		dev_err(dev, "clocks initialization failed\n");
		goto err;
	}

	dcss->of_port = of_graph_get_port_by_id(dev->of_node, 0);
	if (!dcss->of_port) {
		dev_err(dev, "no port@0 node in %pOF\n", dev->of_node);
		ret = -ENODEV;
		goto clks_err;
	}

	dcss->start_addr = res->start;

	ret = dcss_submodules_init(dcss);
	if (ret) {
		of_node_put(dcss->of_port);
		dev_err(dev, "submodules initialization failed\n");
		goto clks_err;
	}

	init_completion(&dcss->disable_completion);

	pm_runtime_set_autosuspend_delay(dev, 100);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_suspended(dev);
	pm_runtime_allow(dev);
	pm_runtime_enable(dev);

	return dcss;

clks_err:
	dcss_clks_release(dcss);

err:
	kfree(dcss);

	return ERR_PTR(ret);
}

/*
 * Destroy the DCSS device. If the device is still active, quiesce the
 * context loader and gate the clocks first; then disable runtime PM and
 * tear everything down.
 */
void dcss_dev_destroy(struct dcss_dev *dcss)
{
	if (!pm_runtime_suspended(dcss->dev)) {
		dcss_ctxld_suspend(dcss->ctxld);
		dcss_clocks_disable(dcss);
	}

	of_node_put(dcss->of_port);

	pm_runtime_disable(dcss->dev);

	dcss_submodules_stop(dcss);

	dcss_clks_release(dcss);

	kfree(dcss);
}

/*
 * System-sleep suspend: suspend the mode config first; skip the hardware
 * part if the device is already runtime-suspended.
 */
static int dcss_dev_suspend(struct device *dev)
{
	struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);
	struct drm_device *ddev = dcss_drv_dev_to_drm(dev);
	int ret;

	drm_mode_config_helper_suspend(ddev);

	if (pm_runtime_suspended(dev))
		return 0;

	ret = dcss_ctxld_suspend(dcss->ctxld);
	if (ret)
		return ret;

	dcss_clocks_disable(dcss);

	return 0;
}

/*
 * System-sleep resume: if runtime-suspended, only the mode config needs
 * resuming; otherwise re-enable clocks and reconfigure BLKCTL/CTXLD first.
 */
static int dcss_dev_resume(struct device *dev)
{
	struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);
	struct drm_device *ddev = dcss_drv_dev_to_drm(dev);

	if (pm_runtime_suspended(dev)) {
		drm_mode_config_helper_resume(ddev);
		return 0;
	}

	dcss_clocks_enable(dcss);

	dcss_blkctl_cfg(dcss->blkctl);

	dcss_ctxld_resume(dcss->ctxld);

	drm_mode_config_helper_resume(ddev);

	return 0;
}

/* Runtime suspend: quiesce CTXLD, then gate the clocks. */
static int dcss_dev_runtime_suspend(struct device *dev)
{
	struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);
	int ret;

	ret = dcss_ctxld_suspend(dcss->ctxld);
	if (ret)
		return ret;

	dcss_clocks_disable(dcss);

	return 0;
}

/* Runtime resume: ungate clocks, reconfigure BLKCTL and resume CTXLD. */
static int dcss_dev_runtime_resume(struct device *dev)
{
	struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);

	dcss_clocks_enable(dcss);

	dcss_blkctl_cfg(dcss->blkctl);

	dcss_ctxld_resume(dcss->ctxld);

	return 0;
}

EXPORT_GPL_DEV_PM_OPS(dcss_dev_pm_ops) = {
	RUNTIME_PM_OPS(dcss_dev_runtime_suspend, dcss_dev_runtime_resume, NULL)
	SYSTEM_SLEEP_PM_OPS(dcss_dev_suspend, dcss_dev_resume)
};
linux-master
drivers/gpu/drm/imx/dcss/dcss-crtc.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 NXP.
 */

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_vblank.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include "dcss-dev.h"
#include "dcss-kms.h"

/*
 * Enable vblank delivery: unmask the DTG vblank and CTXLD kick interrupts,
 * then enable the CRTC's own IRQ line.
 */
static int dcss_enable_vblank(struct drm_crtc *crtc)
{
	struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
						   base);
	struct dcss_dev *dcss = crtc->dev->dev_private;

	dcss_dtg_vblank_irq_enable(dcss->dtg, true);

	dcss_dtg_ctxld_kick_irq_enable(dcss->dtg, true);

	enable_irq(dcss_crtc->irq);

	return 0;
}

/*
 * Disable vblank delivery. The CTXLD kick interrupt is only disabled when
 * allowed (disable_ctxld_kick_irq) — it must stay on across a CRTC disable
 * so the final commit can still be flushed.
 */
static void dcss_disable_vblank(struct drm_crtc *crtc)
{
	struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
						   base);
	struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;

	disable_irq_nosync(dcss_crtc->irq);

	dcss_dtg_vblank_irq_enable(dcss->dtg, false);

	if (dcss_crtc->disable_ctxld_kick_irq)
		dcss_dtg_ctxld_kick_irq_enable(dcss->dtg, false);
}

static const struct drm_crtc_funcs dcss_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = drm_crtc_cleanup,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	.enable_vblank = dcss_enable_vblank,
	.disable_vblank = dcss_disable_vblank,
};

/* Atomic begin: make sure vblank events can be delivered during the commit. */
static void dcss_crtc_atomic_begin(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	drm_crtc_vblank_on(crtc);
}

/*
 * Atomic flush: arm any pending page-flip event and, if the DTG is running,
 * trigger a context load so the queued register writes take effect.
 */
static void dcss_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
						   base);
	struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;

	spin_lock_irq(&crtc->dev->event_lock);
	if (crtc->state->event) {
		WARN_ON(drm_crtc_vblank_get(crtc));
		drm_crtc_arm_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
	}
	spin_unlock_irq(&crtc->dev->event_lock);

	if (dcss_dtg_is_enabled(dcss->dtg))
		dcss_ctxld_enable(dcss->ctxld);
}

/*
 * Atomic enable: take a runtime PM reference, program timing (only when the
 * mode actually changed or the CRTC was previously inactive), start DTG/SS
 * and kick the context loader.
 */
static void dcss_crtc_atomic_enable(struct drm_crtc *crtc,
				    struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
									      crtc);
	struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
						   base);
	struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
	struct drm_display_mode *mode = &crtc->state->adjusted_mode;
	struct drm_display_mode *old_mode = &old_crtc_state->adjusted_mode;
	struct videomode vm;

	drm_display_mode_to_videomode(mode, &vm);

	pm_runtime_get_sync(dcss->dev);

	vm.pixelclock = mode->crtc_clock * 1000;

	dcss_ss_subsam_set(dcss->ss);
	dcss_dtg_css_set(dcss->dtg);

	if (!drm_mode_equal(mode, old_mode) || !old_crtc_state->active) {
		dcss_dtg_sync_set(dcss->dtg, &vm);
		dcss_ss_sync_set(dcss->ss, &vm,
				 mode->flags & DRM_MODE_FLAG_PHSYNC,
				 mode->flags & DRM_MODE_FLAG_PVSYNC);
	}

	dcss_enable_dtg_and_ss(dcss);

	dcss_ctxld_enable(dcss->ctxld);

	/* Allow CTXLD kick interrupt to be disabled when VBLANK is disabled. */
	dcss_crtc->disable_ctxld_kick_irq = true;
}

/*
 * Atomic disable: flush any pending event, arm the deferred DTG/SS shutoff
 * and wait (bounded) for it to complete before dropping the runtime PM ref.
 */
static void dcss_crtc_atomic_disable(struct drm_crtc *crtc,
				     struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
									      crtc);
	struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
						   base);
	struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
	struct drm_display_mode *mode = &crtc->state->adjusted_mode;
	struct drm_display_mode *old_mode = &old_crtc_state->adjusted_mode;

	drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false);

	spin_lock_irq(&crtc->dev->event_lock);
	if (crtc->state->event) {
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
	}
	spin_unlock_irq(&crtc->dev->event_lock);

	dcss_dtg_ctxld_kick_irq_enable(dcss->dtg, true);

	reinit_completion(&dcss->disable_completion);

	dcss_disable_dtg_and_ss(dcss);

	dcss_ctxld_enable(dcss->ctxld);

	if (!drm_mode_equal(mode, old_mode) || !crtc->state->active)
		if (!wait_for_completion_timeout(&dcss->disable_completion,
						 msecs_to_jiffies(100)))
			dev_err(dcss->dev, "Shutting off DTG timed out.\n");

	/*
	 * Do not shut off CTXLD kick interrupt when shutting VBLANK off. It
	 * will be needed to commit the last changes, before going to suspend.
	 */
	dcss_crtc->disable_ctxld_kick_irq = false;

	drm_crtc_vblank_off(crtc);

	pm_runtime_mark_last_busy(dcss->dev);
	pm_runtime_put_autosuspend(dcss->dev);
}

static const struct drm_crtc_helper_funcs dcss_helper_funcs = {
	.atomic_begin = dcss_crtc_atomic_begin,
	.atomic_flush = dcss_crtc_atomic_flush,
	.atomic_enable = dcss_crtc_atomic_enable,
	.atomic_disable = dcss_crtc_atomic_disable,
};

/*
 * Vblank IRQ handler: report the vblank to DRM only once the context loader
 * has flushed, then acknowledge the DTG interrupt.
 */
static irqreturn_t dcss_crtc_irq_handler(int irq, void *dev_id)
{
	struct dcss_crtc *dcss_crtc = dev_id;
	struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;

	if (!dcss_dtg_vblank_irq_valid(dcss->dtg))
		return IRQ_NONE;

	if (dcss_ctxld_is_flushed(dcss->ctxld))
		drm_crtc_handle_vblank(&dcss_crtc->base);

	dcss_dtg_vblank_irq_clear(dcss->dtg);

	return IRQ_HANDLED;
}

/*
 * Initialize the CRTC: create its primary plane, register the CRTC with
 * its helpers, and set up the (initially disabled) vblank IRQ.
 */
int dcss_crtc_init(struct dcss_crtc *crtc, struct drm_device *drm)
{
	struct dcss_dev *dcss = drm->dev_private;
	struct platform_device *pdev = to_platform_device(dcss->dev);
	int ret;

	crtc->plane[0] = dcss_plane_init(drm, drm_crtc_mask(&crtc->base),
					 DRM_PLANE_TYPE_PRIMARY, 0);
	if (IS_ERR(crtc->plane[0]))
		return PTR_ERR(crtc->plane[0]);

	crtc->base.port = dcss->of_port;

	drm_crtc_helper_add(&crtc->base, &dcss_helper_funcs);
	ret = drm_crtc_init_with_planes(drm, &crtc->base, &crtc->plane[0]->base,
					NULL, &dcss_crtc_funcs, NULL);
	if (ret) {
		dev_err(dcss->dev, "failed to init crtc\n");
		return ret;
	}

	crtc->irq = platform_get_irq_byname(pdev, "vblank");
	if (crtc->irq < 0)
		return crtc->irq;

	ret = request_irq(crtc->irq, dcss_crtc_irq_handler,
			  0, "dcss_drm", crtc);
	if (ret) {
		dev_err(dcss->dev, "irq request failed with %d.\n", ret);
		return ret;
	}

	/* enabled on demand via dcss_enable_vblank() */
	disable_irq(crtc->irq);

	return 0;
}

/* Release the CRTC's vblank IRQ (DRM core cleans up the CRTC itself). */
void dcss_crtc_deinit(struct dcss_crtc *crtc, struct drm_device *drm)
{
	free_irq(crtc->irq, crtc);
}
linux-master
drivers/gpu/drm/imx/dcss/dcss-drv.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 NXP.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>

#include "dcss-dev.h"
#include "dcss-kms.h"

/* Pairs the hardware-level dcss_dev with its KMS (DRM) counterpart. */
struct dcss_drv {
	struct dcss_dev *dcss;
	struct dcss_kms_dev *kms;
};

/* Return the dcss_dev stored in @dev's drvdata, or NULL if not yet bound. */
struct dcss_dev *dcss_drv_dev_to_dcss(struct device *dev)
{
	struct dcss_drv *mdrv = dev_get_drvdata(dev);

	return mdrv ? mdrv->dcss : NULL;
}

/* Return the drm_device stored in @dev's drvdata, or NULL if not yet bound. */
struct drm_device *dcss_drv_dev_to_drm(struct device *dev)
{
	struct dcss_drv *mdrv = dev_get_drvdata(dev);

	return mdrv ? &mdrv->kms->base : NULL;
}

/*
 * Probe: decide HDMI vs MIPI-DSI output from the OF graph remote node,
 * create the DCSS device, then attach the KMS layer.
 */
static int dcss_drv_platform_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *remote;
	struct dcss_drv *mdrv;
	int err = 0;
	bool hdmi_output = true;

	if (!dev->of_node)
		return -ENODEV;

	remote = of_graph_get_remote_node(dev->of_node, 0, 0);
	if (!remote)
		return -ENODEV;

	/* anything that is not the NWL DSI bridge is treated as HDMI output */
	hdmi_output = !of_device_is_compatible(remote, "fsl,imx8mq-nwl-dsi");

	of_node_put(remote);

	mdrv = kzalloc(sizeof(*mdrv), GFP_KERNEL);
	if (!mdrv)
		return -ENOMEM;

	mdrv->dcss = dcss_dev_create(dev, hdmi_output);
	if (IS_ERR(mdrv->dcss)) {
		err = PTR_ERR(mdrv->dcss);
		goto err;
	}

	dev_set_drvdata(dev, mdrv);

	mdrv->kms = dcss_kms_attach(mdrv->dcss);
	if (IS_ERR(mdrv->kms)) {
		err = PTR_ERR(mdrv->kms);
		dev_err_probe(dev, err, "Failed to initialize KMS\n");
		goto dcss_shutoff;
	}

	return 0;

dcss_shutoff:
	dcss_dev_destroy(mdrv->dcss);

err:
	kfree(mdrv);
	return err;
}

/* Remove: detach KMS first, then destroy the hardware-level device. */
static int dcss_drv_platform_remove(struct platform_device *pdev)
{
	struct dcss_drv *mdrv = dev_get_drvdata(&pdev->dev);

	dcss_kms_detach(mdrv->kms);
	dcss_dev_destroy(mdrv->dcss);

	kfree(mdrv);

	return 0;
}

/* Per-SoC register layout: submodule offsets relative to the MMIO base. */
static struct dcss_type_data dcss_types[] = {
	[DCSS_IMX8MQ] = {
		.name = "DCSS_IMX8MQ",
		.blkctl_ofs = 0x2F000,
		.ctxld_ofs = 0x23000,
		.dtg_ofs = 0x20000,
		.scaler_ofs = 0x1C000,
		.ss_ofs = 0x1B000,
		.dpr_ofs = 0x18000,
	},
};

static const struct of_device_id dcss_of_match[] = {
	{ .compatible = "nxp,imx8mq-dcss", .data = &dcss_types[DCSS_IMX8MQ], },
	{},
};

MODULE_DEVICE_TABLE(of, dcss_of_match);

static struct platform_driver dcss_platform_driver = {
	.probe = dcss_drv_platform_probe,
	.remove = dcss_drv_platform_remove,
	.driver = {
		.name = "imx-dcss",
		.of_match_table = dcss_of_match,
		.pm = pm_ptr(&dcss_dev_pm_ops),
	},
};

drm_module_platform_driver(dcss_platform_driver);

MODULE_AUTHOR("Laurentiu Palcu <[email protected]>");
MODULE_DESCRIPTION("DCSS driver for i.MX8MQ");
MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/imx/dcss/dcss-plane.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 NXP.
 */

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>

#include "dcss-dev.h"
#include "dcss-kms.h"

/* Pixel formats supported by every DCSS plane. */
static const u32 dcss_common_formats[] = {
	/* RGB */
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_RGBX8888,
	DRM_FORMAT_BGRA8888,
	DRM_FORMAT_BGRX8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_RGBX1010102,
	DRM_FORMAT_BGRX1010102,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_RGBA1010102,
	DRM_FORMAT_BGRA1010102,
};

/* Overlay (video) planes accept only linear buffers. */
static const u64 dcss_video_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID,
};

/* The primary (graphics) plane also accepts Vivante tiled layouts. */
static const u64 dcss_graphics_format_modifiers[] = {
	DRM_FORMAT_MOD_VIVANTE_TILED,
	DRM_FORMAT_MOD_VIVANTE_SUPER_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID,
};

static inline struct dcss_plane *to_dcss_plane(struct drm_plane *p)
{
	return container_of(p, struct dcss_plane, base);
}

/* True when the framebuffer is linear (no modifier, or explicit LINEAR). */
static inline bool dcss_plane_fb_is_linear(const struct drm_framebuffer *fb)
{
	return ((fb->flags & DRM_MODE_FB_MODIFIERS) == 0) ||
	       ((fb->flags & DRM_MODE_FB_MODIFIERS) != 0 &&
		fb->modifier == DRM_FORMAT_MOD_LINEAR);
}

static void dcss_plane_destroy(struct drm_plane *plane)
{
	struct dcss_plane *dcss_plane = container_of(plane, struct dcss_plane,
						     base);

	drm_plane_cleanup(plane);
	kfree(dcss_plane);
}

/*
 * Validate a (format, modifier) pair. Only a few ARGB formats support the
 * Vivante tiled modifiers on the primary plane; everything else must be
 * linear.
 */
static bool dcss_plane_format_mod_supported(struct drm_plane *plane,
					    u32 format,
					    u64 modifier)
{
	switch (plane->type) {
	case DRM_PLANE_TYPE_PRIMARY:
		switch (format) {
		case DRM_FORMAT_ARGB8888:
		case DRM_FORMAT_XRGB8888:
		case DRM_FORMAT_ARGB2101010:
			return modifier == DRM_FORMAT_MOD_LINEAR ||
			       modifier == DRM_FORMAT_MOD_VIVANTE_TILED ||
			       modifier == DRM_FORMAT_MOD_VIVANTE_SUPER_TILED;
		default:
			return modifier == DRM_FORMAT_MOD_LINEAR;
		}
		break;
	case DRM_PLANE_TYPE_OVERLAY:
		return modifier == DRM_FORMAT_MOD_LINEAR;
	default:
		return false;
	}
}

static const struct drm_plane_funcs dcss_plane_funcs = {
	.update_plane	= drm_atomic_helper_update_plane,
	.disable_plane	= drm_atomic_helper_disable_plane,
	.destroy	= dcss_plane_destroy,
	.reset		= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
	.format_mod_supported	= dcss_plane_format_mod_supported,
};

/*
 * Return true if @rotation is achievable for the given format/modifier
 * combination. 90/270 degree rotation requires Vivante tiled RGB; linear
 * RGB and linear NV12/NV21 support only 0/180 plus reflection.
 */
static bool dcss_plane_can_rotate(const struct drm_format_info *format,
				  bool mod_present, u64 modifier,
				  unsigned int rotation)
{
	bool linear_format = !mod_present || modifier == DRM_FORMAT_MOD_LINEAR;
	u32 supported_rotation = DRM_MODE_ROTATE_0;

	if (!format->is_yuv && linear_format)
		supported_rotation = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
				     DRM_MODE_REFLECT_MASK;
	else if (!format->is_yuv &&
		 (modifier == DRM_FORMAT_MOD_VIVANTE_TILED ||
		  modifier == DRM_FORMAT_MOD_VIVANTE_SUPER_TILED))
		supported_rotation = DRM_MODE_ROTATE_MASK |
				     DRM_MODE_REFLECT_MASK;
	else if (format->is_yuv && linear_format &&
		 (format->format == DRM_FORMAT_NV12 ||
		  format->format == DRM_FORMAT_NV21))
		supported_rotation = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
				     DRM_MODE_REFLECT_MASK;

	return !!(rotation & supported_rotation);
}

/*
 * Enforce minimum source sizes: 64px wide for NV12/NV21, 32px for packed
 * 4:2:2 YUV, and a global 16x8 floor for everything.
 */
static bool dcss_plane_is_source_size_allowed(u16 src_w, u16 src_h, u32 pix_fmt)
{
	if (src_w < 64 &&
	    (pix_fmt == DRM_FORMAT_NV12 || pix_fmt == DRM_FORMAT_NV21))
		return false;
	else if (src_w < 32 &&
		 (pix_fmt == DRM_FORMAT_UYVY || pix_fmt == DRM_FORMAT_VYUY ||
		  pix_fmt == DRM_FORMAT_YUYV || pix_fmt == DRM_FORMAT_YVYU))
		return false;

	return src_w >= 16 && src_h >= 8;
}

/*
 * Atomic check: validate source size, scaling ratios, rotation, cropping
 * (only allowed for linear buffers) and the format/modifier combination.
 */
static int dcss_plane_atomic_check(struct drm_plane *plane,
				   struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct dcss_plane *dcss_plane = to_dcss_plane(plane);
	struct dcss_dev *dcss = plane->dev->dev_private;
	struct drm_framebuffer *fb = new_plane_state->fb;
	bool is_primary_plane = plane->type == DRM_PLANE_TYPE_PRIMARY;
	struct drm_gem_dma_object *dma_obj;
	struct drm_crtc_state *crtc_state;
	int hdisplay, vdisplay;
	int min, max;
	int ret;

	if (!fb || !new_plane_state->crtc)
		return 0;

	dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
	WARN_ON(!dma_obj);

	crtc_state = drm_atomic_get_existing_crtc_state(state,
							new_plane_state->crtc);

	hdisplay = crtc_state->adjusted_mode.hdisplay;
	vdisplay = crtc_state->adjusted_mode.vdisplay;

	if (!dcss_plane_is_source_size_allowed(new_plane_state->src_w >> 16,
					       new_plane_state->src_h >> 16,
					       fb->format->format)) {
		DRM_DEBUG_KMS("Source plane size is not allowed!\n");
		return -EINVAL;
	}

	dcss_scaler_get_min_max_ratios(dcss->scaler, dcss_plane->ch_num,
				       &min, &max);

	ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
						  min, max, !is_primary_plane,
						  false);
	if (ret)
		return ret;

	if (!new_plane_state->visible)
		return 0;

	if (!dcss_plane_can_rotate(fb->format,
				   !!(fb->flags & DRM_MODE_FB_MODIFIERS),
				   fb->modifier,
				   new_plane_state->rotation)) {
		DRM_DEBUG_KMS("requested rotation is not allowed!\n");
		return -EINVAL;
	}

	/* cropping at the display edge only works with linear buffers */
	if ((new_plane_state->crtc_x < 0 || new_plane_state->crtc_y < 0 ||
	     new_plane_state->crtc_x + new_plane_state->crtc_w > hdisplay ||
	     new_plane_state->crtc_y + new_plane_state->crtc_h > vdisplay) &&
	    !dcss_plane_fb_is_linear(fb)) {
		DRM_DEBUG_KMS("requested cropping operation is not allowed!\n");
		return -EINVAL;
	}

	if ((fb->flags & DRM_MODE_FB_MODIFIERS) &&
	    !plane->funcs->format_mod_supported(plane,
						fb->format->format,
						fb->modifier)) {
		DRM_DEBUG_KMS("Invalid modifier: %llx", fb->modifier);
		return -EINVAL;
	}

	return 0;
}

/*
 * Compute and program the plane's DMA base addresses, offsetting into the
 * buffer by the (fixed-point 16.16) src x/y. p2_ba is the chroma plane base
 * for NV12/NV21; the >>17 shifts account for 2x2 chroma subsampling.
 */
static void dcss_plane_atomic_set_base(struct dcss_plane *dcss_plane)
{
	struct drm_plane *plane = &dcss_plane->base;
	struct drm_plane_state *state = plane->state;
	struct dcss_dev *dcss = plane->dev->dev_private;
	struct drm_framebuffer *fb = state->fb;
	const struct drm_format_info *format = fb->format;
	struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
	unsigned long p1_ba = 0, p2_ba = 0;

	if (!format->is_yuv ||
	    format->format == DRM_FORMAT_NV12 ||
	    format->format == DRM_FORMAT_NV21)
		p1_ba = dma_obj->dma_addr + fb->offsets[0] +
			fb->pitches[0] * (state->src.y1 >> 16) +
			format->char_per_block[0] *
			(state->src.x1 >> 16);
	else if (format->format == DRM_FORMAT_UYVY ||
		 format->format == DRM_FORMAT_VYUY ||
		 format->format == DRM_FORMAT_YUYV ||
		 format->format == DRM_FORMAT_YVYU)
		p1_ba = dma_obj->dma_addr + fb->offsets[0] +
			fb->pitches[0] * (state->src.y1 >> 16) +
			2 * format->char_per_block[0] *
			(state->src.x1 >> 17);

	if (format->format == DRM_FORMAT_NV12 ||
	    format->format == DRM_FORMAT_NV21)
		p2_ba = dma_obj->dma_addr + fb->offsets[1] +
			(((fb->pitches[1] >> 1) * (state->src.y1 >> 17) +
			(state->src.x1 >> 17)) << 1);

	dcss_dpr_addr_set(dcss->dpr, dcss_plane->ch_num, p1_ba, p2_ba,
			  fb->pitches[0]);
}

/*
 * True if anything other than the framebuffer's base address changed,
 * i.e. the full DPR/scaler/DTG setup path must be re-run.
 */
static bool dcss_plane_needs_setup(struct drm_plane_state *state,
				   struct drm_plane_state *old_state)
{
	struct drm_framebuffer *fb = state->fb;
	struct drm_framebuffer *old_fb = old_state->fb;

	return state->crtc_x != old_state->crtc_x ||
	       state->crtc_y != old_state->crtc_y ||
	       state->crtc_w != old_state->crtc_w ||
	       state->crtc_h != old_state->crtc_h ||
	       state->src_x  != old_state->src_x  ||
	       state->src_y  != old_state->src_y  ||
	       state->src_w  != old_state->src_w  ||
	       state->src_h  != old_state->src_h  ||
	       fb->format->format != old_fb->format->format ||
	       fb->modifier  != old_fb->modifier ||
	       state->rotation != old_state->rotation ||
	       state->scaling_filter != old_state->scaling_filter;
}

/*
 * Atomic update: fast path only updates the DMA base address; otherwise
 * reprogram format, resolution, rotation, scaler and DTG position. A
 * channel-0 plane with global alpha 0 is treated as disabled.
 */
static void dcss_plane_atomic_update(struct drm_plane *plane,
				     struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct dcss_plane *dcss_plane = to_dcss_plane(plane);
	struct dcss_dev *dcss = plane->dev->dev_private;
	struct drm_framebuffer *fb = new_state->fb;
	struct drm_crtc_state *crtc_state;
	bool modifiers_present;
	u32 src_w, src_h, dst_w, dst_h;
	struct drm_rect src, dst;
	bool enable = true;
	bool is_rotation_90_or_270;

	if (!fb || !new_state->crtc || !new_state->visible)
		return;

	crtc_state = new_state->crtc->state;
	modifiers_present = !!(fb->flags & DRM_MODE_FB_MODIFIERS);

	if (old_state->fb && !drm_atomic_crtc_needs_modeset(crtc_state) &&
	    !dcss_plane_needs_setup(new_state, old_state)) {
		dcss_plane_atomic_set_base(dcss_plane);
		return;
	}

	src = plane->state->src;
	dst = plane->state->dst;

	/*
	 * The width and height after clipping.
	 */
	src_w = drm_rect_width(&src) >> 16;
	src_h = drm_rect_height(&src) >> 16;
	dst_w = drm_rect_width(&dst);
	dst_h = drm_rect_height(&dst);

	/* an explicit LINEAR modifier on an overlay is the same as no modifier */
	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
	    modifiers_present && fb->modifier == DRM_FORMAT_MOD_LINEAR)
		modifiers_present = false;

	dcss_dpr_format_set(dcss->dpr, dcss_plane->ch_num,
			    new_state->fb->format,
			    modifiers_present ? fb->modifier :
			    DRM_FORMAT_MOD_LINEAR);
	dcss_dpr_set_res(dcss->dpr, dcss_plane->ch_num, src_w, src_h);
	dcss_dpr_set_rotation(dcss->dpr, dcss_plane->ch_num,
			      new_state->rotation);

	dcss_plane_atomic_set_base(dcss_plane);

	is_rotation_90_or_270 = new_state->rotation & (DRM_MODE_ROTATE_90 |
						       DRM_MODE_ROTATE_270);

	dcss_scaler_set_filter(dcss->scaler, dcss_plane->ch_num,
			       new_state->scaling_filter);

	/* for 90/270 rotation the scaler sees swapped source dimensions */
	dcss_scaler_setup(dcss->scaler, dcss_plane->ch_num,
			  new_state->fb->format,
			  is_rotation_90_or_270 ? src_h : src_w,
			  is_rotation_90_or_270 ? src_w : src_h,
			  dst_w, dst_h,
			  drm_mode_vrefresh(&crtc_state->mode));

	dcss_dtg_plane_pos_set(dcss->dtg, dcss_plane->ch_num,
			       dst.x1, dst.y1, dst_w, dst_h);
	dcss_dtg_plane_alpha_set(dcss->dtg, dcss_plane->ch_num,
				 fb->format, new_state->alpha >> 8);

	if (!dcss_plane->ch_num && (new_state->alpha >> 8) == 0)
		enable = false;

	dcss_dpr_enable(dcss->dpr, dcss_plane->ch_num, enable);
	dcss_scaler_ch_enable(dcss->scaler, dcss_plane->ch_num, enable);

	if (!enable)
		dcss_dtg_plane_pos_set(dcss->dtg, dcss_plane->ch_num,
				       0, 0, 0, 0);

	dcss_dtg_ch_enable(dcss->dtg, dcss_plane->ch_num, enable);
}

/* Disable the plane's DPR/scaler channel and clear its DTG window. */
static void dcss_plane_atomic_disable(struct drm_plane *plane,
				      struct drm_atomic_state *state)
{
	struct dcss_plane *dcss_plane = to_dcss_plane(plane);
	struct dcss_dev *dcss = plane->dev->dev_private;

	dcss_dpr_enable(dcss->dpr, dcss_plane->ch_num, false);
	dcss_scaler_ch_enable(dcss->scaler, dcss_plane->ch_num, false);
	dcss_dtg_plane_pos_set(dcss->dtg, dcss_plane->ch_num, 0, 0, 0, 0);
	dcss_dtg_ch_enable(dcss->dtg, dcss_plane->ch_num, false);
}

static const struct drm_plane_helper_funcs dcss_plane_helper_funcs = {
	.atomic_check = dcss_plane_atomic_check,
	.atomic_update = dcss_plane_atomic_update,
	.atomic_disable = dcss_plane_atomic_disable,
};

/*
 * Create one DCSS plane. @zpos (0..2) doubles as the hardware channel
 * number; only the primary plane gets the tiled-modifier list. Returns the
 * plane or an ERR_PTR.
 */
struct dcss_plane *dcss_plane_init(struct drm_device *drm,
				   unsigned int possible_crtcs,
				   enum drm_plane_type type,
				   unsigned int zpos)
{
	struct dcss_plane *dcss_plane;
	const u64 *format_modifiers = dcss_video_format_modifiers;
	int ret;

	if (zpos > 2)
		return ERR_PTR(-EINVAL);

	dcss_plane = kzalloc(sizeof(*dcss_plane), GFP_KERNEL);
	if (!dcss_plane) {
		DRM_ERROR("failed to allocate plane\n");
		return ERR_PTR(-ENOMEM);
	}

	if (type == DRM_PLANE_TYPE_PRIMARY)
		format_modifiers = dcss_graphics_format_modifiers;

	ret = drm_universal_plane_init(drm, &dcss_plane->base, possible_crtcs,
				       &dcss_plane_funcs, dcss_common_formats,
				       ARRAY_SIZE(dcss_common_formats),
				       format_modifiers, type, NULL);
	if (ret) {
		DRM_ERROR("failed to initialize plane\n");
		kfree(dcss_plane);
		return ERR_PTR(ret);
	}

	drm_plane_helper_add(&dcss_plane->base, &dcss_plane_helper_funcs);

	/*
	 * NOTE(review): on zpos-property failure the plane is not cleaned up
	 * before returning — relies on DRM-level teardown of the device.
	 */
	ret = drm_plane_create_zpos_immutable_property(&dcss_plane->base, zpos);
	if (ret)
		return ERR_PTR(ret);

	drm_plane_create_scaling_filter_property(&dcss_plane->base,
					BIT(DRM_SCALING_FILTER_DEFAULT) |
					BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));

	drm_plane_create_rotation_property(&dcss_plane->base,
					   DRM_MODE_ROTATE_0,
					   DRM_MODE_ROTATE_0   |
					   DRM_MODE_ROTATE_90  |
					   DRM_MODE_ROTATE_180 |
					   DRM_MODE_ROTATE_270 |
					   DRM_MODE_REFLECT_X  |
					   DRM_MODE_REFLECT_Y);

	dcss_plane->ch_num = zpos;

	return dcss_plane;
}
linux-master
drivers/gpu/drm/imx/dcss/dcss-blkctl.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright 2019 NXP. */ #include <linux/device.h> #include <linux/of.h> #include <linux/slab.h> #include "dcss-dev.h" #define DCSS_BLKCTL_RESET_CTRL 0x00 #define B_CLK_RESETN BIT(0) #define APB_CLK_RESETN BIT(1) #define P_CLK_RESETN BIT(2) #define RTR_CLK_RESETN BIT(4) #define DCSS_BLKCTL_CONTROL0 0x10 #define HDMI_MIPI_CLK_SEL BIT(0) #define DISPMIX_REFCLK_SEL_POS 4 #define DISPMIX_REFCLK_SEL_MASK GENMASK(5, 4) #define DISPMIX_PIXCLK_SEL BIT(8) #define HDMI_SRC_SECURE_EN BIT(16) struct dcss_blkctl { struct dcss_dev *dcss; void __iomem *base_reg; }; void dcss_blkctl_cfg(struct dcss_blkctl *blkctl) { if (blkctl->dcss->hdmi_output) dcss_writel(0, blkctl->base_reg + DCSS_BLKCTL_CONTROL0); else dcss_writel(DISPMIX_PIXCLK_SEL, blkctl->base_reg + DCSS_BLKCTL_CONTROL0); dcss_set(B_CLK_RESETN | APB_CLK_RESETN | P_CLK_RESETN | RTR_CLK_RESETN, blkctl->base_reg + DCSS_BLKCTL_RESET_CTRL); } int dcss_blkctl_init(struct dcss_dev *dcss, unsigned long blkctl_base) { struct dcss_blkctl *blkctl; blkctl = kzalloc(sizeof(*blkctl), GFP_KERNEL); if (!blkctl) return -ENOMEM; blkctl->base_reg = ioremap(blkctl_base, SZ_4K); if (!blkctl->base_reg) { dev_err(dcss->dev, "unable to remap BLK CTRL base\n"); kfree(blkctl); return -ENOMEM; } dcss->blkctl = blkctl; blkctl->dcss = dcss; dcss_blkctl_cfg(blkctl); return 0; } void dcss_blkctl_exit(struct dcss_blkctl *blkctl) { if (blkctl->base_reg) iounmap(blkctl->base_reg); kfree(blkctl); }
linux-master
drivers/gpu/drm/imx/dcss/dcss-blkctl.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 NXP.
 */

#include <linux/device.h>
#include <linux/slab.h>

#include "dcss-dev.h"

/* Subsampler (SS) register offsets and bit fields. */
#define DCSS_SS_SYS_CTRL			0x00
#define   RUN_EN				BIT(0)
#define DCSS_SS_DISPLAY				0x10
#define   LRC_X_POS				0
#define   LRC_X_MASK				GENMASK(12, 0)
#define   LRC_Y_POS				16
#define   LRC_Y_MASK				GENMASK(28, 16)
#define DCSS_SS_HSYNC				0x20
#define DCSS_SS_VSYNC				0x30
#define   SYNC_START_POS			0
#define   SYNC_START_MASK			GENMASK(12, 0)
#define   SYNC_END_POS				16
#define   SYNC_END_MASK				GENMASK(28, 16)
#define   SYNC_POL				BIT(31)
#define DCSS_SS_DE_ULC				0x40
#define   ULC_X_POS				0
#define   ULC_X_MASK				GENMASK(12, 0)
#define   ULC_Y_POS				16
#define   ULC_Y_MASK				GENMASK(28, 16)
#define   ULC_POL				BIT(31)
#define DCSS_SS_DE_LRC				0x50
#define DCSS_SS_MODE				0x60
#define   PIPE_MODE_POS				0
#define   PIPE_MODE_MASK			GENMASK(1, 0)
#define DCSS_SS_COEFF				0x70
#define   HORIZ_A_POS				0
#define   HORIZ_A_MASK				GENMASK(3, 0)
#define   HORIZ_B_POS				4
#define   HORIZ_B_MASK				GENMASK(7, 4)
#define   HORIZ_C_POS				8
#define   HORIZ_C_MASK				GENMASK(11, 8)
#define   HORIZ_H_NORM_POS			12
#define   HORIZ_H_NORM_MASK			GENMASK(14, 12)
#define   VERT_A_POS				16
#define   VERT_A_MASK				GENMASK(19, 16)
#define   VERT_B_POS				20
#define   VERT_B_MASK				GENMASK(23, 20)
#define   VERT_C_POS				24
#define   VERT_C_MASK				GENMASK(27, 24)
#define   VERT_H_NORM_POS			28
#define   VERT_H_NORM_MASK			GENMASK(30, 28)
#define DCSS_SS_CLIP_CB				0x80
#define DCSS_SS_CLIP_CR				0x90
#define   CLIP_MIN_POS				0
#define   CLIP_MIN_MASK				GENMASK(9, 0)
#define   CLIP_MAX_POS				0
#define   CLIP_MAX_MASK				GENMASK(23, 16)
#define DCSS_SS_INTER_MODE			0xA0
#define   INT_EN				BIT(0)
#define   VSYNC_SHIFT				BIT(1)

struct dcss_ss {
	struct device *dev;
	void __iomem *base_reg;	/* mapped SS registers */
	u32 base_ofs;		/* physical base, used for ctxld offsets */

	struct dcss_ctxld *ctxld;
	u32 ctx_id;		/* context-loader channel (CTX_SB_HP) */

	bool in_use;		/* true once the SS is running */
};

/*
 * dcss_ss_write - write a register, directly and via the context loader.
 *
 * While the SS is not yet running the value is written straight to the
 * register; in all cases it is also queued in the context loader so the
 * setting survives/applies on the next context load.
 */
static void dcss_ss_write(struct dcss_ss *ss, u32 val, u32 ofs)
{
	if (!ss->in_use)
		dcss_writel(val, ss->base_reg + ofs);

	dcss_ctxld_write(ss->ctxld, ss->ctx_id, val,
			 ss->base_ofs + ofs);
}

/*
 * dcss_ss_init - allocate and map the subsampler unit.
 * @dcss: parent DCSS device (takes ownership via dcss->ss)
 * @ss_base: physical base address of the SS register region
 *
 * Returns 0 on success or -ENOMEM on allocation/ioremap failure.
 */
int dcss_ss_init(struct dcss_dev *dcss, unsigned long ss_base)
{
	struct dcss_ss *ss;

	ss = kzalloc(sizeof(*ss), GFP_KERNEL);
	if (!ss)
		return -ENOMEM;

	dcss->ss = ss;
	ss->dev = dcss->dev;
	ss->ctxld = dcss->ctxld;

	ss->base_reg = ioremap(ss_base, SZ_4K);
	if (!ss->base_reg) {
		dev_err(dcss->dev, "ss: unable to remap ss base\n");
		kfree(ss);
		return -ENOMEM;
	}

	ss->base_ofs = ss_base;
	ss->ctx_id = CTX_SB_HP;

	return 0;
}

/*
 * dcss_ss_exit - stop the SS, unmap registers and free the instance.
 */
void dcss_ss_exit(struct dcss_ss *ss)
{
	/* stop SS */
	dcss_writel(0, ss->base_reg + DCSS_SS_SYS_CTRL);

	if (ss->base_reg)
		iounmap(ss->base_reg);

	kfree(ss);
}

/*
 * dcss_ss_subsam_set - program fixed subsampling coefficients and clipping.
 *
 * The magic values below are the hardware coefficient/clip settings used
 * by this driver; their per-field meaning follows the DCSS_SS_COEFF and
 * CLIP_* field layouts above.
 */
void dcss_ss_subsam_set(struct dcss_ss *ss)
{
	dcss_ss_write(ss, 0x41614161, DCSS_SS_COEFF);
	dcss_ss_write(ss, 0, DCSS_SS_MODE);
	dcss_ss_write(ss, 0x03ff0000, DCSS_SS_CLIP_CB);
	dcss_ss_write(ss, 0x03ff0000, DCSS_SS_CLIP_CR);
}

/*
 * dcss_ss_sync_set - program display timing from a videomode.
 * @ss: subsampler instance
 * @vm: video timing (porches, sync lengths, active area)
 * @phsync: true for positive hsync polarity
 * @pvsync: true for positive vsync polarity
 *
 * Packs the total frame size, h/v sync windows and data-enable window
 * into the SS timing registers.  All values are in pixel/line units,
 * mostly expressed as "position - 1" per the register definitions.
 */
void dcss_ss_sync_set(struct dcss_ss *ss, struct videomode *vm,
		      bool phsync, bool pvsync)
{
	u16 lrc_x, lrc_y;
	u16 hsync_start, hsync_end;
	u16 vsync_start, vsync_end;
	u16 de_ulc_x, de_ulc_y;
	u16 de_lrc_x, de_lrc_y;

	/* lower-right corner of the full frame (totals - 1) */
	lrc_x = vm->hfront_porch + vm->hback_porch + vm->hsync_len +
		vm->hactive - 1;
	lrc_y = vm->vfront_porch + vm->vback_porch + vm->vsync_len +
		vm->vactive - 1;

	dcss_ss_write(ss, (lrc_y << LRC_Y_POS) | lrc_x, DCSS_SS_DISPLAY);

	/* hsync pulse wraps around the end of the line */
	hsync_start = vm->hfront_porch + vm->hback_porch + vm->hsync_len +
		      vm->hactive - 1;
	hsync_end = vm->hsync_len - 1;

	dcss_ss_write(ss, (phsync ? SYNC_POL : 0) |
		      ((u32)hsync_end << SYNC_END_POS) | hsync_start,
		      DCSS_SS_HSYNC);

	vsync_start = vm->vfront_porch - 1;
	vsync_end = vm->vfront_porch + vm->vsync_len - 1;

	dcss_ss_write(ss, (pvsync ? SYNC_POL : 0) |
		      ((u32)vsync_end << SYNC_END_POS) | vsync_start,
		      DCSS_SS_VSYNC);

	/* data-enable window: upper-left and lower-right corners */
	de_ulc_x = vm->hsync_len + vm->hback_porch - 1;
	de_ulc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch;

	dcss_ss_write(ss, SYNC_POL | ((u32)de_ulc_y << ULC_Y_POS) | de_ulc_x,
		      DCSS_SS_DE_ULC);

	de_lrc_x = vm->hsync_len + vm->hback_porch + vm->hactive - 1;
	de_lrc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch +
		   vm->vactive - 1;

	dcss_ss_write(ss, (de_lrc_y << LRC_Y_POS) | de_lrc_x, DCSS_SS_DE_LRC);
}

/*
 * dcss_ss_enable - start the subsampler (via the context loader).
 */
void dcss_ss_enable(struct dcss_ss *ss)
{
	dcss_ss_write(ss, RUN_EN, DCSS_SS_SYS_CTRL);
	ss->in_use = true;
}

/*
 * dcss_ss_shutoff - stop the subsampler with a direct register write,
 * bypassing the context loader (used for immediate shutdown).
 */
void dcss_ss_shutoff(struct dcss_ss *ss)
{
	dcss_writel(0, ss->base_reg + DCSS_SS_SYS_CTRL);
	ss->in_use = false;
}
linux-master
drivers/gpu/drm/imx/dcss/dcss-ss.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 NXP.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dcss-dev.h"

/* Context-loader register offsets and bit fields. */
#define DCSS_CTXLD_CONTROL_STATUS	0x0
#define   CTXLD_ENABLE			BIT(0)
#define   ARB_SEL			BIT(1)
#define   RD_ERR_EN			BIT(2)
#define   DB_COMP_EN			BIT(3)
#define   SB_HP_COMP_EN			BIT(4)
#define   SB_LP_COMP_EN			BIT(5)
#define   DB_PEND_SB_REC_EN		BIT(6)
#define   SB_PEND_DISP_ACTIVE_EN	BIT(7)
#define   AHB_ERR_EN			BIT(8)
#define   RD_ERR			BIT(16)
#define   DB_COMP			BIT(17)
#define   SB_HP_COMP			BIT(18)
#define   SB_LP_COMP			BIT(19)
#define   DB_PEND_SB_REC		BIT(20)
#define   SB_PEND_DISP_ACTIVE		BIT(21)
#define   AHB_ERR			BIT(22)
#define DCSS_CTXLD_DB_BASE_ADDR		0x10
#define DCSS_CTXLD_DB_COUNT		0x14
#define DCSS_CTXLD_SB_BASE_ADDR		0x18
#define DCSS_CTXLD_SB_COUNT		0x1C
#define   SB_HP_COUNT_POS		0
#define   SB_HP_COUNT_MASK		0xffff
#define   SB_LP_COUNT_POS		16
#define   SB_LP_COUNT_MASK		0xffff0000
#define DCSS_AHB_ERR_ADDR		0x20

#define CTXLD_IRQ_COMPLETION	(DB_COMP | SB_HP_COMP | SB_LP_COMP)
#define CTXLD_IRQ_ERROR		(RD_ERR | DB_PEND_SB_REC | AHB_ERR)

/* The following sizes are in context loader entries, 8 bytes each. */
#define CTXLD_DB_CTX_ENTRIES		1024	/* max 65536 */
#define CTXLD_SB_LP_CTX_ENTRIES		10240	/* max 65536 */
#define CTXLD_SB_HP_CTX_ENTRIES		20000	/* max 65536 */
#define CTXLD_SB_CTX_ENTRIES		(CTXLD_SB_LP_CTX_ENTRIES + \
					 CTXLD_SB_HP_CTX_ENTRIES)

/* Sizes, in entries, of the DB, SB_HP and SB_LP context regions. */
static u16 dcss_ctxld_ctx_size[3] = {
	CTXLD_DB_CTX_ENTRIES,
	CTXLD_SB_HP_CTX_ENTRIES,
	CTXLD_SB_LP_CTX_ENTRIES
};

/* this represents an entry in the context loader map: value + register offset */
struct dcss_ctxld_item {
	u32 val;
	u32 ofs;
};

#define CTX_ITEM_SIZE			sizeof(struct dcss_ctxld_item)

/*
 * Double-buffered context loader state.  Index [2] arrays hold the two
 * contexts; current_ctx selects the one being filled while the other is
 * (possibly) being consumed by hardware.
 */
struct dcss_ctxld {
	struct device *dev;
	void __iomem *ctxld_reg;
	int irq;
	bool irq_en;

	struct dcss_ctxld_item *db[2];
	struct dcss_ctxld_item *sb_hp[2];
	struct dcss_ctxld_item *sb_lp[2];	/* points into the sb_hp buffer */

	dma_addr_t db_paddr[2];
	dma_addr_t sb_paddr[2];

	u16 ctx_size[2][3]; /* holds the sizes of DB, SB_HP and SB_LP ctx */
	u8 current_ctx;

	bool in_use;	/* hardware is currently loading a context */
	bool armed;	/* a context load has been requested */

	spinlock_t lock; /* protects concurrent access to private data */
};

/*
 * dcss_ctxld_irq_handler - completion/error interrupt.
 *
 * On completion (and only once the ENABLE bit has self-cleared) marks the
 * loader idle and invokes the optional disable callback.  On error there
 * is nothing to recover; log the status and the sizes of the context that
 * was being loaded (current_ctx ^ 1, i.e. the one handed to hardware).
 * Either way the handled status bits are cleared.
 */
static irqreturn_t dcss_ctxld_irq_handler(int irq, void *data)
{
	struct dcss_ctxld *ctxld = data;
	struct dcss_dev *dcss = dcss_drv_dev_to_dcss(ctxld->dev);
	u32 irq_status;

	irq_status = dcss_readl(ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);

	if (irq_status & CTXLD_IRQ_COMPLETION &&
	    !(irq_status & CTXLD_ENABLE) && ctxld->in_use) {
		ctxld->in_use = false;

		if (dcss && dcss->disable_callback)
			dcss->disable_callback(dcss);
	} else if (irq_status & CTXLD_IRQ_ERROR) {
		/*
		 * Except for throwing an error message and clearing the status
		 * register, there's not much we can do here.
		 */
		dev_err(ctxld->dev, "ctxld: error encountered: %08x\n",
			irq_status);
		dev_err(ctxld->dev, "ctxld: db=%d, sb_hp=%d, sb_lp=%d\n",
			ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_DB],
			ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_SB_HP],
			ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_SB_LP]);
	}

	dcss_clr(irq_status & (CTXLD_IRQ_ERROR | CTXLD_IRQ_COMPLETION),
		 ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);

	return IRQ_HANDLED;
}

/* Look up and request the "ctxld" IRQ; returns 0 or a negative errno. */
static int dcss_ctxld_irq_config(struct dcss_ctxld *ctxld,
				 struct platform_device *pdev)
{
	int ret;

	ctxld->irq = platform_get_irq_byname(pdev, "ctxld");
	if (ctxld->irq < 0)
		return ctxld->irq;

	ret = request_irq(ctxld->irq, dcss_ctxld_irq_handler,
			  0, "dcss_ctxld", ctxld);
	if (ret) {
		dev_err(ctxld->dev, "ctxld: irq request failed.\n");
		return ret;
	}

	ctxld->irq_en = true;

	return 0;
}

/*
 * dcss_ctxld_hw_cfg - enable error/completion interrupt sources and clear
 * any stale error status bits.
 */
static void dcss_ctxld_hw_cfg(struct dcss_ctxld *ctxld)
{
	dcss_writel(RD_ERR_EN | SB_HP_COMP_EN |
		    DB_PEND_SB_REC_EN | AHB_ERR_EN | RD_ERR | AHB_ERR,
		    ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);
}

/*
 * dcss_ctxld_free_ctx - release both DMA context buffers.
 *
 * Only db and sb_hp own allocations; sb_lp aliases the tail of sb_hp's
 * buffer and must not be freed separately.
 */
static void dcss_ctxld_free_ctx(struct dcss_ctxld *ctxld)
{
	struct dcss_ctxld_item *ctx;
	int i;

	for (i = 0; i < 2; i++) {
		if (ctxld->db[i]) {
			dma_free_coherent(ctxld->dev,
					  CTXLD_DB_CTX_ENTRIES * sizeof(*ctx),
					  ctxld->db[i], ctxld->db_paddr[i]);
			ctxld->db[i] = NULL;
			ctxld->db_paddr[i] = 0;
		}

		if (ctxld->sb_hp[i]) {
			dma_free_coherent(ctxld->dev,
					  CTXLD_SB_CTX_ENTRIES * sizeof(*ctx),
					  ctxld->sb_hp[i], ctxld->sb_paddr[i]);
			ctxld->sb_hp[i] = NULL;
			ctxld->sb_paddr[i] = 0;
		}
	}
}

/*
 * dcss_ctxld_alloc_ctx - allocate DMA-coherent context buffers for both
 * double-buffer slots.  SB_HP and SB_LP share one allocation, with SB_LP
 * starting CTXLD_SB_HP_CTX_ENTRIES in.  On failure the caller is expected
 * to run dcss_ctxld_free_ctx() (see dcss_ctxld_init()).
 */
static int dcss_ctxld_alloc_ctx(struct dcss_ctxld *ctxld)
{
	struct dcss_ctxld_item *ctx;
	int i;

	for (i = 0; i < 2; i++) {
		ctx = dma_alloc_coherent(ctxld->dev,
					 CTXLD_DB_CTX_ENTRIES * sizeof(*ctx),
					 &ctxld->db_paddr[i], GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;

		ctxld->db[i] = ctx;

		ctx = dma_alloc_coherent(ctxld->dev,
					 CTXLD_SB_CTX_ENTRIES * sizeof(*ctx),
					 &ctxld->sb_paddr[i], GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;

		ctxld->sb_hp[i] = ctx;
		ctxld->sb_lp[i] = ctx + CTXLD_SB_HP_CTX_ENTRIES;
	}

	return 0;
}

/*
 * dcss_ctxld_init - allocate context memory, map registers and install
 * the IRQ handler.  Returns 0 on success or a negative errno; all partial
 * resources are rolled back on failure.
 */
int dcss_ctxld_init(struct dcss_dev *dcss, unsigned long ctxld_base)
{
	struct dcss_ctxld *ctxld;
	int ret;

	ctxld = kzalloc(sizeof(*ctxld), GFP_KERNEL);
	if (!ctxld)
		return -ENOMEM;

	dcss->ctxld = ctxld;
	ctxld->dev = dcss->dev;

	spin_lock_init(&ctxld->lock);

	ret = dcss_ctxld_alloc_ctx(ctxld);
	if (ret) {
		dev_err(dcss->dev, "ctxld: cannot allocate context memory.\n");
		goto err;
	}

	ctxld->ctxld_reg = ioremap(ctxld_base, SZ_4K);
	if (!ctxld->ctxld_reg) {
		dev_err(dcss->dev, "ctxld: unable to remap ctxld base\n");
		ret = -ENOMEM;
		goto err;
	}

	ret = dcss_ctxld_irq_config(ctxld, to_platform_device(dcss->dev));
	if (ret)
		goto err_irq;

	dcss_ctxld_hw_cfg(ctxld);

	return 0;

err_irq:
	iounmap(ctxld->ctxld_reg);

err:
	dcss_ctxld_free_ctx(ctxld);
	kfree(ctxld);

	return ret;
}

/* Tear down IRQ, register mapping and context memory. */
void dcss_ctxld_exit(struct dcss_ctxld *ctxld)
{
	free_irq(ctxld->irq, ctxld);

	if (ctxld->ctxld_reg)
		iounmap(ctxld->ctxld_reg);

	dcss_ctxld_free_ctx(ctxld);
	kfree(ctxld);
}

/*
 * dcss_ctxld_enable_locked - hand the current context to the hardware.
 *
 * Caller must hold ctxld->lock.  Flushes DPR/scaler control writes first,
 * compacts SB_LP so it immediately follows SB_HP in the shared buffer
 * (the hardware expects them contiguous), programs base/count registers,
 * sets CTXLD_ENABLE, then flips current_ctx and zeroes the sizes of the
 * now-active fill context.
 */
static int dcss_ctxld_enable_locked(struct dcss_ctxld *ctxld)
{
	int curr_ctx = ctxld->current_ctx;
	u32 db_base, sb_base, sb_count;
	u32 sb_hp_cnt, sb_lp_cnt, db_cnt;
	struct dcss_dev *dcss = dcss_drv_dev_to_dcss(ctxld->dev);

	if (!dcss)
		return 0;

	dcss_dpr_write_sysctrl(dcss->dpr);

	dcss_scaler_write_sclctrl(dcss->scaler);

	sb_hp_cnt = ctxld->ctx_size[curr_ctx][CTX_SB_HP];
	sb_lp_cnt = ctxld->ctx_size[curr_ctx][CTX_SB_LP];
	db_cnt = ctxld->ctx_size[curr_ctx][CTX_DB];

	/* make sure SB_LP context area comes after SB_HP */
	if (sb_lp_cnt &&
	    ctxld->sb_lp[curr_ctx] != ctxld->sb_hp[curr_ctx] + sb_hp_cnt) {
		struct dcss_ctxld_item *sb_lp_adjusted;

		sb_lp_adjusted = ctxld->sb_hp[curr_ctx] + sb_hp_cnt;

		memcpy(sb_lp_adjusted, ctxld->sb_lp[curr_ctx],
		       sb_lp_cnt * CTX_ITEM_SIZE);
	}

	db_base = db_cnt ? ctxld->db_paddr[curr_ctx] : 0;

	dcss_writel(db_base, ctxld->ctxld_reg + DCSS_CTXLD_DB_BASE_ADDR);
	dcss_writel(db_cnt, ctxld->ctxld_reg + DCSS_CTXLD_DB_COUNT);

	if (sb_hp_cnt)
		sb_count = ((sb_hp_cnt << SB_HP_COUNT_POS) & SB_HP_COUNT_MASK) |
			   ((sb_lp_cnt << SB_LP_COUNT_POS) & SB_LP_COUNT_MASK);
	else
		sb_count = (sb_lp_cnt << SB_HP_COUNT_POS) & SB_HP_COUNT_MASK;

	sb_base = sb_count ? ctxld->sb_paddr[curr_ctx] : 0;

	dcss_writel(sb_base, ctxld->ctxld_reg + DCSS_CTXLD_SB_BASE_ADDR);
	dcss_writel(sb_count, ctxld->ctxld_reg + DCSS_CTXLD_SB_COUNT);

	/* enable the context loader */
	dcss_set(CTXLD_ENABLE, ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);

	ctxld->in_use = true;

	/*
	 * Toggle the current context to the alternate one so that any updates
	 * in the modules' settings take place there.
	 */
	ctxld->current_ctx ^= 1;

	ctxld->ctx_size[ctxld->current_ctx][CTX_DB] = 0;
	ctxld->ctx_size[ctxld->current_ctx][CTX_SB_HP] = 0;
	ctxld->ctx_size[ctxld->current_ctx][CTX_SB_LP] = 0;

	return 0;
}

/*
 * dcss_ctxld_enable - arm a context load; the actual hardware kick
 * happens later from dcss_ctxld_kick().
 */
int dcss_ctxld_enable(struct dcss_ctxld *ctxld)
{
	spin_lock_irq(&ctxld->lock);
	ctxld->armed = true;
	spin_unlock_irq(&ctxld->lock);

	return 0;
}

/*
 * dcss_ctxld_kick - start an armed context load if the hardware is idle.
 * Safe to call from any context (irqsave locking).
 */
void dcss_ctxld_kick(struct dcss_ctxld *ctxld)
{
	unsigned long flags;

	spin_lock_irqsave(&ctxld->lock, flags);
	if (ctxld->armed && !ctxld->in_use) {
		ctxld->armed = false;
		dcss_ctxld_enable_locked(ctxld);
	}
	spin_unlock_irqrestore(&ctxld->lock, flags);
}

/*
 * dcss_ctxld_write_irqsafe - append a (val, reg_ofs) pair to the given
 * context region of the fill context.  Caller must hold ctxld->lock (or
 * otherwise exclude concurrent writers).  Drops the write with a WARN if
 * the region is full.
 */
void dcss_ctxld_write_irqsafe(struct dcss_ctxld *ctxld, u32 ctx_id, u32 val,
			      u32 reg_ofs)
{
	int curr_ctx = ctxld->current_ctx;

	struct dcss_ctxld_item *ctx[] = {
		[CTX_DB] = ctxld->db[curr_ctx],
		[CTX_SB_HP] = ctxld->sb_hp[curr_ctx],
		[CTX_SB_LP] = ctxld->sb_lp[curr_ctx]
	};
	int item_idx = ctxld->ctx_size[curr_ctx][ctx_id];

	if (item_idx + 1 > dcss_ctxld_ctx_size[ctx_id]) {
		WARN_ON(1);
		return;
	}

	ctx[ctx_id][item_idx].val = val;
	ctx[ctx_id][item_idx].ofs = reg_ofs;
	ctxld->ctx_size[curr_ctx][ctx_id] += 1;
}

/* Locked wrapper around dcss_ctxld_write_irqsafe(). */
void dcss_ctxld_write(struct dcss_ctxld *ctxld, u32 ctx_id,
		      u32 val, u32 reg_ofs)
{
	spin_lock_irq(&ctxld->lock);
	dcss_ctxld_write_irqsafe(ctxld, ctx_id, val, reg_ofs);
	spin_unlock_irq(&ctxld->lock);
}

/* True when the fill context holds no pending register writes. */
bool dcss_ctxld_is_flushed(struct dcss_ctxld *ctxld)
{
	return ctxld->ctx_size[ctxld->current_ctx][CTX_DB] == 0 &&
		ctxld->ctx_size[ctxld->current_ctx][CTX_SB_HP] == 0 &&
		ctxld->ctx_size[ctxld->current_ctx][CTX_SB_LP] == 0;
}

/* Reconfigure hardware and re-enable the IRQ after a suspend. */
int dcss_ctxld_resume(struct dcss_ctxld *ctxld)
{
	dcss_ctxld_hw_cfg(ctxld);

	if (!ctxld->irq_en) {
		enable_irq(ctxld->irq);
		ctxld->irq_en = true;
	}

	return 0;
}

/*
 * dcss_ctxld_suspend - flush any pending context (waiting up to 500 ms
 * for the hardware to finish), disable the IRQ and reset the context
 * bookkeeping to slot 0.  Returns -ETIMEDOUT if the flush did not finish.
 */
int dcss_ctxld_suspend(struct dcss_ctxld *ctxld)
{
	int ret = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);

	if (!dcss_ctxld_is_flushed(ctxld)) {
		dcss_ctxld_kick(ctxld);

		while (!time_after(jiffies, timeout) && ctxld->in_use)
			msleep(20);

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
	}

	spin_lock_irq(&ctxld->lock);

	if (ctxld->irq_en) {
		disable_irq_nosync(ctxld->irq);
		ctxld->irq_en = false;
	}

	/* reset context region and sizes */
	ctxld->current_ctx = 0;
	ctxld->ctx_size[0][CTX_DB] = 0;
	ctxld->ctx_size[0][CTX_SB_HP] = 0;
	ctxld->ctx_size[0][CTX_SB_LP] = 0;

	spin_unlock_irq(&ctxld->lock);

	return ret;
}

/* Lockdep helper: assert the ctxld lock is held by the caller. */
void dcss_ctxld_assert_locked(struct dcss_ctxld *ctxld)
{
	lockdep_assert_held(&ctxld->lock);
}
linux-master
drivers/gpu/drm/imx/dcss/dcss-ctxld.c
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: 2020 Marian Cichy <[email protected]>

#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#define IMX21LCDC_LSSAR         0x0000 /* LCDC Screen Start Address Register */
#define IMX21LCDC_LSR           0x0004 /* LCDC Size Register */
#define IMX21LCDC_LVPWR         0x0008 /* LCDC Virtual Page Width Register */
#define IMX21LCDC_LCPR          0x000C /* LCDC Cursor Position Register */
#define IMX21LCDC_LCWHB         0x0010 /* LCDC Cursor Width Height and Blink Register*/
#define IMX21LCDC_LCCMR         0x0014 /* LCDC Color Cursor Mapping Register */
#define IMX21LCDC_LPCR          0x0018 /* LCDC Panel Configuration Register */
#define IMX21LCDC_LHCR          0x001C /* LCDC Horizontal Configuration Register */
#define IMX21LCDC_LVCR          0x0020 /* LCDC Vertical Configuration Register */
#define IMX21LCDC_LPOR          0x0024 /* LCDC Panning Offset Register */
#define IMX21LCDC_LSCR          0x0028 /* LCDC Sharp Configuration Register */
#define IMX21LCDC_LPCCR         0x002C /* LCDC PWM Contrast Control Register */
#define IMX21LCDC_LDCR          0x0030 /* LCDC DMA Control Register */
#define IMX21LCDC_LRMCR         0x0034 /* LCDC Refresh Mode Control Register */
#define IMX21LCDC_LICR          0x0038 /* LCDC Interrupt Configuration Register */
#define IMX21LCDC_LIER          0x003C /* LCDC Interrupt Enable Register */
#define IMX21LCDC_LISR          0x0040 /* LCDC Interrupt Status Register */
#define IMX21LCDC_LGWSAR        0x0050 /* LCDC Graphic Window Start Address Register */
#define IMX21LCDC_LGWSR         0x0054 /* LCDC Graph Window Size Register */
#define IMX21LCDC_LGWVPWR       0x0058 /* LCDC Graphic Window Virtual Page Width Register */
#define IMX21LCDC_LGWPOR        0x005C /* LCDC Graphic Window Panning Offset Register */
#define IMX21LCDC_LGWPR         0x0060 /* LCDC Graphic Window Position Register */
#define IMX21LCDC_LGWCR         0x0064 /* LCDC Graphic Window Control Register */
#define IMX21LCDC_LGWDCR        0x0068 /* LCDC Graphic Window DMA Control Register */
#define IMX21LCDC_LAUSCR        0x0080 /* LCDC AUS Mode Control Register */
#define IMX21LCDC_LAUSCCR       0x0084 /* LCDC AUS Mode Cursor Control Register */
#define IMX21LCDC_BGLUT         0x0800 /* Background Lookup Table */
#define IMX21LCDC_GWLUT         0x0C00 /* Graphic Window Lookup Table */

#define IMX21LCDC_LCPR_CC0 BIT(30) /* Cursor Control Bit 0 */
#define IMX21LCDC_LCPR_CC1 BIT(31) /* Cursor Control Bit 1 */

/* Values HSYNC, VSYNC and Framesize Register */
#define IMX21LCDC_LHCR_HWIDTH	GENMASK(31, 26)
#define IMX21LCDC_LHCR_HFPORCH	GENMASK(15, 8)	/* H_WAIT_1 in the i.MX25 Reference manual */
#define IMX21LCDC_LHCR_HBPORCH	GENMASK(7, 0)	/* H_WAIT_2 in the i.MX25 Reference manual */

#define IMX21LCDC_LVCR_VWIDTH	GENMASK(31, 26)
#define IMX21LCDC_LVCR_VFPORCH	GENMASK(15, 8)	/* V_WAIT_1 in the i.MX25 Reference manual */
#define IMX21LCDC_LVCR_VBPORCH	GENMASK(7, 0)	/* V_WAIT_2 in the i.MX25 Reference manual */

#define IMX21LCDC_LSR_XMAX	GENMASK(25, 20)
#define IMX21LCDC_LSR_YMAX	GENMASK(9, 0)

/* Values for LPCR Register */
#define IMX21LCDC_LPCR_PCD	GENMASK(5, 0)
#define IMX21LCDC_LPCR_SHARP	BIT(6)
#define IMX21LCDC_LPCR_SCLKSEL	BIT(7)
#define IMX21LCDC_LPCR_ACD	GENMASK(14, 8)
#define IMX21LCDC_LPCR_ACDSEL	BIT(15)
#define IMX21LCDC_LPCR_REV_VS	BIT(16)
#define IMX21LCDC_LPCR_SWAP_SEL	BIT(17)
#define IMX21LCDC_LPCR_END_SEL	BIT(18)
#define IMX21LCDC_LPCR_SCLKIDLE	BIT(19)
#define IMX21LCDC_LPCR_OEPOL	BIT(20)
#define IMX21LCDC_LPCR_CLKPOL	BIT(21)
#define IMX21LCDC_LPCR_LPPOL	BIT(22)
#define IMX21LCDC_LPCR_FLMPOL	BIT(23)
#define IMX21LCDC_LPCR_PIXPOL	BIT(24)
#define IMX21LCDC_LPCR_BPIX	GENMASK(27, 25)
#define IMX21LCDC_LPCR_PBSIZ	GENMASK(29, 28)
#define IMX21LCDC_LPCR_COLOR	BIT(30)
#define IMX21LCDC_LPCR_TFT	BIT(31)

#define INTR_EOF BIT(1) /* VBLANK Interrupt Bit */

/* BPIX field encodings for the two supported pixel formats. */
#define BPP_RGB565	0x05
#define BPP_XRGB8888	0x07

#define LCDC_MIN_XRES 64
#define LCDC_MIN_YRES 64

#define LCDC_MAX_XRES 1024
#define LCDC_MAX_YRES 1024

/* Driver state: embedded DRM device, simple pipe and the three clocks. */
struct imx_lcdc {
	struct drm_device drm;
	struct drm_simple_display_pipe pipe;
	struct drm_connector *connector;
	void __iomem *base;

	struct clk *clk_ipg;
	struct clk *clk_ahb;
	struct clk *clk_per;
};

static const u32 imx_lcdc_formats[] = {
	DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888,
};

/* Recover the driver struct from the embedded drm_device. */
static inline struct imx_lcdc *imx_lcdc_from_drmdev(struct drm_device *drm)
{
	return container_of(drm, struct imx_lcdc, drm);
}

/*
 * imx_lcdc_get_format - map a DRM fourcc to the LPCR BPIX encoding.
 * Unsupported formats fall back to XRGB8888 with a warning.
 */
static unsigned int imx_lcdc_get_format(unsigned int drm_format)
{
	switch (drm_format) {
	default:
		DRM_WARN("Format not supported - fallback to XRGB8888\n");
		fallthrough;
	case DRM_FORMAT_XRGB8888:
		return BPP_XRGB8888;
	case DRM_FORMAT_RGB565:
		return BPP_RGB565;
	}
}

/*
 * imx_lcdc_update_hw_registers - program scanout address and, if
 * @mode_set, the full timing/format configuration.
 *
 * The framebuffer DMA address (LSSAR) is always updated.  When the mode
 * changes, the PER clock is gated first because most LCDC registers can
 * only be written while the pixel clock is stopped, and re-enabled after
 * programming.
 */
static void imx_lcdc_update_hw_registers(struct drm_simple_display_pipe *pipe,
					 struct drm_plane_state *old_state,
					 bool mode_set)
{
	struct drm_crtc *crtc = &pipe->crtc;
	struct drm_plane_state *new_state = pipe->plane.state;
	struct drm_framebuffer *fb = new_state->fb;
	struct imx_lcdc *lcdc = imx_lcdc_from_drmdev(pipe->crtc.dev);
	u32 lpcr, lvcr, lhcr;
	u32 framesize;
	dma_addr_t addr;

	addr = drm_fb_dma_get_gem_addr(fb, new_state, 0);

	/* The LSSAR register specifies the LCD screen start address (SSA). */
	writel(addr, lcdc->base + IMX21LCDC_LSSAR);

	if (!mode_set)
		return;

	/* Disable PER clock to make register write possible */
	if (old_state && old_state->crtc && old_state->crtc->enabled)
		clk_disable_unprepare(lcdc->clk_per);

	/* Framesize */
	framesize = FIELD_PREP(IMX21LCDC_LSR_XMAX, crtc->mode.hdisplay >> 4) |
		FIELD_PREP(IMX21LCDC_LSR_YMAX, crtc->mode.vdisplay);
	writel(framesize, lcdc->base + IMX21LCDC_LSR);

	/* HSYNC */
	lhcr = FIELD_PREP(IMX21LCDC_LHCR_HFPORCH, crtc->mode.hsync_start - crtc->mode.hdisplay - 1) |
		FIELD_PREP(IMX21LCDC_LHCR_HWIDTH, crtc->mode.hsync_end - crtc->mode.hsync_start - 1) |
		FIELD_PREP(IMX21LCDC_LHCR_HBPORCH, crtc->mode.htotal - crtc->mode.hsync_end - 3);
	writel(lhcr, lcdc->base + IMX21LCDC_LHCR);

	/* VSYNC */
	lvcr = FIELD_PREP(IMX21LCDC_LVCR_VFPORCH, crtc->mode.vsync_start - crtc->mode.vdisplay) |
		FIELD_PREP(IMX21LCDC_LVCR_VWIDTH, crtc->mode.vsync_end - crtc->mode.vsync_start) |
		FIELD_PREP(IMX21LCDC_LVCR_VBPORCH, crtc->mode.vtotal - crtc->mode.vsync_end);
	writel(lvcr, lcdc->base + IMX21LCDC_LVCR);

	/* Update only the pixel-format field of LPCR. */
	lpcr = readl(lcdc->base + IMX21LCDC_LPCR);
	lpcr &= ~IMX21LCDC_LPCR_BPIX;
	lpcr |= FIELD_PREP(IMX21LCDC_LPCR_BPIX, imx_lcdc_get_format(fb->format->format));
	writel(lpcr, lcdc->base + IMX21LCDC_LPCR);

	/* Virtual Page Width */
	writel(new_state->fb->pitches[0] / 4, lcdc->base + IMX21LCDC_LVPWR);

	/* Enable PER clock */
	if (new_state->crtc->enabled)
		clk_prepare_enable(lcdc->clk_per);
}

/*
 * imx_lcdc_pipe_enable - full panel configuration and clock bring-up.
 *
 * Derives sync/clock polarities from the mode and bus flags (note the
 * register bits are active-low: 0 means positive polarity), computes the
 * pixel clock divider from the PER clock, programs LPCR, clears panning
 * and the hardware cursor, enables the IPG and AHB clocks, writes the
 * timing registers and finally enables the VBLANK interrupt.
 */
static void imx_lcdc_pipe_enable(struct drm_simple_display_pipe *pipe,
				 struct drm_crtc_state *crtc_state,
				 struct drm_plane_state *plane_state)
{
	int ret;
	int clk_div;
	int bpp;
	struct imx_lcdc *lcdc = imx_lcdc_from_drmdev(pipe->crtc.dev);
	struct drm_display_mode *mode = &pipe->crtc.mode;
	struct drm_display_info *disp_info = &lcdc->connector->display_info;
	const int hsync_pol = (mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : 1;
	const int vsync_pol = (mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : 1;
	const int data_enable_pol =
		(disp_info->bus_flags & DRM_BUS_FLAG_DE_HIGH) ? 0 : 1;
	const int clk_pol =
		(disp_info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE) ? 0 : 1;

	clk_div = DIV_ROUND_CLOSEST_ULL(clk_get_rate(lcdc->clk_per),
					mode->clock * 1000);
	bpp = imx_lcdc_get_format(plane_state->fb->format->format);

	writel(FIELD_PREP(IMX21LCDC_LPCR_PCD, clk_div - 1) |
	       FIELD_PREP(IMX21LCDC_LPCR_LPPOL, hsync_pol) |
	       FIELD_PREP(IMX21LCDC_LPCR_FLMPOL, vsync_pol) |
	       FIELD_PREP(IMX21LCDC_LPCR_OEPOL, data_enable_pol) |
	       FIELD_PREP(IMX21LCDC_LPCR_TFT, 1) |
	       FIELD_PREP(IMX21LCDC_LPCR_COLOR, 1) |
	       FIELD_PREP(IMX21LCDC_LPCR_PBSIZ, 3) |
	       FIELD_PREP(IMX21LCDC_LPCR_BPIX, bpp) |
	       FIELD_PREP(IMX21LCDC_LPCR_SCLKSEL, 1) |
	       FIELD_PREP(IMX21LCDC_LPCR_PIXPOL, 0) |
	       FIELD_PREP(IMX21LCDC_LPCR_CLKPOL, clk_pol),
	       lcdc->base + IMX21LCDC_LPCR);

	/* 0px panning offset */
	writel(0x00000000, lcdc->base + IMX21LCDC_LPOR);

	/* disable hardware cursor */
	writel(readl(lcdc->base + IMX21LCDC_LCPR) & ~(IMX21LCDC_LCPR_CC0 | IMX21LCDC_LCPR_CC1),
	       lcdc->base + IMX21LCDC_LCPR);

	ret = clk_prepare_enable(lcdc->clk_ipg);
	if (ret) {
		dev_err(pipe->crtc.dev->dev, "Cannot enable ipg clock: %pe\n", ERR_PTR(ret));
		return;
	}
	ret = clk_prepare_enable(lcdc->clk_ahb);
	if (ret) {
		dev_err(pipe->crtc.dev->dev, "Cannot enable ahb clock: %pe\n", ERR_PTR(ret));
		clk_disable_unprepare(lcdc->clk_ipg);
		return;
	}

	imx_lcdc_update_hw_registers(pipe, NULL, true);

	/* Enable VBLANK Interrupt */
	writel(INTR_EOF, lcdc->base + IMX21LCDC_LIER);
}

/*
 * imx_lcdc_pipe_disable - gate all clocks, flush any pending vblank
 * event and mask the VBLANK interrupt.
 */
static void imx_lcdc_pipe_disable(struct drm_simple_display_pipe *pipe)
{
	struct imx_lcdc *lcdc = imx_lcdc_from_drmdev(pipe->crtc.dev);
	struct drm_crtc *crtc = &lcdc->pipe.crtc;
	struct drm_pending_vblank_event *event;

	clk_disable_unprepare(lcdc->clk_ahb);
	clk_disable_unprepare(lcdc->clk_ipg);

	if (pipe->crtc.enabled)
		clk_disable_unprepare(lcdc->clk_per);

	spin_lock_irq(&lcdc->drm.event_lock);
	event = crtc->state->event;
	if (event) {
		crtc->state->event = NULL;
		drm_crtc_send_vblank_event(crtc, event);
	}
	spin_unlock_irq(&lcdc->drm.event_lock);

	/* Disable VBLANK Interrupt */
	writel(0, lcdc->base + IMX21LCDC_LIER);
}

/*
 * imx_lcdc_pipe_check - validate resolution limits (width must be a
 * multiple of 16 per the LSR XMAX encoding) and flag a mode change when
 * the active area differs from the previous state.
 */
static int imx_lcdc_pipe_check(struct drm_simple_display_pipe *pipe,
			       struct drm_plane_state *plane_state,
			       struct drm_crtc_state *crtc_state)
{
	const struct drm_display_mode *mode = &crtc_state->mode;
	const struct drm_display_mode *old_mode = &pipe->crtc.state->mode;

	if (mode->hdisplay < LCDC_MIN_XRES || mode->hdisplay > LCDC_MAX_XRES ||
	    mode->vdisplay < LCDC_MIN_YRES || mode->vdisplay > LCDC_MAX_YRES ||
	    mode->hdisplay % 0x10) { /* must be multiple of 16 */
		drm_err(pipe->crtc.dev, "unsupported display mode (%u x %u)\n",
			mode->hdisplay, mode->vdisplay);
		return -EINVAL;
	}

	crtc_state->mode_changed =
		old_mode->hdisplay != mode->hdisplay ||
		old_mode->vdisplay != mode->vdisplay;

	return 0;
}

/*
 * imx_lcdc_pipe_update - per-frame update: reprogram the hardware (full
 * reprogram only when format or CRTC binding changed) and arm/send the
 * pending vblank event under the event lock.
 */
static void imx_lcdc_pipe_update(struct drm_simple_display_pipe *pipe,
				 struct drm_plane_state *old_state)
{
	struct drm_crtc *crtc = &pipe->crtc;
	struct drm_pending_vblank_event *event = crtc->state->event;
	struct drm_plane_state *new_state = pipe->plane.state;
	struct drm_framebuffer *fb = new_state->fb;
	struct drm_framebuffer *old_fb = old_state->fb;
	struct drm_crtc *old_crtc = old_state->crtc;
	bool mode_changed = false;

	if (old_fb && old_fb->format != fb->format)
		mode_changed = true;
	else if (old_crtc != crtc)
		mode_changed = true;

	imx_lcdc_update_hw_registers(pipe, old_state, mode_changed);

	if (event) {
		crtc->state->event = NULL;

		spin_lock_irq(&crtc->dev->event_lock);

		if (crtc->state->active && drm_crtc_vblank_get(crtc) == 0)
			drm_crtc_arm_vblank_event(crtc, event);
		else
			drm_crtc_send_vblank_event(crtc, event);

		spin_unlock_irq(&crtc->dev->event_lock);
	}
}

static const struct drm_simple_display_pipe_funcs imx_lcdc_pipe_funcs = {
	.enable = imx_lcdc_pipe_enable,
	.disable = imx_lcdc_pipe_disable,
	.check = imx_lcdc_pipe_check,
	.update = imx_lcdc_pipe_update,
};

static const struct drm_mode_config_funcs imx_lcdc_mode_config_funcs = {
	.fb_create = drm_gem_fb_create_with_dirty,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static const struct drm_mode_config_helper_funcs imx_lcdc_mode_config_helpers = {
	.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};

/* DRM release callback: stop polling and free the driver struct. */
static void imx_lcdc_release(struct drm_device *drm)
{
	struct imx_lcdc *lcdc = imx_lcdc_from_drmdev(drm);

	drm_kms_helper_poll_fini(drm);
	kfree(lcdc);
}

DEFINE_DRM_GEM_DMA_FOPS(imx_lcdc_drm_fops);

static struct drm_driver imx_lcdc_drm_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.fops = &imx_lcdc_drm_fops,
	DRM_GEM_DMA_DRIVER_OPS_VMAP,
	.release = imx_lcdc_release,
	.name = "imx-lcdc",
	.desc = "i.MX LCDC driver",
	.date = "20200716",
};

static const struct of_device_id imx_lcdc_of_dev_id[] = {
	{
		.compatible = "fsl,imx21-lcdc",
	},
	{
		.compatible = "fsl,imx25-lcdc",
	},
	{
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, imx_lcdc_of_dev_id);

/* End-of-frame (vblank) interrupt handler. */
static irqreturn_t imx_lcdc_irq_handler(int irq, void *arg)
{
	struct imx_lcdc *lcdc = arg;
	struct drm_crtc *crtc = &lcdc->pipe.crtc;
	unsigned int status;

	status = readl(lcdc->base + IMX21LCDC_LISR);

	if (status & INTR_EOF) {
		drm_crtc_handle_vblank(crtc);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

/*
 * imx_lcdc_probe - device bring-up: map registers, find the bridge and
 * clocks, initialize modesetting, pulse the clocks once to reset the
 * controller (it has no enable bit), install the IRQ handler and
 * register the DRM device.
 */
static int imx_lcdc_probe(struct platform_device *pdev)
{
	struct imx_lcdc *lcdc;
	struct drm_device *drm;
	struct drm_bridge *bridge;
	int irq;
	int ret;
	struct device *dev = &pdev->dev;

	lcdc = devm_drm_dev_alloc(dev, &imx_lcdc_drm_driver,
				  struct imx_lcdc, drm);
	if (IS_ERR(lcdc))
		return PTR_ERR(lcdc);

	drm = &lcdc->drm;

	lcdc->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(lcdc->base))
		return dev_err_probe(dev, PTR_ERR(lcdc->base), "Cannot get IO memory\n");

	bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
	if (IS_ERR(bridge))
		return dev_err_probe(dev, PTR_ERR(bridge), "Failed to find bridge\n");

	/* Get Clocks */
	lcdc->clk_ipg = devm_clk_get(dev, "ipg");
	if (IS_ERR(lcdc->clk_ipg))
		return dev_err_probe(dev, PTR_ERR(lcdc->clk_ipg), "Failed to get %s clk\n", "ipg");

	lcdc->clk_ahb = devm_clk_get(dev, "ahb");
	if (IS_ERR(lcdc->clk_ahb))
		return dev_err_probe(dev, PTR_ERR(lcdc->clk_ahb), "Failed to get %s clk\n", "ahb");

	lcdc->clk_per = devm_clk_get(dev, "per");
	if (IS_ERR(lcdc->clk_per))
		return dev_err_probe(dev, PTR_ERR(lcdc->clk_per), "Failed to get %s clk\n", "per");

	ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32));
	if (ret)
		return dev_err_probe(dev, ret, "Cannot set DMA Mask\n");

	/* Modeset init */
	ret = drmm_mode_config_init(drm);
	if (ret)
		return dev_err_probe(dev, ret, "Cannot initialize mode configuration structure\n");

	/* CRTC, Plane, Encoder */
	ret = drm_simple_display_pipe_init(drm, &lcdc->pipe,
					   &imx_lcdc_pipe_funcs,
					   imx_lcdc_formats,
					   ARRAY_SIZE(imx_lcdc_formats), NULL, NULL);
	if (ret < 0)
		return dev_err_probe(drm->dev, ret, "Cannot setup simple display pipe\n");

	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (ret < 0)
		return dev_err_probe(drm->dev, ret, "Failed to initialize vblank\n");

	ret = drm_bridge_attach(&lcdc->pipe.encoder, bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret)
		return dev_err_probe(drm->dev, ret, "Cannot attach bridge\n");

	lcdc->connector = drm_bridge_connector_init(drm, &lcdc->pipe.encoder);
	if (IS_ERR(lcdc->connector))
		return dev_err_probe(drm->dev, PTR_ERR(lcdc->connector), "Cannot init bridge connector\n");

	drm_connector_attach_encoder(lcdc->connector, &lcdc->pipe.encoder);

	/*
	 * The LCDC controller does not have an enable bit. The
	 * controller starts directly when the clocks are enabled.
	 * If the clocks are enabled when the controller is not yet
	 * programmed with proper register values (enabled at the
	 * bootloader, for example) then it just goes into some undefined
	 * state.
	 * To avoid this issue, let's enable and disable LCDC IPG,
	 * PER and AHB clock so that we force some kind of 'reset'
	 * to the LCDC block.
	 */
	ret = clk_prepare_enable(lcdc->clk_ipg);
	if (ret)
		return dev_err_probe(dev, ret, "Cannot enable ipg clock\n");
	clk_disable_unprepare(lcdc->clk_ipg);
	ret = clk_prepare_enable(lcdc->clk_per);
	if (ret)
		return dev_err_probe(dev, ret, "Cannot enable per clock\n");
	clk_disable_unprepare(lcdc->clk_per);
	ret = clk_prepare_enable(lcdc->clk_ahb);
	if (ret)
		return dev_err_probe(dev, ret, "Cannot enable ahb clock\n");
	clk_disable_unprepare(lcdc->clk_ahb);

	drm->mode_config.min_width = LCDC_MIN_XRES;
	drm->mode_config.max_width = LCDC_MAX_XRES;
	drm->mode_config.min_height = LCDC_MIN_YRES;
	drm->mode_config.max_height = LCDC_MAX_YRES;
	drm->mode_config.preferred_depth = 16;
	drm->mode_config.funcs = &imx_lcdc_mode_config_funcs;
	drm->mode_config.helper_private = &imx_lcdc_mode_config_helpers;

	drm_mode_config_reset(drm);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		return ret;
	}

	ret = devm_request_irq(dev, irq, imx_lcdc_irq_handler, 0, "imx-lcdc", lcdc);
	if (ret < 0)
		return dev_err_probe(drm->dev, ret, "Failed to install IRQ handler\n");

	platform_set_drvdata(pdev, drm);

	ret = drm_dev_register(&lcdc->drm, 0);
	if (ret)
		return dev_err_probe(dev, ret, "Cannot register device\n");

	drm_fbdev_generic_setup(drm, 0);

	return 0;
}

/* Unregister the DRM device and quiesce the hardware. */
static int imx_lcdc_remove(struct platform_device *pdev)
{
	struct drm_device *drm = platform_get_drvdata(pdev);

	drm_dev_unregister(drm);
	drm_atomic_helper_shutdown(drm);

	return 0;
}

/* Disable scanout on system shutdown/reboot. */
static void imx_lcdc_shutdown(struct platform_device *pdev)
{
	drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
}

static struct platform_driver imx_lcdc_driver = {
	.driver = {
		.name = "imx-lcdc",
		.of_match_table = imx_lcdc_of_dev_id,
	},
	.probe = imx_lcdc_probe,
	.remove = imx_lcdc_remove,
	.shutdown = imx_lcdc_shutdown,
};
module_platform_driver(imx_lcdc_driver);

MODULE_AUTHOR("Marian Cichy <[email protected]>");
MODULE_DESCRIPTION("Freescale i.MX LCDC driver");
MODULE_LICENSE("GPL");
linux-master
drivers/gpu/drm/imx/lcdc/imx-lcdc.c
// SPDX-License-Identifier: GPL-2.0 /* Copyright (C) 2011-2013 Freescale Semiconductor, Inc. * * derived from imx-hdmi.c(renamed to bridge/dw_hdmi.c now) */ #include <linux/component.h> #include <linux/mfd/syscon.h> #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <video/imx-ipu-v3.h> #include <drm/bridge/dw_hdmi.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_edid.h> #include <drm/drm_encoder.h> #include <drm/drm_managed.h> #include <drm/drm_of.h> #include <drm/drm_simple_kms_helper.h> #include "imx-drm.h" struct imx_hdmi; struct imx_hdmi_encoder { struct drm_encoder encoder; struct imx_hdmi *hdmi; }; struct imx_hdmi { struct device *dev; struct drm_bridge *bridge; struct dw_hdmi *hdmi; struct regmap *regmap; }; static inline struct imx_hdmi *enc_to_imx_hdmi(struct drm_encoder *e) { return container_of(e, struct imx_hdmi_encoder, encoder)->hdmi; } static const struct dw_hdmi_mpll_config imx_mpll_cfg[] = { { 45250000, { { 0x01e0, 0x0000 }, { 0x21e1, 0x0000 }, { 0x41e2, 0x0000 } }, }, { 92500000, { { 0x0140, 0x0005 }, { 0x2141, 0x0005 }, { 0x4142, 0x0005 }, }, }, { 148500000, { { 0x00a0, 0x000a }, { 0x20a1, 0x000a }, { 0x40a2, 0x000a }, }, }, { 216000000, { { 0x00a0, 0x000a }, { 0x2001, 0x000f }, { 0x4002, 0x000f }, }, }, { ~0UL, { { 0x0000, 0x0000 }, { 0x0000, 0x0000 }, { 0x0000, 0x0000 }, }, } }; static const struct dw_hdmi_curr_ctrl imx_cur_ctr[] = { /* pixelclk bpp8 bpp10 bpp12 */ { 54000000, { 0x091c, 0x091c, 0x06dc }, }, { 58400000, { 0x091c, 0x06dc, 0x06dc }, }, { 72000000, { 0x06dc, 0x06dc, 0x091c }, }, { 74250000, { 0x06dc, 0x0b5c, 0x091c }, }, { 118800000, { 0x091c, 0x091c, 0x06dc }, }, { 216000000, { 0x06dc, 0x0b5c, 0x091c }, }, { ~0UL, { 0x0000, 0x0000, 0x0000 }, }, }; /* * Resistance term 133Ohm Cfg * PREEMP config 0.00 * TX/CK level 10 */ static const struct dw_hdmi_phy_config imx_phy_config[] = { /*pixelclk symbol term vlev 
*/ { 216000000, 0x800d, 0x0005, 0x01ad}, { ~0UL, 0x0000, 0x0000, 0x0000} }; static void dw_hdmi_imx_encoder_enable(struct drm_encoder *encoder) { struct imx_hdmi *hdmi = enc_to_imx_hdmi(encoder); int mux = drm_of_encoder_active_port_id(hdmi->dev->of_node, encoder); regmap_update_bits(hdmi->regmap, IOMUXC_GPR3, IMX6Q_GPR3_HDMI_MUX_CTL_MASK, mux << IMX6Q_GPR3_HDMI_MUX_CTL_SHIFT); } static int dw_hdmi_imx_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state); imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB888_1X24; imx_crtc_state->di_hsync_pin = 2; imx_crtc_state->di_vsync_pin = 3; return 0; } static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = { .enable = dw_hdmi_imx_encoder_enable, .atomic_check = dw_hdmi_imx_atomic_check, }; static enum drm_mode_status imx6q_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data, const struct drm_display_info *info, const struct drm_display_mode *mode) { if (mode->clock < 13500) return MODE_CLOCK_LOW; /* FIXME: Hardware is capable of 266MHz, but setup data is missing. */ if (mode->clock > 216000) return MODE_CLOCK_HIGH; return MODE_OK; } static enum drm_mode_status imx6dl_hdmi_mode_valid(struct dw_hdmi *hdmi, void *data, const struct drm_display_info *info, const struct drm_display_mode *mode) { if (mode->clock < 13500) return MODE_CLOCK_LOW; /* FIXME: Hardware is capable of 270MHz, but setup data is missing. 
*/ if (mode->clock > 216000) return MODE_CLOCK_HIGH; return MODE_OK; } static struct dw_hdmi_plat_data imx6q_hdmi_drv_data = { .mpll_cfg = imx_mpll_cfg, .cur_ctr = imx_cur_ctr, .phy_config = imx_phy_config, .mode_valid = imx6q_hdmi_mode_valid, }; static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = { .mpll_cfg = imx_mpll_cfg, .cur_ctr = imx_cur_ctr, .phy_config = imx_phy_config, .mode_valid = imx6dl_hdmi_mode_valid, }; static const struct of_device_id dw_hdmi_imx_dt_ids[] = { { .compatible = "fsl,imx6q-hdmi", .data = &imx6q_hdmi_drv_data }, { .compatible = "fsl,imx6dl-hdmi", .data = &imx6dl_hdmi_drv_data }, {}, }; MODULE_DEVICE_TABLE(of, dw_hdmi_imx_dt_ids); static int dw_hdmi_imx_bind(struct device *dev, struct device *master, void *data) { struct drm_device *drm = data; struct imx_hdmi_encoder *hdmi_encoder; struct drm_encoder *encoder; int ret; hdmi_encoder = drmm_simple_encoder_alloc(drm, struct imx_hdmi_encoder, encoder, DRM_MODE_ENCODER_TMDS); if (IS_ERR(hdmi_encoder)) return PTR_ERR(hdmi_encoder); hdmi_encoder->hdmi = dev_get_drvdata(dev); encoder = &hdmi_encoder->encoder; ret = imx_drm_encoder_parse_of(drm, encoder, dev->of_node); if (ret) return ret; drm_encoder_helper_add(encoder, &dw_hdmi_imx_encoder_helper_funcs); return drm_bridge_attach(encoder, hdmi_encoder->hdmi->bridge, NULL, 0); } static const struct component_ops dw_hdmi_imx_ops = { .bind = dw_hdmi_imx_bind, }; static int dw_hdmi_imx_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; const struct of_device_id *match = of_match_node(dw_hdmi_imx_dt_ids, np); struct imx_hdmi *hdmi; int ret; hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL); if (!hdmi) return -ENOMEM; platform_set_drvdata(pdev, hdmi); hdmi->dev = &pdev->dev; hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "gpr"); if (IS_ERR(hdmi->regmap)) { dev_err(hdmi->dev, "Unable to get gpr\n"); return PTR_ERR(hdmi->regmap); } hdmi->hdmi = dw_hdmi_probe(pdev, match->data); if (IS_ERR(hdmi->hdmi)) 
return PTR_ERR(hdmi->hdmi); hdmi->bridge = of_drm_find_bridge(np); if (!hdmi->bridge) { dev_err(hdmi->dev, "Unable to find bridge\n"); dw_hdmi_remove(hdmi->hdmi); return -ENODEV; } ret = component_add(&pdev->dev, &dw_hdmi_imx_ops); if (ret) dw_hdmi_remove(hdmi->hdmi); return ret; } static int dw_hdmi_imx_remove(struct platform_device *pdev) { struct imx_hdmi *hdmi = platform_get_drvdata(pdev); component_del(&pdev->dev, &dw_hdmi_imx_ops); dw_hdmi_remove(hdmi->hdmi); return 0; } static struct platform_driver dw_hdmi_imx_platform_driver = { .probe = dw_hdmi_imx_probe, .remove = dw_hdmi_imx_remove, .driver = { .name = "dwhdmi-imx", .of_match_table = dw_hdmi_imx_dt_ids, }, }; module_platform_driver(dw_hdmi_imx_platform_driver); MODULE_AUTHOR("Andy Yan <[email protected]>"); MODULE_AUTHOR("Yakir Yang <[email protected]>"); MODULE_DESCRIPTION("IMX6 Specific DW-HDMI Driver Extension"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:dwhdmi-imx");
linux-master
drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c
// SPDX-License-Identifier: GPL-2.0+ /* * Freescale i.MX drm driver * * Copyright (C) 2011 Sascha Hauer, Pengutronix */ #include <linux/component.h> #include <linux/device.h> #include <linux/dma-buf.h> #include <linux/module.h> #include <linux/platform_device.h> #include <video/imx-ipu-v3.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_managed.h> #include <drm/drm_of.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> #include "imx-drm.h" #include "ipuv3-plane.h" #define MAX_CRTC 4 static int legacyfb_depth = 16; module_param(legacyfb_depth, int, 0444); DEFINE_DRM_GEM_DMA_FOPS(imx_drm_driver_fops); void imx_drm_connector_destroy(struct drm_connector *connector) { drm_connector_unregister(connector); drm_connector_cleanup(connector); } EXPORT_SYMBOL_GPL(imx_drm_connector_destroy); static int imx_drm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) { int ret; ret = drm_atomic_helper_check(dev, state); if (ret) return ret; /* * Check modeset again in case crtc_state->mode_changed is * updated in plane's ->atomic_check callback. */ ret = drm_atomic_helper_check_modeset(dev, state); if (ret) return ret; /* Assign PRG/PRE channels and check if all constrains are satisfied. 
*/ ret = ipu_planes_assign_pre(dev, state); if (ret) return ret; return ret; } static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = { .fb_create = drm_gem_fb_create, .atomic_check = imx_drm_atomic_check, .atomic_commit = drm_atomic_helper_commit, }; static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; struct drm_plane *plane; struct drm_plane_state *old_plane_state, *new_plane_state; bool plane_disabling = false; int i; drm_atomic_helper_commit_modeset_disables(dev, state); drm_atomic_helper_commit_planes(dev, state, DRM_PLANE_COMMIT_ACTIVE_ONLY | DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET); drm_atomic_helper_commit_modeset_enables(dev, state); for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { if (drm_atomic_plane_disabling(old_plane_state, new_plane_state)) plane_disabling = true; } /* * The flip done wait is only strictly required by imx-drm if a deferred * plane disable is in-flight. As the core requires blocking commits * to wait for the flip it is done here unconditionally. This keeps the * workitem around a bit longer than required for the majority of * non-blocking commits, but we accept that for the sake of simplicity. */ drm_atomic_helper_wait_for_flip_done(dev, state); if (plane_disabling) { for_each_old_plane_in_state(state, plane, old_plane_state, i) ipu_plane_disable_deferred(plane); } drm_atomic_helper_commit_hw_done(state); } static const struct drm_mode_config_helper_funcs imx_drm_mode_config_helpers = { .atomic_commit_tail = imx_drm_atomic_commit_tail, }; int imx_drm_encoder_parse_of(struct drm_device *drm, struct drm_encoder *encoder, struct device_node *np) { uint32_t crtc_mask = drm_of_find_possible_crtcs(drm, np); /* * If we failed to find the CRTC(s) which this encoder is * supposed to be connected to, it's because the CRTC has * not been registered yet. Defer probing, and hope that * the required CRTC is added later. 
*/ if (crtc_mask == 0) return -EPROBE_DEFER; encoder->possible_crtcs = crtc_mask; /* FIXME: cloning support not clear, disable it all for now */ encoder->possible_clones = 0; return 0; } EXPORT_SYMBOL_GPL(imx_drm_encoder_parse_of); static const struct drm_ioctl_desc imx_drm_ioctls[] = { /* none so far */ }; static int imx_drm_dumb_create(struct drm_file *file_priv, struct drm_device *drm, struct drm_mode_create_dumb *args) { u32 width = args->width; int ret; args->width = ALIGN(width, 8); ret = drm_gem_dma_dumb_create(file_priv, drm, args); if (ret) return ret; args->width = width; return ret; } static const struct drm_driver imx_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(imx_drm_dumb_create), .ioctls = imx_drm_ioctls, .num_ioctls = ARRAY_SIZE(imx_drm_ioctls), .fops = &imx_drm_driver_fops, .name = "imx-drm", .desc = "i.MX DRM graphics", .date = "20120507", .major = 1, .minor = 0, .patchlevel = 0, }; static int compare_of(struct device *dev, void *data) { struct device_node *np = data; /* Special case for DI, dev->of_node may not be set yet */ if (strcmp(dev->driver->name, "imx-ipuv3-crtc") == 0) { struct ipu_client_platformdata *pdata = dev->platform_data; return pdata->of_node == np; } /* Special case for LDB, one device for two channels */ if (of_node_name_eq(np, "lvds-channel")) { np = of_get_parent(np); of_node_put(np); } return dev->of_node == np; } static int imx_drm_bind(struct device *dev) { struct drm_device *drm; int ret; drm = drm_dev_alloc(&imx_drm_driver, dev); if (IS_ERR(drm)) return PTR_ERR(drm); /* * set max width and height as default value(4096x4096). * this value would be used to check framebuffer size limitation * at drm_mode_addfb(). 
*/ drm->mode_config.min_width = 1; drm->mode_config.min_height = 1; drm->mode_config.max_width = 4096; drm->mode_config.max_height = 4096; drm->mode_config.funcs = &imx_drm_mode_config_funcs; drm->mode_config.helper_private = &imx_drm_mode_config_helpers; drm->mode_config.normalize_zpos = true; ret = drmm_mode_config_init(drm); if (ret) goto err_kms; ret = drm_vblank_init(drm, MAX_CRTC); if (ret) goto err_kms; dev_set_drvdata(dev, drm); /* Now try and bind all our sub-components */ ret = component_bind_all(dev, drm); if (ret) goto err_kms; drm_mode_config_reset(drm); /* * All components are now initialised, so setup the fb helper. * The fb helper takes copies of key hardware information, so the * crtcs/connectors/encoders must not change after this point. */ if (legacyfb_depth != 16 && legacyfb_depth != 32) { dev_warn(dev, "Invalid legacyfb_depth. Defaulting to 16bpp\n"); legacyfb_depth = 16; } drm_kms_helper_poll_init(drm); ret = drm_dev_register(drm, 0); if (ret) goto err_poll_fini; drm_fbdev_dma_setup(drm, legacyfb_depth); return 0; err_poll_fini: drm_kms_helper_poll_fini(drm); component_unbind_all(drm->dev, drm); err_kms: drm_dev_put(drm); return ret; } static void imx_drm_unbind(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); drm_dev_unregister(drm); drm_kms_helper_poll_fini(drm); component_unbind_all(drm->dev, drm); drm_dev_put(drm); dev_set_drvdata(dev, NULL); } static const struct component_master_ops imx_drm_ops = { .bind = imx_drm_bind, .unbind = imx_drm_unbind, }; static int imx_drm_platform_probe(struct platform_device *pdev) { int ret = drm_of_component_probe(&pdev->dev, compare_of, &imx_drm_ops); if (!ret) ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); return ret; } static int imx_drm_platform_remove(struct platform_device *pdev) { component_master_del(&pdev->dev, &imx_drm_ops); return 0; } #ifdef CONFIG_PM_SLEEP static int imx_drm_suspend(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); return 
drm_mode_config_helper_suspend(drm_dev); } static int imx_drm_resume(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); return drm_mode_config_helper_resume(drm_dev); } #endif static SIMPLE_DEV_PM_OPS(imx_drm_pm_ops, imx_drm_suspend, imx_drm_resume); static const struct of_device_id imx_drm_dt_ids[] = { { .compatible = "fsl,imx-display-subsystem", }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, imx_drm_dt_ids); static struct platform_driver imx_drm_pdrv = { .probe = imx_drm_platform_probe, .remove = imx_drm_platform_remove, .driver = { .name = "imx-drm", .pm = &imx_drm_pm_ops, .of_match_table = imx_drm_dt_ids, }, }; static struct platform_driver * const drivers[] = { &imx_drm_pdrv, &ipu_drm_driver, }; static int __init imx_drm_init(void) { if (drm_firmware_drivers_only()) return -ENODEV; return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); } module_init(imx_drm_init); static void __exit imx_drm_exit(void) { platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); } module_exit(imx_drm_exit); MODULE_AUTHOR("Sascha Hauer <[email protected]>"); MODULE_DESCRIPTION("i.MX drm driver core"); MODULE_LICENSE("GPL");
linux-master
drivers/gpu/drm/imx/ipuv3/imx-drm-core.c
// SPDX-License-Identifier: GPL-2.0+ /* * i.MX drm driver - LVDS display bridge * * Copyright (C) 2012 Sascha Hauer, Pengutronix */ #include <linux/clk.h> #include <linux/component.h> #include <linux/i2c.h> #include <linux/media-bus-format.h> #include <linux/mfd/syscon.h> #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> #include <linux/module.h> #include <linux/of_device.h> #include <linux/of_graph.h> #include <linux/regmap.h> #include <linux/videodev2.h> #include <video/of_display_timing.h> #include <video/of_videomode.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_edid.h> #include <drm/drm_managed.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> #include "imx-drm.h" #define DRIVER_NAME "imx-ldb" #define LDB_CH0_MODE_EN_TO_DI0 (1 << 0) #define LDB_CH0_MODE_EN_TO_DI1 (3 << 0) #define LDB_CH0_MODE_EN_MASK (3 << 0) #define LDB_CH1_MODE_EN_TO_DI0 (1 << 2) #define LDB_CH1_MODE_EN_TO_DI1 (3 << 2) #define LDB_CH1_MODE_EN_MASK (3 << 2) #define LDB_SPLIT_MODE_EN (1 << 4) #define LDB_DATA_WIDTH_CH0_24 (1 << 5) #define LDB_BIT_MAP_CH0_JEIDA (1 << 6) #define LDB_DATA_WIDTH_CH1_24 (1 << 7) #define LDB_BIT_MAP_CH1_JEIDA (1 << 8) #define LDB_DI0_VS_POL_ACT_LOW (1 << 9) #define LDB_DI1_VS_POL_ACT_LOW (1 << 10) #define LDB_BGREF_RMODE_INT (1 << 15) struct imx_ldb_channel; struct imx_ldb_encoder { struct drm_connector connector; struct drm_encoder encoder; struct imx_ldb_channel *channel; }; struct imx_ldb; struct imx_ldb_channel { struct imx_ldb *ldb; /* Defines what is connected to the ldb, only one at a time */ struct drm_panel *panel; struct drm_bridge *bridge; struct device_node *child; struct i2c_adapter *ddc; int chno; void *edid; struct drm_display_mode mode; int mode_valid; u32 bus_format; u32 bus_flags; }; static inline struct imx_ldb_channel *con_to_imx_ldb_ch(struct drm_connector *c) { return 
container_of(c, struct imx_ldb_encoder, connector)->channel; } static inline struct imx_ldb_channel *enc_to_imx_ldb_ch(struct drm_encoder *e) { return container_of(e, struct imx_ldb_encoder, encoder)->channel; } struct bus_mux { int reg; int shift; int mask; }; struct imx_ldb { struct regmap *regmap; struct device *dev; struct imx_ldb_channel channel[2]; struct clk *clk[2]; /* our own clock */ struct clk *clk_sel[4]; /* parent of display clock */ struct clk *clk_parent[4]; /* original parent of clk_sel */ struct clk *clk_pll[2]; /* upstream clock we can adjust */ u32 ldb_ctrl; const struct bus_mux *lvds_mux; }; static void imx_ldb_ch_set_bus_format(struct imx_ldb_channel *imx_ldb_ch, u32 bus_format) { struct imx_ldb *ldb = imx_ldb_ch->ldb; int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; switch (bus_format) { case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG: break; case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG: if (imx_ldb_ch->chno == 0 || dual) ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24; if (imx_ldb_ch->chno == 1 || dual) ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24; break; case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA: if (imx_ldb_ch->chno == 0 || dual) ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24 | LDB_BIT_MAP_CH0_JEIDA; if (imx_ldb_ch->chno == 1 || dual) ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24 | LDB_BIT_MAP_CH1_JEIDA; break; } } static int imx_ldb_connector_get_modes(struct drm_connector *connector) { struct imx_ldb_channel *imx_ldb_ch = con_to_imx_ldb_ch(connector); int num_modes; num_modes = drm_panel_get_modes(imx_ldb_ch->panel, connector); if (num_modes > 0) return num_modes; if (!imx_ldb_ch->edid && imx_ldb_ch->ddc) imx_ldb_ch->edid = drm_get_edid(connector, imx_ldb_ch->ddc); if (imx_ldb_ch->edid) { drm_connector_update_edid_property(connector, imx_ldb_ch->edid); num_modes = drm_add_edid_modes(connector, imx_ldb_ch->edid); } if (imx_ldb_ch->mode_valid) { struct drm_display_mode *mode; mode = drm_mode_duplicate(connector->dev, &imx_ldb_ch->mode); if (!mode) return -EINVAL; mode->type |= 
DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; drm_mode_probed_add(connector, mode); num_modes++; } return num_modes; } static void imx_ldb_set_clock(struct imx_ldb *ldb, int mux, int chno, unsigned long serial_clk, unsigned long di_clk) { int ret; dev_dbg(ldb->dev, "%s: now: %ld want: %ld\n", __func__, clk_get_rate(ldb->clk_pll[chno]), serial_clk); clk_set_rate(ldb->clk_pll[chno], serial_clk); dev_dbg(ldb->dev, "%s after: %ld\n", __func__, clk_get_rate(ldb->clk_pll[chno])); dev_dbg(ldb->dev, "%s: now: %ld want: %ld\n", __func__, clk_get_rate(ldb->clk[chno]), (long int)di_clk); clk_set_rate(ldb->clk[chno], di_clk); dev_dbg(ldb->dev, "%s after: %ld\n", __func__, clk_get_rate(ldb->clk[chno])); /* set display clock mux to LDB input clock */ ret = clk_set_parent(ldb->clk_sel[mux], ldb->clk[chno]); if (ret) dev_err(ldb->dev, "unable to set di%d parent clock to ldb_di%d\n", mux, chno); } static void imx_ldb_encoder_enable(struct drm_encoder *encoder) { struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); struct imx_ldb *ldb = imx_ldb_ch->ldb; int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder); if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) { dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux); return; } drm_panel_prepare(imx_ldb_ch->panel); if (dual) { clk_set_parent(ldb->clk_sel[mux], ldb->clk[0]); clk_set_parent(ldb->clk_sel[mux], ldb->clk[1]); clk_prepare_enable(ldb->clk[0]); clk_prepare_enable(ldb->clk[1]); } else { clk_set_parent(ldb->clk_sel[mux], ldb->clk[imx_ldb_ch->chno]); } if (imx_ldb_ch == &ldb->channel[0] || dual) { ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK; if (mux == 0 || ldb->lvds_mux) ldb->ldb_ctrl |= LDB_CH0_MODE_EN_TO_DI0; else if (mux == 1) ldb->ldb_ctrl |= LDB_CH0_MODE_EN_TO_DI1; } if (imx_ldb_ch == &ldb->channel[1] || dual) { ldb->ldb_ctrl &= ~LDB_CH1_MODE_EN_MASK; if (mux == 1 || ldb->lvds_mux) ldb->ldb_ctrl |= LDB_CH1_MODE_EN_TO_DI1; else if (mux == 0) ldb->ldb_ctrl |= 
LDB_CH1_MODE_EN_TO_DI0; } if (ldb->lvds_mux) { const struct bus_mux *lvds_mux = NULL; if (imx_ldb_ch == &ldb->channel[0]) lvds_mux = &ldb->lvds_mux[0]; else if (imx_ldb_ch == &ldb->channel[1]) lvds_mux = &ldb->lvds_mux[1]; regmap_update_bits(ldb->regmap, lvds_mux->reg, lvds_mux->mask, mux << lvds_mux->shift); } regmap_write(ldb->regmap, IOMUXC_GPR2, ldb->ldb_ctrl); drm_panel_enable(imx_ldb_ch->panel); } static void imx_ldb_encoder_atomic_mode_set(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, struct drm_connector_state *connector_state) { struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); struct drm_display_mode *mode = &crtc_state->adjusted_mode; struct imx_ldb *ldb = imx_ldb_ch->ldb; int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; unsigned long serial_clk; unsigned long di_clk = mode->clock * 1000; int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder); u32 bus_format = imx_ldb_ch->bus_format; if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) { dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux); return; } if (mode->clock > 170000) { dev_warn(ldb->dev, "%s: mode exceeds 170 MHz pixel clock\n", __func__); } if (mode->clock > 85000 && !dual) { dev_warn(ldb->dev, "%s: mode exceeds 85 MHz pixel clock\n", __func__); } if (!IS_ALIGNED(mode->hdisplay, 8)) { dev_warn(ldb->dev, "%s: hdisplay does not align to 8 byte\n", __func__); } if (dual) { serial_clk = 3500UL * mode->clock; imx_ldb_set_clock(ldb, mux, 0, serial_clk, di_clk); imx_ldb_set_clock(ldb, mux, 1, serial_clk, di_clk); } else { serial_clk = 7000UL * mode->clock; imx_ldb_set_clock(ldb, mux, imx_ldb_ch->chno, serial_clk, di_clk); } /* FIXME - assumes straight connections DI0 --> CH0, DI1 --> CH1 */ if (imx_ldb_ch == &ldb->channel[0] || dual) { if (mode->flags & DRM_MODE_FLAG_NVSYNC) ldb->ldb_ctrl |= LDB_DI0_VS_POL_ACT_LOW; else if (mode->flags & DRM_MODE_FLAG_PVSYNC) ldb->ldb_ctrl &= ~LDB_DI0_VS_POL_ACT_LOW; } if (imx_ldb_ch == &ldb->channel[1] || dual) { if 
(mode->flags & DRM_MODE_FLAG_NVSYNC) ldb->ldb_ctrl |= LDB_DI1_VS_POL_ACT_LOW; else if (mode->flags & DRM_MODE_FLAG_PVSYNC) ldb->ldb_ctrl &= ~LDB_DI1_VS_POL_ACT_LOW; } if (!bus_format) { struct drm_connector *connector = connector_state->connector; struct drm_display_info *di = &connector->display_info; if (di->num_bus_formats) bus_format = di->bus_formats[0]; } imx_ldb_ch_set_bus_format(imx_ldb_ch, bus_format); } static void imx_ldb_encoder_disable(struct drm_encoder *encoder) { struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); struct imx_ldb *ldb = imx_ldb_ch->ldb; int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; int mux, ret; drm_panel_disable(imx_ldb_ch->panel); if (imx_ldb_ch == &ldb->channel[0] || dual) ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK; if (imx_ldb_ch == &ldb->channel[1] || dual) ldb->ldb_ctrl &= ~LDB_CH1_MODE_EN_MASK; regmap_write(ldb->regmap, IOMUXC_GPR2, ldb->ldb_ctrl); if (dual) { clk_disable_unprepare(ldb->clk[0]); clk_disable_unprepare(ldb->clk[1]); } if (ldb->lvds_mux) { const struct bus_mux *lvds_mux = NULL; if (imx_ldb_ch == &ldb->channel[0]) lvds_mux = &ldb->lvds_mux[0]; else if (imx_ldb_ch == &ldb->channel[1]) lvds_mux = &ldb->lvds_mux[1]; regmap_read(ldb->regmap, lvds_mux->reg, &mux); mux &= lvds_mux->mask; mux >>= lvds_mux->shift; } else { mux = (imx_ldb_ch == &ldb->channel[0]) ? 
0 : 1; } /* set display clock mux back to original input clock */ ret = clk_set_parent(ldb->clk_sel[mux], ldb->clk_parent[mux]); if (ret) dev_err(ldb->dev, "unable to set di%d parent clock to original parent\n", mux); drm_panel_unprepare(imx_ldb_ch->panel); } static int imx_ldb_encoder_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state); struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); struct drm_display_info *di = &conn_state->connector->display_info; u32 bus_format = imx_ldb_ch->bus_format; /* Bus format description in DT overrides connector display info. */ if (!bus_format && di->num_bus_formats) { bus_format = di->bus_formats[0]; imx_crtc_state->bus_flags = di->bus_flags; } else { bus_format = imx_ldb_ch->bus_format; imx_crtc_state->bus_flags = imx_ldb_ch->bus_flags; } switch (bus_format) { case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG: imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB666_1X18; break; case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG: case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA: imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB888_1X24; break; default: return -EINVAL; } imx_crtc_state->di_hsync_pin = 2; imx_crtc_state->di_vsync_pin = 3; return 0; } static const struct drm_connector_funcs imx_ldb_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .destroy = imx_drm_connector_destroy, .reset = drm_atomic_helper_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; static const struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs = { .get_modes = imx_ldb_connector_get_modes, }; static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = { .atomic_mode_set = imx_ldb_encoder_atomic_mode_set, .enable = imx_ldb_encoder_enable, .disable = imx_ldb_encoder_disable, .atomic_check = 
imx_ldb_encoder_atomic_check, }; static int imx_ldb_get_clk(struct imx_ldb *ldb, int chno) { char clkname[16]; snprintf(clkname, sizeof(clkname), "di%d", chno); ldb->clk[chno] = devm_clk_get(ldb->dev, clkname); if (IS_ERR(ldb->clk[chno])) return PTR_ERR(ldb->clk[chno]); snprintf(clkname, sizeof(clkname), "di%d_pll", chno); ldb->clk_pll[chno] = devm_clk_get(ldb->dev, clkname); return PTR_ERR_OR_ZERO(ldb->clk_pll[chno]); } static int imx_ldb_register(struct drm_device *drm, struct imx_ldb_channel *imx_ldb_ch) { struct imx_ldb *ldb = imx_ldb_ch->ldb; struct imx_ldb_encoder *ldb_encoder; struct drm_connector *connector; struct drm_encoder *encoder; int ret; ldb_encoder = drmm_simple_encoder_alloc(drm, struct imx_ldb_encoder, encoder, DRM_MODE_ENCODER_LVDS); if (IS_ERR(ldb_encoder)) return PTR_ERR(ldb_encoder); ldb_encoder->channel = imx_ldb_ch; connector = &ldb_encoder->connector; encoder = &ldb_encoder->encoder; ret = imx_drm_encoder_parse_of(drm, encoder, imx_ldb_ch->child); if (ret) return ret; ret = imx_ldb_get_clk(ldb, imx_ldb_ch->chno); if (ret) return ret; if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) { ret = imx_ldb_get_clk(ldb, 1); if (ret) return ret; } drm_encoder_helper_add(encoder, &imx_ldb_encoder_helper_funcs); if (imx_ldb_ch->bridge) { ret = drm_bridge_attach(encoder, imx_ldb_ch->bridge, NULL, 0); if (ret) return ret; } else { /* * We want to add the connector whenever there is no bridge * that brings its own, not only when there is a panel. For * historical reasons, the ldb driver can also work without * a panel. 
*/ drm_connector_helper_add(connector, &imx_ldb_connector_helper_funcs); drm_connector_init_with_ddc(drm, connector, &imx_ldb_connector_funcs, DRM_MODE_CONNECTOR_LVDS, imx_ldb_ch->ddc); drm_connector_attach_encoder(connector, encoder); } return 0; } struct imx_ldb_bit_mapping { u32 bus_format; u32 datawidth; const char * const mapping; }; static const struct imx_ldb_bit_mapping imx_ldb_bit_mappings[] = { { MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, 18, "spwg" }, { MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, 24, "spwg" }, { MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA, 24, "jeida" }, }; static u32 of_get_bus_format(struct device *dev, struct device_node *np) { const char *bm; u32 datawidth = 0; int ret, i; ret = of_property_read_string(np, "fsl,data-mapping", &bm); if (ret < 0) return ret; of_property_read_u32(np, "fsl,data-width", &datawidth); for (i = 0; i < ARRAY_SIZE(imx_ldb_bit_mappings); i++) { if (!strcasecmp(bm, imx_ldb_bit_mappings[i].mapping) && datawidth == imx_ldb_bit_mappings[i].datawidth) return imx_ldb_bit_mappings[i].bus_format; } dev_err(dev, "invalid data mapping: %d-bit \"%s\"\n", datawidth, bm); return -ENOENT; } static struct bus_mux imx6q_lvds_mux[2] = { { .reg = IOMUXC_GPR3, .shift = 6, .mask = IMX6Q_GPR3_LVDS0_MUX_CTL_MASK, }, { .reg = IOMUXC_GPR3, .shift = 8, .mask = IMX6Q_GPR3_LVDS1_MUX_CTL_MASK, } }; /* * For a device declaring compatible = "fsl,imx6q-ldb", "fsl,imx53-ldb", * of_match_device will walk through this list and take the first entry * matching any of its compatible values. Therefore, the more generic * entries (in this case fsl,imx53-ldb) need to be ordered last. 
*/ static const struct of_device_id imx_ldb_dt_ids[] = { { .compatible = "fsl,imx6q-ldb", .data = imx6q_lvds_mux, }, { .compatible = "fsl,imx53-ldb", .data = NULL, }, { } }; MODULE_DEVICE_TABLE(of, imx_ldb_dt_ids); static int imx_ldb_panel_ddc(struct device *dev, struct imx_ldb_channel *channel, struct device_node *child) { struct device_node *ddc_node; const u8 *edidp; int ret; ddc_node = of_parse_phandle(child, "ddc-i2c-bus", 0); if (ddc_node) { channel->ddc = of_find_i2c_adapter_by_node(ddc_node); of_node_put(ddc_node); if (!channel->ddc) { dev_warn(dev, "failed to get ddc i2c adapter\n"); return -EPROBE_DEFER; } } if (!channel->ddc) { int edid_len; /* if no DDC available, fallback to hardcoded EDID */ dev_dbg(dev, "no ddc available\n"); edidp = of_get_property(child, "edid", &edid_len); if (edidp) { channel->edid = kmemdup(edidp, edid_len, GFP_KERNEL); if (!channel->edid) return -ENOMEM; } else if (!channel->panel) { /* fallback to display-timings node */ ret = of_get_drm_display_mode(child, &channel->mode, &channel->bus_flags, OF_USE_NATIVE_MODE); if (!ret) channel->mode_valid = 1; } } return 0; } static int imx_ldb_bind(struct device *dev, struct device *master, void *data) { struct drm_device *drm = data; struct imx_ldb *imx_ldb = dev_get_drvdata(dev); int ret; int i; for (i = 0; i < 2; i++) { struct imx_ldb_channel *channel = &imx_ldb->channel[i]; if (!channel->ldb) continue; ret = imx_ldb_register(drm, channel); if (ret) return ret; } return 0; } static const struct component_ops imx_ldb_ops = { .bind = imx_ldb_bind, }; static int imx_ldb_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; const struct of_device_id *of_id = of_match_device(imx_ldb_dt_ids, dev); struct device_node *child; struct imx_ldb *imx_ldb; int dual; int ret; int i; imx_ldb = devm_kzalloc(dev, sizeof(*imx_ldb), GFP_KERNEL); if (!imx_ldb) return -ENOMEM; imx_ldb->regmap = syscon_regmap_lookup_by_phandle(np, "gpr"); if 
(IS_ERR(imx_ldb->regmap)) { dev_err(dev, "failed to get parent regmap\n"); return PTR_ERR(imx_ldb->regmap); } /* disable LDB by resetting the control register to POR default */ regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0); imx_ldb->dev = dev; if (of_id) imx_ldb->lvds_mux = of_id->data; dual = of_property_read_bool(np, "fsl,dual-channel"); if (dual) imx_ldb->ldb_ctrl |= LDB_SPLIT_MODE_EN; /* * There are three different possible clock mux configurations: * i.MX53: ipu1_di0_sel, ipu1_di1_sel * i.MX6q: ipu1_di0_sel, ipu1_di1_sel, ipu2_di0_sel, ipu2_di1_sel * i.MX6dl: ipu1_di0_sel, ipu1_di1_sel, lcdif_sel * Map them all to di0_sel...di3_sel. */ for (i = 0; i < 4; i++) { char clkname[16]; sprintf(clkname, "di%d_sel", i); imx_ldb->clk_sel[i] = devm_clk_get(imx_ldb->dev, clkname); if (IS_ERR(imx_ldb->clk_sel[i])) { ret = PTR_ERR(imx_ldb->clk_sel[i]); imx_ldb->clk_sel[i] = NULL; break; } imx_ldb->clk_parent[i] = clk_get_parent(imx_ldb->clk_sel[i]); } if (i == 0) return ret; for_each_child_of_node(np, child) { struct imx_ldb_channel *channel; int bus_format; ret = of_property_read_u32(child, "reg", &i); if (ret || i < 0 || i > 1) { ret = -EINVAL; goto free_child; } if (!of_device_is_available(child)) continue; if (dual && i > 0) { dev_warn(dev, "dual-channel mode, ignoring second output\n"); continue; } channel = &imx_ldb->channel[i]; channel->ldb = imx_ldb; channel->chno = i; /* * The output port is port@4 with an external 4-port mux or * port@2 with the internal 2-port mux. */ ret = drm_of_find_panel_or_bridge(child, imx_ldb->lvds_mux ? 4 : 2, 0, &channel->panel, &channel->bridge); if (ret && ret != -ENODEV) goto free_child; /* panel ddc only if there is no bridge */ if (!channel->bridge) { ret = imx_ldb_panel_ddc(dev, channel, child); if (ret) goto free_child; } bus_format = of_get_bus_format(dev, child); if (bus_format == -EINVAL) { /* * If no bus format was specified in the device tree, * we can still get it from the connected panel later. 
*/ if (channel->panel && channel->panel->funcs && channel->panel->funcs->get_modes) bus_format = 0; } if (bus_format < 0) { dev_err(dev, "could not determine data mapping: %d\n", bus_format); ret = bus_format; goto free_child; } channel->bus_format = bus_format; channel->child = child; } platform_set_drvdata(pdev, imx_ldb); return component_add(&pdev->dev, &imx_ldb_ops); free_child: of_node_put(child); return ret; } static int imx_ldb_remove(struct platform_device *pdev) { struct imx_ldb *imx_ldb = platform_get_drvdata(pdev); int i; for (i = 0; i < 2; i++) { struct imx_ldb_channel *channel = &imx_ldb->channel[i]; kfree(channel->edid); i2c_put_adapter(channel->ddc); } component_del(&pdev->dev, &imx_ldb_ops); return 0; } static struct platform_driver imx_ldb_driver = { .probe = imx_ldb_probe, .remove = imx_ldb_remove, .driver = { .of_match_table = imx_ldb_dt_ids, .name = DRIVER_NAME, }, }; module_platform_driver(imx_ldb_driver); MODULE_DESCRIPTION("i.MX LVDS driver"); MODULE_AUTHOR("Sascha Hauer, Pengutronix"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRIVER_NAME);
linux-master
drivers/gpu/drm/imx/ipuv3/imx-ldb.c
// SPDX-License-Identifier: GPL-2.0+ /* * i.MX IPUv3 DP Overlay Planes * * Copyright (C) 2013 Philipp Zabel, Pengutronix */ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_blend.h> #include <drm/drm_fb_dma_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_managed.h> #include <video/imx-ipu-v3.h> #include "imx-drm.h" #include "ipuv3-plane.h" struct ipu_plane_state { struct drm_plane_state base; bool use_pre; }; static inline struct ipu_plane_state * to_ipu_plane_state(struct drm_plane_state *p) { return container_of(p, struct ipu_plane_state, base); } static unsigned int ipu_src_rect_width(const struct drm_plane_state *state) { return ALIGN(drm_rect_width(&state->src) >> 16, 8); } static inline struct ipu_plane *to_ipu_plane(struct drm_plane *p) { return container_of(p, struct ipu_plane, base); } static const uint32_t ipu_plane_all_formats[] = { DRM_FORMAT_ARGB1555, DRM_FORMAT_XRGB1555, DRM_FORMAT_ABGR1555, DRM_FORMAT_XBGR1555, DRM_FORMAT_RGBA5551, DRM_FORMAT_BGRA5551, DRM_FORMAT_ARGB4444, DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB8888, DRM_FORMAT_ABGR8888, DRM_FORMAT_XBGR8888, DRM_FORMAT_RGBA8888, DRM_FORMAT_RGBX8888, DRM_FORMAT_BGRA8888, DRM_FORMAT_BGRX8888, DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, DRM_FORMAT_YUV420, DRM_FORMAT_YVU420, DRM_FORMAT_YUV422, DRM_FORMAT_YVU422, DRM_FORMAT_YUV444, DRM_FORMAT_YVU444, DRM_FORMAT_NV12, DRM_FORMAT_NV16, DRM_FORMAT_RGB565, DRM_FORMAT_RGB565_A8, DRM_FORMAT_BGR565_A8, DRM_FORMAT_RGB888_A8, DRM_FORMAT_BGR888_A8, DRM_FORMAT_RGBX8888_A8, DRM_FORMAT_BGRX8888_A8, }; static const uint32_t ipu_plane_rgb_formats[] = { DRM_FORMAT_ARGB1555, DRM_FORMAT_XRGB1555, DRM_FORMAT_ABGR1555, DRM_FORMAT_XBGR1555, DRM_FORMAT_RGBA5551, DRM_FORMAT_BGRA5551, DRM_FORMAT_ARGB4444, DRM_FORMAT_ARGB8888, DRM_FORMAT_XRGB8888, DRM_FORMAT_ABGR8888, DRM_FORMAT_XBGR8888, DRM_FORMAT_RGBA8888, 
DRM_FORMAT_RGBX8888, DRM_FORMAT_BGRA8888, DRM_FORMAT_BGRX8888, DRM_FORMAT_RGB565, DRM_FORMAT_RGB565_A8, DRM_FORMAT_BGR565_A8, DRM_FORMAT_RGB888_A8, DRM_FORMAT_BGR888_A8, DRM_FORMAT_RGBX8888_A8, DRM_FORMAT_BGRX8888_A8, }; static const uint64_t ipu_format_modifiers[] = { DRM_FORMAT_MOD_LINEAR, DRM_FORMAT_MOD_INVALID }; static const uint64_t pre_format_modifiers[] = { DRM_FORMAT_MOD_LINEAR, DRM_FORMAT_MOD_VIVANTE_TILED, DRM_FORMAT_MOD_VIVANTE_SUPER_TILED, DRM_FORMAT_MOD_INVALID }; int ipu_plane_irq(struct ipu_plane *ipu_plane) { return ipu_idmac_channel_irq(ipu_plane->ipu, ipu_plane->ipu_ch, IPU_IRQ_EOF); } static inline unsigned long drm_plane_state_to_eba(struct drm_plane_state *state, int plane) { struct drm_framebuffer *fb = state->fb; struct drm_gem_dma_object *dma_obj; int x = state->src.x1 >> 16; int y = state->src.y1 >> 16; dma_obj = drm_fb_dma_get_gem_obj(fb, plane); BUG_ON(!dma_obj); return dma_obj->dma_addr + fb->offsets[plane] + fb->pitches[plane] * y + fb->format->cpp[plane] * x; } static inline unsigned long drm_plane_state_to_ubo(struct drm_plane_state *state) { struct drm_framebuffer *fb = state->fb; struct drm_gem_dma_object *dma_obj; unsigned long eba = drm_plane_state_to_eba(state, 0); int x = state->src.x1 >> 16; int y = state->src.y1 >> 16; dma_obj = drm_fb_dma_get_gem_obj(fb, 1); BUG_ON(!dma_obj); x /= fb->format->hsub; y /= fb->format->vsub; return dma_obj->dma_addr + fb->offsets[1] + fb->pitches[1] * y + fb->format->cpp[1] * x - eba; } static inline unsigned long drm_plane_state_to_vbo(struct drm_plane_state *state) { struct drm_framebuffer *fb = state->fb; struct drm_gem_dma_object *dma_obj; unsigned long eba = drm_plane_state_to_eba(state, 0); int x = state->src.x1 >> 16; int y = state->src.y1 >> 16; dma_obj = drm_fb_dma_get_gem_obj(fb, 2); BUG_ON(!dma_obj); x /= fb->format->hsub; y /= fb->format->vsub; return dma_obj->dma_addr + fb->offsets[2] + fb->pitches[2] * y + fb->format->cpp[2] * x - eba; } static void ipu_plane_put_resources(struct 
drm_device *dev, void *ptr) { struct ipu_plane *ipu_plane = ptr; if (!IS_ERR_OR_NULL(ipu_plane->dp)) ipu_dp_put(ipu_plane->dp); if (!IS_ERR_OR_NULL(ipu_plane->dmfc)) ipu_dmfc_put(ipu_plane->dmfc); if (!IS_ERR_OR_NULL(ipu_plane->ipu_ch)) ipu_idmac_put(ipu_plane->ipu_ch); if (!IS_ERR_OR_NULL(ipu_plane->alpha_ch)) ipu_idmac_put(ipu_plane->alpha_ch); } static int ipu_plane_get_resources(struct drm_device *dev, struct ipu_plane *ipu_plane) { int ret; int alpha_ch; ipu_plane->ipu_ch = ipu_idmac_get(ipu_plane->ipu, ipu_plane->dma); if (IS_ERR(ipu_plane->ipu_ch)) { ret = PTR_ERR(ipu_plane->ipu_ch); DRM_ERROR("failed to get idmac channel: %d\n", ret); return ret; } ret = drmm_add_action_or_reset(dev, ipu_plane_put_resources, ipu_plane); if (ret) return ret; alpha_ch = ipu_channel_alpha_channel(ipu_plane->dma); if (alpha_ch >= 0) { ipu_plane->alpha_ch = ipu_idmac_get(ipu_plane->ipu, alpha_ch); if (IS_ERR(ipu_plane->alpha_ch)) { ret = PTR_ERR(ipu_plane->alpha_ch); DRM_ERROR("failed to get alpha idmac channel %d: %d\n", alpha_ch, ret); return ret; } } ipu_plane->dmfc = ipu_dmfc_get(ipu_plane->ipu, ipu_plane->dma); if (IS_ERR(ipu_plane->dmfc)) { ret = PTR_ERR(ipu_plane->dmfc); DRM_ERROR("failed to get dmfc: ret %d\n", ret); return ret; } if (ipu_plane->dp_flow >= 0) { ipu_plane->dp = ipu_dp_get(ipu_plane->ipu, ipu_plane->dp_flow); if (IS_ERR(ipu_plane->dp)) { ret = PTR_ERR(ipu_plane->dp); DRM_ERROR("failed to get dp flow: %d\n", ret); return ret; } } return 0; } static bool ipu_plane_separate_alpha(struct ipu_plane *ipu_plane) { switch (ipu_plane->base.state->fb->format->format) { case DRM_FORMAT_RGB565_A8: case DRM_FORMAT_BGR565_A8: case DRM_FORMAT_RGB888_A8: case DRM_FORMAT_BGR888_A8: case DRM_FORMAT_RGBX8888_A8: case DRM_FORMAT_BGRX8888_A8: return true; default: return false; } } static void ipu_plane_enable(struct ipu_plane *ipu_plane) { if (ipu_plane->dp) ipu_dp_enable(ipu_plane->ipu); ipu_dmfc_enable_channel(ipu_plane->dmfc); ipu_idmac_enable_channel(ipu_plane->ipu_ch); 
if (ipu_plane_separate_alpha(ipu_plane)) ipu_idmac_enable_channel(ipu_plane->alpha_ch); if (ipu_plane->dp) ipu_dp_enable_channel(ipu_plane->dp); } void ipu_plane_disable(struct ipu_plane *ipu_plane, bool disable_dp_channel) { int ret; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); ret = ipu_idmac_wait_busy(ipu_plane->ipu_ch, 50); if (ret == -ETIMEDOUT) { DRM_ERROR("[PLANE:%d] IDMAC timeout\n", ipu_plane->base.base.id); } if (ipu_plane->dp && disable_dp_channel) ipu_dp_disable_channel(ipu_plane->dp, false); ipu_idmac_disable_channel(ipu_plane->ipu_ch); if (ipu_plane->alpha_ch) ipu_idmac_disable_channel(ipu_plane->alpha_ch); ipu_dmfc_disable_channel(ipu_plane->dmfc); if (ipu_plane->dp) ipu_dp_disable(ipu_plane->ipu); if (ipu_prg_present(ipu_plane->ipu)) ipu_prg_channel_disable(ipu_plane->ipu_ch); } void ipu_plane_disable_deferred(struct drm_plane *plane) { struct ipu_plane *ipu_plane = to_ipu_plane(plane); if (ipu_plane->disabling) { ipu_plane->disabling = false; ipu_plane_disable(ipu_plane, false); } } static void ipu_plane_state_reset(struct drm_plane *plane) { struct ipu_plane_state *ipu_state; if (plane->state) { ipu_state = to_ipu_plane_state(plane->state); __drm_atomic_helper_plane_destroy_state(plane->state); kfree(ipu_state); plane->state = NULL; } ipu_state = kzalloc(sizeof(*ipu_state), GFP_KERNEL); if (ipu_state) __drm_atomic_helper_plane_reset(plane, &ipu_state->base); } static struct drm_plane_state * ipu_plane_duplicate_state(struct drm_plane *plane) { struct ipu_plane_state *state; if (WARN_ON(!plane->state)) return NULL; state = kmalloc(sizeof(*state), GFP_KERNEL); if (state) __drm_atomic_helper_plane_duplicate_state(plane, &state->base); return &state->base; } static void ipu_plane_destroy_state(struct drm_plane *plane, struct drm_plane_state *state) { struct ipu_plane_state *ipu_state = to_ipu_plane_state(state); __drm_atomic_helper_plane_destroy_state(state); kfree(ipu_state); } static bool ipu_plane_format_mod_supported(struct drm_plane *plane, 
uint32_t format, uint64_t modifier) { struct ipu_soc *ipu = to_ipu_plane(plane)->ipu; /* linear is supported for all planes and formats */ if (modifier == DRM_FORMAT_MOD_LINEAR) return true; /* * Without a PRG the possible modifiers list only includes the linear * modifier, so we always take the early return from this function and * only end up here if the PRG is present. */ return ipu_prg_format_supported(ipu, format, modifier); } static const struct drm_plane_funcs ipu_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .reset = ipu_plane_state_reset, .atomic_duplicate_state = ipu_plane_duplicate_state, .atomic_destroy_state = ipu_plane_destroy_state, .format_mod_supported = ipu_plane_format_mod_supported, }; static int ipu_plane_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state) { struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane); struct drm_crtc_state *crtc_state; struct device *dev = plane->dev->dev; struct drm_framebuffer *fb = new_state->fb; struct drm_framebuffer *old_fb = old_state->fb; unsigned long eba, ubo, vbo, old_ubo, old_vbo, alpha_eba; bool can_position = (plane->type == DRM_PLANE_TYPE_OVERLAY); int ret; /* Ok to disable */ if (!fb) return 0; if (WARN_ON(!new_state->crtc)) return -EINVAL; crtc_state = drm_atomic_get_existing_crtc_state(state, new_state->crtc); if (WARN_ON(!crtc_state)) return -EINVAL; ret = drm_atomic_helper_check_plane_state(new_state, crtc_state, DRM_PLANE_NO_SCALING, DRM_PLANE_NO_SCALING, can_position, true); if (ret) return ret; /* nothing to check when disabling or disabled */ if (!crtc_state->enable) return 0; switch (plane->type) { case DRM_PLANE_TYPE_PRIMARY: /* full plane minimum width is 13 pixels */ if (drm_rect_width(&new_state->dst) < 13) return -EINVAL; break; case DRM_PLANE_TYPE_OVERLAY: break; default: dev_warn(dev, 
"Unsupported plane type %d\n", plane->type); return -EINVAL; } if (drm_rect_height(&new_state->dst) < 2) return -EINVAL; /* * We support resizing active plane or changing its format by * forcing CRTC mode change in plane's ->atomic_check callback * and disabling all affected active planes in CRTC's ->atomic_disable * callback. The planes will be reenabled in plane's ->atomic_update * callback. */ if (old_fb && (drm_rect_width(&new_state->dst) != drm_rect_width(&old_state->dst) || drm_rect_height(&new_state->dst) != drm_rect_height(&old_state->dst) || fb->format != old_fb->format)) crtc_state->mode_changed = true; eba = drm_plane_state_to_eba(new_state, 0); if (eba & 0x7) return -EINVAL; if (fb->pitches[0] < 1 || fb->pitches[0] > 16384) return -EINVAL; if (old_fb && fb->pitches[0] != old_fb->pitches[0]) crtc_state->mode_changed = true; if (ALIGN(fb->width, 8) * fb->format->cpp[0] > fb->pitches[0] + fb->offsets[0]) { dev_warn(dev, "pitch is not big enough for 8 pixels alignment"); return -EINVAL; } switch (fb->format->format) { case DRM_FORMAT_YUV420: case DRM_FORMAT_YVU420: case DRM_FORMAT_YUV422: case DRM_FORMAT_YVU422: case DRM_FORMAT_YUV444: case DRM_FORMAT_YVU444: /* * Multiplanar formats have to meet the following restrictions: * - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO * - EBA, UBO and VBO are a multiple of 8 * - UBO and VBO are unsigned and not larger than 0xfffff8 * - Only EBA may be changed while scanout is active * - The strides of U and V planes must be identical. 
*/ vbo = drm_plane_state_to_vbo(new_state); if (vbo & 0x7 || vbo > 0xfffff8) return -EINVAL; if (old_fb && (fb->format == old_fb->format)) { old_vbo = drm_plane_state_to_vbo(old_state); if (vbo != old_vbo) crtc_state->mode_changed = true; } if (fb->pitches[1] != fb->pitches[2]) return -EINVAL; fallthrough; case DRM_FORMAT_NV12: case DRM_FORMAT_NV16: ubo = drm_plane_state_to_ubo(new_state); if (ubo & 0x7 || ubo > 0xfffff8) return -EINVAL; if (old_fb && (fb->format == old_fb->format)) { old_ubo = drm_plane_state_to_ubo(old_state); if (ubo != old_ubo) crtc_state->mode_changed = true; } if (fb->pitches[1] < 1 || fb->pitches[1] > 16384) return -EINVAL; if (old_fb && old_fb->pitches[1] != fb->pitches[1]) crtc_state->mode_changed = true; /* * The x/y offsets must be even in case of horizontal/vertical * chroma subsampling. */ if (((new_state->src.x1 >> 16) & (fb->format->hsub - 1)) || ((new_state->src.y1 >> 16) & (fb->format->vsub - 1))) return -EINVAL; break; case DRM_FORMAT_RGB565_A8: case DRM_FORMAT_BGR565_A8: case DRM_FORMAT_RGB888_A8: case DRM_FORMAT_BGR888_A8: case DRM_FORMAT_RGBX8888_A8: case DRM_FORMAT_BGRX8888_A8: alpha_eba = drm_plane_state_to_eba(new_state, 1); if (alpha_eba & 0x7) return -EINVAL; if (fb->pitches[1] < 1 || fb->pitches[1] > 16384) return -EINVAL; if (old_fb && old_fb->pitches[1] != fb->pitches[1]) crtc_state->mode_changed = true; break; } return 0; } static void ipu_plane_atomic_disable(struct drm_plane *plane, struct drm_atomic_state *state) { struct ipu_plane *ipu_plane = to_ipu_plane(plane); if (ipu_plane->dp) ipu_dp_disable_channel(ipu_plane->dp, true); ipu_plane->disabling = true; } static int ipu_chan_assign_axi_id(int ipu_chan) { switch (ipu_chan) { case IPUV3_CHANNEL_MEM_BG_SYNC: return 1; case IPUV3_CHANNEL_MEM_FG_SYNC: return 2; case IPUV3_CHANNEL_MEM_DC_SYNC: return 3; default: return 0; } } static void ipu_calculate_bursts(u32 width, u32 cpp, u32 stride, u8 *burstsize, u8 *num_bursts) { const unsigned int width_bytes = width * cpp; 
unsigned int npb, bursts; /* Maximum number of pixels per burst without overshooting stride */ for (npb = 64 / cpp; npb > 0; --npb) { if (round_up(width_bytes, npb * cpp) <= stride) break; } *burstsize = npb; /* Maximum number of consecutive bursts without overshooting stride */ for (bursts = 8; bursts > 1; bursts /= 2) { if (round_up(width_bytes, npb * cpp * bursts) <= stride) break; } *num_bursts = bursts; } static void ipu_plane_atomic_update(struct drm_plane *plane, struct drm_atomic_state *state) { struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane); struct ipu_plane *ipu_plane = to_ipu_plane(plane); struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); struct ipu_plane_state *ipu_state = to_ipu_plane_state(new_state); struct drm_crtc_state *crtc_state = new_state->crtc->state; struct drm_framebuffer *fb = new_state->fb; struct drm_rect *dst = &new_state->dst; unsigned long eba, ubo, vbo; unsigned long alpha_eba = 0; enum ipu_color_space ics; unsigned int axi_id = 0; const struct drm_format_info *info; u8 burstsize, num_bursts; u32 width, height; int active; if (ipu_plane->dp_flow == IPU_DP_FLOW_SYNC_FG) ipu_dp_set_window_pos(ipu_plane->dp, dst->x1, dst->y1); switch (ipu_plane->dp_flow) { case IPU_DP_FLOW_SYNC_BG: if (new_state->normalized_zpos == 1) { ipu_dp_set_global_alpha(ipu_plane->dp, !fb->format->has_alpha, 0xff, true); } else { ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true); } break; case IPU_DP_FLOW_SYNC_FG: if (new_state->normalized_zpos == 1) { ipu_dp_set_global_alpha(ipu_plane->dp, !fb->format->has_alpha, 0xff, false); } break; } if (ipu_plane->dp_flow == IPU_DP_FLOW_SYNC_BG) width = ipu_src_rect_width(new_state); else width = drm_rect_width(&new_state->src) >> 16; height = drm_rect_height(&new_state->src) >> 16; eba = drm_plane_state_to_eba(new_state, 0); /* * Configure PRG channel and attached PRE, this changes the EBA to an * internal SRAM location. 
*/ if (ipu_state->use_pre) { axi_id = ipu_chan_assign_axi_id(ipu_plane->dma); ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id, width, height, fb->pitches[0], fb->format->format, fb->modifier, &eba); } if (!old_state->fb || old_state->fb->format->format != fb->format->format || old_state->color_encoding != new_state->color_encoding || old_state->color_range != new_state->color_range) { ics = ipu_drm_fourcc_to_colorspace(fb->format->format); switch (ipu_plane->dp_flow) { case IPU_DP_FLOW_SYNC_BG: ipu_dp_setup_channel(ipu_plane->dp, new_state->color_encoding, new_state->color_range, ics, IPUV3_COLORSPACE_RGB); break; case IPU_DP_FLOW_SYNC_FG: ipu_dp_setup_channel(ipu_plane->dp, new_state->color_encoding, new_state->color_range, ics, IPUV3_COLORSPACE_UNKNOWN); break; } } if (old_state->fb && !drm_atomic_crtc_needs_modeset(crtc_state)) { /* nothing to do if PRE is used */ if (ipu_state->use_pre) return; active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch); ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba); ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active); if (ipu_plane_separate_alpha(ipu_plane)) { active = ipu_idmac_get_current_buffer(ipu_plane->alpha_ch); ipu_cpmem_set_buffer(ipu_plane->alpha_ch, !active, alpha_eba); ipu_idmac_select_buffer(ipu_plane->alpha_ch, !active); } return; } ics = ipu_drm_fourcc_to_colorspace(fb->format->format); switch (ipu_plane->dp_flow) { case IPU_DP_FLOW_SYNC_BG: ipu_dp_setup_channel(ipu_plane->dp, DRM_COLOR_YCBCR_BT601, DRM_COLOR_YCBCR_LIMITED_RANGE, ics, IPUV3_COLORSPACE_RGB); break; case IPU_DP_FLOW_SYNC_FG: ipu_dp_setup_channel(ipu_plane->dp, DRM_COLOR_YCBCR_BT601, DRM_COLOR_YCBCR_LIMITED_RANGE, ics, IPUV3_COLORSPACE_UNKNOWN); break; } ipu_dmfc_config_wait4eot(ipu_plane->dmfc, width); info = drm_format_info(fb->format->format); ipu_calculate_bursts(width, info->cpp[0], fb->pitches[0], &burstsize, &num_bursts); ipu_cpmem_zero(ipu_plane->ipu_ch); ipu_cpmem_set_resolution(ipu_plane->ipu_ch, width, height); 
ipu_cpmem_set_fmt(ipu_plane->ipu_ch, fb->format->format); ipu_cpmem_set_burstsize(ipu_plane->ipu_ch, burstsize); ipu_cpmem_set_high_priority(ipu_plane->ipu_ch); ipu_idmac_enable_watermark(ipu_plane->ipu_ch, true); ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1); ipu_cpmem_set_stride(ipu_plane->ipu_ch, fb->pitches[0]); ipu_cpmem_set_axi_id(ipu_plane->ipu_ch, axi_id); switch (fb->format->format) { case DRM_FORMAT_YUV420: case DRM_FORMAT_YVU420: case DRM_FORMAT_YUV422: case DRM_FORMAT_YVU422: case DRM_FORMAT_YUV444: case DRM_FORMAT_YVU444: ubo = drm_plane_state_to_ubo(new_state); vbo = drm_plane_state_to_vbo(new_state); if (fb->format->format == DRM_FORMAT_YVU420 || fb->format->format == DRM_FORMAT_YVU422 || fb->format->format == DRM_FORMAT_YVU444) swap(ubo, vbo); ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch, fb->pitches[1], ubo, vbo); dev_dbg(ipu_plane->base.dev->dev, "phy = %lu %lu %lu, x = %d, y = %d", eba, ubo, vbo, new_state->src.x1 >> 16, new_state->src.y1 >> 16); break; case DRM_FORMAT_NV12: case DRM_FORMAT_NV16: ubo = drm_plane_state_to_ubo(new_state); ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch, fb->pitches[1], ubo, ubo); dev_dbg(ipu_plane->base.dev->dev, "phy = %lu %lu, x = %d, y = %d", eba, ubo, new_state->src.x1 >> 16, new_state->src.y1 >> 16); break; case DRM_FORMAT_RGB565_A8: case DRM_FORMAT_BGR565_A8: case DRM_FORMAT_RGB888_A8: case DRM_FORMAT_BGR888_A8: case DRM_FORMAT_RGBX8888_A8: case DRM_FORMAT_BGRX8888_A8: alpha_eba = drm_plane_state_to_eba(new_state, 1); num_bursts = 0; dev_dbg(ipu_plane->base.dev->dev, "phys = %lu %lu, x = %d, y = %d", eba, alpha_eba, new_state->src.x1 >> 16, new_state->src.y1 >> 16); ipu_cpmem_set_burstsize(ipu_plane->ipu_ch, 16); ipu_cpmem_zero(ipu_plane->alpha_ch); ipu_cpmem_set_resolution(ipu_plane->alpha_ch, width, height); ipu_cpmem_set_format_passthrough(ipu_plane->alpha_ch, 8); ipu_cpmem_set_high_priority(ipu_plane->alpha_ch); ipu_idmac_set_double_buffer(ipu_plane->alpha_ch, 1); 
ipu_cpmem_set_stride(ipu_plane->alpha_ch, fb->pitches[1]); ipu_cpmem_set_burstsize(ipu_plane->alpha_ch, 16); ipu_cpmem_set_buffer(ipu_plane->alpha_ch, 0, alpha_eba); ipu_cpmem_set_buffer(ipu_plane->alpha_ch, 1, alpha_eba); break; default: dev_dbg(ipu_plane->base.dev->dev, "phys = %lu, x = %d, y = %d", eba, new_state->src.x1 >> 16, new_state->src.y1 >> 16); break; } ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 0, eba); ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba); ipu_idmac_lock_enable(ipu_plane->ipu_ch, num_bursts); ipu_plane_enable(ipu_plane); } static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = { .atomic_check = ipu_plane_atomic_check, .atomic_disable = ipu_plane_atomic_disable, .atomic_update = ipu_plane_atomic_update, }; bool ipu_plane_atomic_update_pending(struct drm_plane *plane) { struct ipu_plane *ipu_plane = to_ipu_plane(plane); struct drm_plane_state *state = plane->state; struct ipu_plane_state *ipu_state = to_ipu_plane_state(state); /* disabled crtcs must not block the update */ if (!state->crtc) return false; if (ipu_state->use_pre) return ipu_prg_channel_configure_pending(ipu_plane->ipu_ch); /* * Pretend no update is pending in the non-PRE/PRG case. For this to * happen, an atomic update would have to be deferred until after the * start of the next frame and simultaneously interrupt latency would * have to be high enough to let the atomic update finish and issue an * event before the previous end of frame interrupt handler can be * executed. 
*/ return false; } int ipu_planes_assign_pre(struct drm_device *dev, struct drm_atomic_state *state) { struct drm_crtc_state *old_crtc_state, *crtc_state; struct drm_plane_state *plane_state; struct ipu_plane_state *ipu_state; struct ipu_plane *ipu_plane; struct drm_plane *plane; struct drm_crtc *crtc; int available_pres = ipu_prg_max_active_channels(); int ret, i; for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) { ret = drm_atomic_add_affected_planes(state, crtc); if (ret) return ret; } /* * We are going over the planes in 2 passes: first we assign PREs to * planes with a tiling modifier, which need the PREs to resolve into * linear. Any failure to assign a PRE there is fatal. In the second * pass we try to assign PREs to linear FBs, to improve memory access * patterns for them. Failure at this point is non-fatal, as we can * scan out linear FBs without a PRE. */ for_each_new_plane_in_state(state, plane, plane_state, i) { ipu_state = to_ipu_plane_state(plane_state); ipu_plane = to_ipu_plane(plane); if (!plane_state->fb) { ipu_state->use_pre = false; continue; } if (!(plane_state->fb->flags & DRM_MODE_FB_MODIFIERS) || plane_state->fb->modifier == DRM_FORMAT_MOD_LINEAR) continue; if (!ipu_prg_present(ipu_plane->ipu) || !available_pres) return -EINVAL; if (!ipu_prg_format_supported(ipu_plane->ipu, plane_state->fb->format->format, plane_state->fb->modifier)) return -EINVAL; ipu_state->use_pre = true; available_pres--; } for_each_new_plane_in_state(state, plane, plane_state, i) { ipu_state = to_ipu_plane_state(plane_state); ipu_plane = to_ipu_plane(plane); if (!plane_state->fb) { ipu_state->use_pre = false; continue; } if ((plane_state->fb->flags & DRM_MODE_FB_MODIFIERS) && plane_state->fb->modifier != DRM_FORMAT_MOD_LINEAR) continue; /* make sure that modifier is initialized */ plane_state->fb->modifier = DRM_FORMAT_MOD_LINEAR; if (ipu_prg_present(ipu_plane->ipu) && available_pres && ipu_prg_format_supported(ipu_plane->ipu, 
plane_state->fb->format->format, plane_state->fb->modifier)) { ipu_state->use_pre = true; available_pres--; } else { ipu_state->use_pre = false; } } return 0; } struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu, int dma, int dp, unsigned int possible_crtcs, enum drm_plane_type type) { struct ipu_plane *ipu_plane; const uint64_t *modifiers = ipu_format_modifiers; unsigned int zpos = (type == DRM_PLANE_TYPE_PRIMARY) ? 0 : 1; unsigned int format_count; const uint32_t *formats; int ret; DRM_DEBUG_KMS("channel %d, dp flow %d, possible_crtcs=0x%x\n", dma, dp, possible_crtcs); if (dp == IPU_DP_FLOW_SYNC_BG || dp == IPU_DP_FLOW_SYNC_FG) { formats = ipu_plane_all_formats; format_count = ARRAY_SIZE(ipu_plane_all_formats); } else { formats = ipu_plane_rgb_formats; format_count = ARRAY_SIZE(ipu_plane_rgb_formats); } if (ipu_prg_present(ipu)) modifiers = pre_format_modifiers; ipu_plane = drmm_universal_plane_alloc(dev, struct ipu_plane, base, possible_crtcs, &ipu_plane_funcs, formats, format_count, modifiers, type, NULL); if (IS_ERR(ipu_plane)) { DRM_ERROR("failed to allocate and initialize %s plane\n", zpos ? "overlay" : "primary"); return ipu_plane; } ipu_plane->ipu = ipu; ipu_plane->dma = dma; ipu_plane->dp_flow = dp; drm_plane_helper_add(&ipu_plane->base, &ipu_plane_helper_funcs); if (dp == IPU_DP_FLOW_SYNC_BG || dp == IPU_DP_FLOW_SYNC_FG) ret = drm_plane_create_zpos_property(&ipu_plane->base, zpos, 0, 1); else ret = drm_plane_create_zpos_immutable_property(&ipu_plane->base, 0); if (ret) return ERR_PTR(ret); ret = drm_plane_create_color_properties(&ipu_plane->base, BIT(DRM_COLOR_YCBCR_BT601) | BIT(DRM_COLOR_YCBCR_BT709), BIT(DRM_COLOR_YCBCR_LIMITED_RANGE), DRM_COLOR_YCBCR_BT601, DRM_COLOR_YCBCR_LIMITED_RANGE); if (ret) return ERR_PTR(ret); ret = ipu_plane_get_resources(dev, ipu_plane); if (ret) { DRM_ERROR("failed to get %s plane resources: %pe\n", zpos ? "overlay" : "primary", &ret); return ERR_PTR(ret); } return ipu_plane; }
linux-master
drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c
// SPDX-License-Identifier: GPL-2.0+ /* * i.MX IPUv3 Graphics driver * * Copyright (C) 2011 Sascha Hauer, Pengutronix */ #include <linux/clk.h> #include <linux/component.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/errno.h> #include <linux/export.h> #include <linux/module.h> #include <linux/platform_device.h> #include <video/imx-ipu-v3.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_managed.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> #include "imx-drm.h" #include "ipuv3-plane.h" #define DRIVER_DESC "i.MX IPUv3 Graphics" struct ipu_crtc { struct device *dev; struct drm_crtc base; /* plane[0] is the full plane, plane[1] is the partial plane */ struct ipu_plane *plane[2]; struct ipu_dc *dc; struct ipu_di *di; int irq; struct drm_pending_vblank_event *event; }; static inline struct ipu_crtc *to_ipu_crtc(struct drm_crtc *crtc) { return container_of(crtc, struct ipu_crtc, base); } static void ipu_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent); ipu_prg_enable(ipu); ipu_dc_enable(ipu); ipu_dc_enable_channel(ipu_crtc->dc); ipu_di_enable(ipu_crtc->di); } static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc, struct drm_crtc_state *old_crtc_state) { bool disable_partial = false; bool disable_full = false; struct drm_plane *plane; drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) { if (plane == &ipu_crtc->plane[0]->base) disable_full = true; if (ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base) disable_partial = true; } if (disable_partial) ipu_plane_disable(ipu_crtc->plane[1], true); if (disable_full) ipu_plane_disable(ipu_crtc->plane[0], true); } static void ipu_crtc_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct drm_crtc_state *old_crtc_state = 
drm_atomic_get_old_crtc_state(state, crtc); struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent); ipu_dc_disable_channel(ipu_crtc->dc); ipu_di_disable(ipu_crtc->di); /* * Planes must be disabled before DC clock is removed, as otherwise the * attached IDMACs will be left in undefined state, possibly hanging * the IPU or even system. */ ipu_crtc_disable_planes(ipu_crtc, old_crtc_state); ipu_dc_disable(ipu); ipu_prg_disable(ipu); drm_crtc_vblank_off(crtc); spin_lock_irq(&crtc->dev->event_lock); if (crtc->state->event && !crtc->state->active) { drm_crtc_send_vblank_event(crtc, crtc->state->event); crtc->state->event = NULL; } spin_unlock_irq(&crtc->dev->event_lock); } static void imx_drm_crtc_reset(struct drm_crtc *crtc) { struct imx_crtc_state *state; if (crtc->state) __drm_atomic_helper_crtc_destroy_state(crtc->state); kfree(to_imx_crtc_state(crtc->state)); crtc->state = NULL; state = kzalloc(sizeof(*state), GFP_KERNEL); if (state) __drm_atomic_helper_crtc_reset(crtc, &state->base); } static struct drm_crtc_state *imx_drm_crtc_duplicate_state(struct drm_crtc *crtc) { struct imx_crtc_state *state; state = kzalloc(sizeof(*state), GFP_KERNEL); if (!state) return NULL; __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base); WARN_ON(state->base.crtc != crtc); state->base.crtc = crtc; return &state->base; } static void imx_drm_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state) { __drm_atomic_helper_crtc_destroy_state(state); kfree(to_imx_crtc_state(state)); } static int ipu_enable_vblank(struct drm_crtc *crtc) { struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); enable_irq(ipu_crtc->irq); return 0; } static void ipu_disable_vblank(struct drm_crtc *crtc) { struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); disable_irq_nosync(ipu_crtc->irq); } static const struct drm_crtc_funcs ipu_crtc_funcs = { .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, .reset = 
imx_drm_crtc_reset, .atomic_duplicate_state = imx_drm_crtc_duplicate_state, .atomic_destroy_state = imx_drm_crtc_destroy_state, .enable_vblank = ipu_enable_vblank, .disable_vblank = ipu_disable_vblank, }; static irqreturn_t ipu_irq_handler(int irq, void *dev_id) { struct ipu_crtc *ipu_crtc = dev_id; struct drm_crtc *crtc = &ipu_crtc->base; unsigned long flags; int i; drm_crtc_handle_vblank(crtc); if (ipu_crtc->event) { for (i = 0; i < ARRAY_SIZE(ipu_crtc->plane); i++) { struct ipu_plane *plane = ipu_crtc->plane[i]; if (!plane) continue; if (ipu_plane_atomic_update_pending(&plane->base)) break; } if (i == ARRAY_SIZE(ipu_crtc->plane)) { spin_lock_irqsave(&crtc->dev->event_lock, flags); drm_crtc_send_vblank_event(crtc, ipu_crtc->event); ipu_crtc->event = NULL; drm_crtc_vblank_put(crtc); spin_unlock_irqrestore(&crtc->dev->event_lock, flags); } } return IRQ_HANDLED; } static bool ipu_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); struct videomode vm; int ret; drm_display_mode_to_videomode(adjusted_mode, &vm); ret = ipu_di_adjust_videomode(ipu_crtc->di, &vm); if (ret) return false; if ((vm.vsync_len == 0) || (vm.hsync_len == 0)) return false; drm_display_mode_from_videomode(&vm, adjusted_mode); return true; } static int ipu_crtc_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); u32 primary_plane_mask = drm_plane_mask(crtc->primary); if (crtc_state->active && (primary_plane_mask & crtc_state->plane_mask) == 0) return -EINVAL; return 0; } static void ipu_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state) { drm_crtc_vblank_on(crtc); } static void ipu_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state) { spin_lock_irq(&crtc->dev->event_lock); if (crtc->state->event) { struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); 
WARN_ON(drm_crtc_vblank_get(crtc)); ipu_crtc->event = crtc->state->event; crtc->state->event = NULL; } spin_unlock_irq(&crtc->dev->event_lock); } static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_encoder *encoder; struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); struct drm_display_mode *mode = &crtc->state->adjusted_mode; struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc->state); struct ipu_di_signal_cfg sig_cfg = {}; unsigned long encoder_types = 0; dev_dbg(ipu_crtc->dev, "%s: mode->hdisplay: %d\n", __func__, mode->hdisplay); dev_dbg(ipu_crtc->dev, "%s: mode->vdisplay: %d\n", __func__, mode->vdisplay); list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->crtc == crtc) encoder_types |= BIT(encoder->encoder_type); } dev_dbg(ipu_crtc->dev, "%s: attached to encoder types 0x%lx\n", __func__, encoder_types); /* * If we have DAC or LDB, then we need the IPU DI clock to be * the same as the LDB DI clock. For TVDAC, derive the IPU DI * clock from 27 MHz TVE_DI clock, but allow to divide it. 
 */
	/*
	 * Choose the DI clock mode based on which encoder types are attached
	 * to this CRTC (encoder_types is computed earlier in this function).
	 */
	if (encoder_types & (BIT(DRM_MODE_ENCODER_DAC) |
			     BIT(DRM_MODE_ENCODER_LVDS)))
		sig_cfg.clkflags = IPU_DI_CLKMODE_SYNC | IPU_DI_CLKMODE_EXT;
	else if (encoder_types & BIT(DRM_MODE_ENCODER_TVDAC))
		sig_cfg.clkflags = IPU_DI_CLKMODE_EXT;
	else
		sig_cfg.clkflags = 0;

	sig_cfg.enable_pol = !(imx_crtc_state->bus_flags & DRM_BUS_FLAG_DE_LOW);
	/* Default to driving pixel data on negative clock edges */
	sig_cfg.clk_pol = !!(imx_crtc_state->bus_flags &
			     DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE);
	sig_cfg.bus_format = imx_crtc_state->bus_format;
	sig_cfg.v_to_h_sync = 0;
	sig_cfg.hsync_pin = imx_crtc_state->di_hsync_pin;
	sig_cfg.vsync_pin = imx_crtc_state->di_vsync_pin;

	drm_display_mode_to_videomode(mode, &sig_cfg.mode);
	/*
	 * The IPU requires an 8-pixel-aligned active width; widen hactive and
	 * shrink the front porch by the same amount to keep htotal unchanged.
	 */
	if (!IS_ALIGNED(sig_cfg.mode.hactive, 8)) {
		unsigned int new_hactive = ALIGN(sig_cfg.mode.hactive, 8);

		dev_warn(ipu_crtc->dev, "8-pixel align hactive %d -> %d\n",
			 sig_cfg.mode.hactive, new_hactive);

		sig_cfg.mode.hfront_porch -= new_hactive - sig_cfg.mode.hactive;
		sig_cfg.mode.hactive = new_hactive;
	}

	ipu_dc_init_sync(ipu_crtc->dc, ipu_crtc->di,
			 mode->flags & DRM_MODE_FLAG_INTERLACE,
			 imx_crtc_state->bus_format, sig_cfg.mode.hactive);
	ipu_di_init_sync_panel(ipu_crtc->di, &sig_cfg);
}

static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
	.mode_fixup = ipu_crtc_mode_fixup,
	.mode_set_nofb = ipu_crtc_mode_set_nofb,
	.atomic_check = ipu_crtc_atomic_check,
	.atomic_begin = ipu_crtc_atomic_begin,
	.atomic_flush = ipu_crtc_atomic_flush,
	.atomic_disable = ipu_crtc_atomic_disable,
	.atomic_enable = ipu_crtc_atomic_enable,
};

/*
 * drmm action: release the DC/DI handles acquired in ipu_get_resources().
 * Either pointer may be an ERR_PTR or NULL if acquisition failed part-way.
 */
static void ipu_put_resources(struct drm_device *dev, void *ptr)
{
	struct ipu_crtc *ipu_crtc = ptr;

	if (!IS_ERR_OR_NULL(ipu_crtc->dc))
		ipu_dc_put(ipu_crtc->dc);
	if (!IS_ERR_OR_NULL(ipu_crtc->di))
		ipu_di_put(ipu_crtc->di);
}

/*
 * Acquire the IPU display controller (DC) and display interface (DI)
 * channels named by the platform data.  Cleanup is registered as a
 * DRM-managed action immediately after the first acquisition, so a
 * failure acquiring the DI still releases the DC.
 */
static int ipu_get_resources(struct drm_device *dev, struct ipu_crtc *ipu_crtc,
			     struct ipu_client_platformdata *pdata)
{
	struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
	int ret;

	ipu_crtc->dc = ipu_dc_get(ipu, pdata->dc);
	if (IS_ERR(ipu_crtc->dc))
		return PTR_ERR(ipu_crtc->dc);

	ret = drmm_add_action_or_reset(dev, ipu_put_resources, ipu_crtc);
	if (ret)
		return ret;

	ipu_crtc->di = ipu_di_get(ipu, pdata->di);
	if (IS_ERR(ipu_crtc->di))
		return PTR_ERR(ipu_crtc->di);

	return 0;
}

/*
 * Component bind: create the primary plane, allocate the CRTC around it,
 * grab the DC/DI resources, optionally add an overlay plane when the DP
 * (display processor) flow is in use, and wire up the vblank IRQ.
 */
static int ipu_drm_bind(struct device *dev, struct device *master, void *data)
{
	struct ipu_client_platformdata *pdata = dev->platform_data;
	struct ipu_soc *ipu = dev_get_drvdata(dev->parent);
	struct drm_device *drm = data;
	struct ipu_plane *primary_plane;
	struct ipu_crtc *ipu_crtc;
	struct drm_crtc *crtc;
	int dp = -EINVAL;
	int ret;

	if (pdata->dp >= 0)
		dp = IPU_DP_FLOW_SYNC_BG;
	primary_plane = ipu_plane_init(drm, ipu, pdata->dma[0], dp, 0,
				       DRM_PLANE_TYPE_PRIMARY);
	if (IS_ERR(primary_plane))
		return PTR_ERR(primary_plane);

	ipu_crtc = drmm_crtc_alloc_with_planes(drm, struct ipu_crtc, base,
					       &primary_plane->base, NULL,
					       &ipu_crtc_funcs, NULL);
	if (IS_ERR(ipu_crtc))
		return PTR_ERR(ipu_crtc);

	ipu_crtc->dev = dev;
	ipu_crtc->plane[0] = primary_plane;

	crtc = &ipu_crtc->base;
	crtc->port = pdata->of_node;
	drm_crtc_helper_add(crtc, &ipu_helper_funcs);

	ret = ipu_get_resources(drm, ipu_crtc, pdata);
	if (ret) {
		dev_err(ipu_crtc->dev, "getting resources failed with %d.\n",
			ret);
		return ret;
	}

	/* If this crtc is using the DP, add an overlay plane */
	if (pdata->dp >= 0 && pdata->dma[1] > 0) {
		/* Overlay is optional: on failure just run without it. */
		ipu_crtc->plane[1] = ipu_plane_init(drm, ipu, pdata->dma[1],
						IPU_DP_FLOW_SYNC_FG,
						drm_crtc_mask(&ipu_crtc->base),
						DRM_PLANE_TYPE_OVERLAY);
		if (IS_ERR(ipu_crtc->plane[1]))
			ipu_crtc->plane[1] = NULL;
	}

	ipu_crtc->irq = ipu_plane_irq(ipu_crtc->plane[0]);
	ret = devm_request_irq(ipu_crtc->dev, ipu_crtc->irq, ipu_irq_handler, 0,
			       "imx_drm", ipu_crtc);
	if (ret < 0) {
		dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret);
		return ret;
	}
	/* Only enable IRQ when we actually need it to trigger work. */
	disable_irq(ipu_crtc->irq);

	return 0;
}

static const struct component_ops ipu_crtc_ops = {
	.bind = ipu_drm_bind,
};

/*
 * Platform probe: validate platform data, set the 32-bit DMA mask the IPU
 * requires, then register with the component framework; the real setup
 * happens in ipu_drm_bind() once the master DRM device binds.
 */
static int ipu_drm_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	if (!dev->platform_data)
		return -EINVAL;

	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	return component_add(dev, &ipu_crtc_ops);
}

static int ipu_drm_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &ipu_crtc_ops);
	return 0;
}

struct platform_driver ipu_drm_driver = {
	.driver = {
		.name = "imx-ipuv3-crtc",
	},
	.probe = ipu_drm_probe,
	.remove = ipu_drm_remove,
};
linux-master
drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * i.MX drm driver - parallel display implementation
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 */

#include <linux/component.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/videodev2.h>

#include <video/of_display_timing.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_managed.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>

#include "imx-drm.h"

/*
 * DRM-managed per-bind state: the connector, encoder and bridge are
 * embedded here so container_of() can recover the driver data from any
 * of them.
 */
struct imx_parallel_display_encoder {
	struct drm_connector connector;
	struct drm_encoder encoder;
	struct drm_bridge bridge;
	struct imx_parallel_display *pd;
};

/* Per-device state, allocated in probe and shared with the bind path. */
struct imx_parallel_display {
	struct device *dev;
	void *edid;		/* copy of the DT "edid" property, may be NULL */
	u32 bus_format;		/* fixed MEDIA_BUS_FMT_*, 0 = negotiate */
	u32 bus_flags;
	struct drm_display_mode mode;
	struct drm_panel *panel;
	struct drm_bridge *next_bridge;
};

static inline struct imx_parallel_display *con_to_imxpd(struct drm_connector *c)
{
	return container_of(c, struct imx_parallel_display_encoder, connector)->pd;
}

static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b)
{
	return container_of(b, struct imx_parallel_display_encoder, bridge)->pd;
}

/*
 * Mode sources in order of preference: attached panel, DT "edid"
 * property, then a native display-timing node from the device tree.
 */
static int imx_pd_connector_get_modes(struct drm_connector *connector)
{
	struct imx_parallel_display *imxpd = con_to_imxpd(connector);
	struct device_node *np = imxpd->dev->of_node;
	int num_modes;

	num_modes = drm_panel_get_modes(imxpd->panel, connector);
	if (num_modes > 0)
		return num_modes;

	if (imxpd->edid) {
		drm_connector_update_edid_property(connector, imxpd->edid);
		num_modes = drm_add_edid_modes(connector, imxpd->edid);
	}

	if (np) {
		struct drm_display_mode *mode = drm_mode_create(connector->dev);
		int ret;

		if (!mode)
			return -EINVAL;

		ret = of_get_drm_display_mode(np, &imxpd->mode,
					      &imxpd->bus_flags,
					      OF_USE_NATIVE_MODE);
		if (ret) {
			drm_mode_destroy(connector->dev, mode);
			return ret;
		}

		drm_mode_copy(mode, &imxpd->mode);
		mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
		drm_mode_probed_add(connector, mode);
		num_modes++;
	}

	return num_modes;
}

static void imx_pd_bridge_enable(struct drm_bridge *bridge)
{
	struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);

	drm_panel_prepare(imxpd->panel);
	drm_panel_enable(imxpd->panel);
}

static void imx_pd_bridge_disable(struct drm_bridge *bridge)
{
	struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);

	drm_panel_disable(imxpd->panel);
	drm_panel_unprepare(imxpd->panel);
}

/* Bus formats the parallel interface can drive. */
static const u32 imx_pd_bus_fmts[] = {
	MEDIA_BUS_FMT_RGB888_1X24,
	MEDIA_BUS_FMT_BGR888_1X24,
	MEDIA_BUS_FMT_GBR888_1X24,
	MEDIA_BUS_FMT_RGB666_1X18,
	MEDIA_BUS_FMT_RGB666_1X24_CPADHI,
	MEDIA_BUS_FMT_RGB565_1X16,
};

/*
 * Output-format negotiation: a fixed imxpd->bus_format or the display's
 * own format wins; otherwise offer the full supported list.  Returns a
 * kmalloc'd array owned by the caller (DRM core frees it).
 */
static u32 *
imx_pd_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
					 struct drm_bridge_state *bridge_state,
					 struct drm_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state,
					 unsigned int *num_output_fmts)
{
	struct drm_display_info *di = &conn_state->connector->display_info;
	struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
	u32 *output_fmts;

	if (!imxpd->bus_format && !di->num_bus_formats) {
		*num_output_fmts = ARRAY_SIZE(imx_pd_bus_fmts);
		return kmemdup(imx_pd_bus_fmts, sizeof(imx_pd_bus_fmts),
			       GFP_KERNEL);
	}

	*num_output_fmts = 1;
	output_fmts = kmalloc(sizeof(*output_fmts), GFP_KERNEL);
	if (!output_fmts)
		return NULL;

	if (!imxpd->bus_format && di->num_bus_formats)
		output_fmts[0] = di->bus_formats[0];
	else
		output_fmts[0] = imxpd->bus_format;

	return output_fmts;
}

static bool imx_pd_format_supported(u32 output_fmt)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(imx_pd_bus_fmts); i++) {
		if (imx_pd_bus_fmts[i] == output_fmt)
			return true;
	}

	return false;
}

/*
 * Input-format negotiation: the parallel display is a pass-through, so
 * the single acceptable input format equals the (validated) output one.
 */
static u32 *
imx_pd_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
					struct drm_bridge_state *bridge_state,
					struct drm_crtc_state *crtc_state,
					struct drm_connector_state *conn_state,
					u32 output_fmt,
					unsigned int *num_input_fmts)
{
	struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
	u32 *input_fmts;

	/*
	 * If the next bridge does not support bus format negotiation, let's
	 * use the static bus format definition (imxpd->bus_format) if it's
	 * specified, RGB888 when it's not.
	 */
	if (output_fmt == MEDIA_BUS_FMT_FIXED)
		output_fmt = imxpd->bus_format ? : MEDIA_BUS_FMT_RGB888_1X24;

	/* Now make sure the requested output format is supported. */
	if ((imxpd->bus_format && imxpd->bus_format != output_fmt) ||
	    !imx_pd_format_supported(output_fmt)) {
		*num_input_fmts = 0;
		return NULL;
	}

	*num_input_fmts = 1;
	input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
	if (!input_fmts)
		return NULL;

	input_fmts[0] = output_fmt;

	return input_fmts;
}

/*
 * Propagate the negotiated bus flags/format into the imx_crtc_state so
 * the CRTC's mode_set_nofb can program the DI accordingly.  Flag
 * priority: next bridge's input config, display_info, DT-provided flags.
 */
static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge,
				      struct drm_bridge_state *bridge_state,
				      struct drm_crtc_state *crtc_state,
				      struct drm_connector_state *conn_state)
{
	struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
	struct drm_display_info *di = &conn_state->connector->display_info;
	struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
	struct drm_bridge_state *next_bridge_state = NULL;
	struct drm_bridge *next_bridge;
	u32 bus_flags, bus_fmt;

	next_bridge = drm_bridge_get_next_bridge(bridge);
	if (next_bridge)
		next_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
								    next_bridge);

	if (next_bridge_state)
		bus_flags = next_bridge_state->input_bus_cfg.flags;
	else if (di->num_bus_formats)
		bus_flags = di->bus_flags;
	else
		bus_flags = imxpd->bus_flags;

	bus_fmt = bridge_state->input_bus_cfg.format;
	if (!imx_pd_format_supported(bus_fmt))
		return -EINVAL;

	bridge_state->output_bus_cfg.flags = bus_flags;
	bridge_state->input_bus_cfg.flags = bus_flags;
	imx_crtc_state->bus_flags = bus_flags;
	imx_crtc_state->bus_format = bridge_state->input_bus_cfg.format;
	/* DI pins 2/3 carry hsync/vsync for the parallel output. */
	imx_crtc_state->di_hsync_pin = 2;
	imx_crtc_state->di_vsync_pin = 3;

	return 0;
}

static const struct drm_connector_funcs imx_pd_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = imx_drm_connector_destroy,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static const struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = {
	.get_modes = imx_pd_connector_get_modes,
};

static const struct drm_bridge_funcs imx_pd_bridge_funcs = {
	.enable = imx_pd_bridge_enable,
	.disable = imx_pd_bridge_disable,
	.atomic_reset = drm_atomic_helper_bridge_reset,
	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
	.atomic_check = imx_pd_bridge_atomic_check,
	.atomic_get_input_bus_fmts = imx_pd_bridge_atomic_get_input_bus_fmts,
	.atomic_get_output_bus_fmts = imx_pd_bridge_atomic_get_output_bus_fmts,
};

/*
 * Component bind: allocate encoder/connector/bridge, attach our bridge
 * to the encoder, then either chain the downstream bridge from DT or
 * create a local DPI connector when none exists.
 */
static int imx_pd_bind(struct device *dev, struct device *master, void *data)
{
	struct drm_device *drm = data;
	struct imx_parallel_display *imxpd = dev_get_drvdata(dev);
	struct imx_parallel_display_encoder *imxpd_encoder;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct drm_bridge *bridge;
	int ret;

	imxpd_encoder = drmm_simple_encoder_alloc(drm, struct imx_parallel_display_encoder,
						  encoder, DRM_MODE_ENCODER_NONE);
	if (IS_ERR(imxpd_encoder))
		return PTR_ERR(imxpd_encoder);

	imxpd_encoder->pd = imxpd;
	connector = &imxpd_encoder->connector;
	encoder = &imxpd_encoder->encoder;
	bridge = &imxpd_encoder->bridge;

	ret = imx_drm_encoder_parse_of(drm, encoder, imxpd->dev->of_node);
	if (ret)
		return ret;

	/* set the connector's dpms to OFF so that
	 * drm_helper_connector_dpms() won't return
	 * immediately since the current state is ON
	 * at this point.
	 */
	connector->dpms = DRM_MODE_DPMS_OFF;

	bridge->funcs = &imx_pd_bridge_funcs;
	drm_bridge_attach(encoder, bridge, NULL, 0);

	if (imxpd->next_bridge) {
		ret = drm_bridge_attach(encoder, imxpd->next_bridge, bridge, 0);
		if (ret < 0)
			return ret;
	} else {
		drm_connector_helper_add(connector,
					 &imx_pd_connector_helper_funcs);
		drm_connector_init(drm, connector, &imx_pd_connector_funcs,
				   DRM_MODE_CONNECTOR_DPI);

		drm_connector_attach_encoder(connector, encoder);
	}

	return 0;
}

static const struct component_ops imx_pd_ops = {
	.bind = imx_pd_bind,
};

/*
 * Platform probe: resolve the downstream panel/bridge from DT port@1,
 * copy an optional EDID blob, map the legacy "interface-pix-fmt" string
 * to a media bus format, then defer to the component framework.
 */
static int imx_pd_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const u8 *edidp;
	struct imx_parallel_display *imxpd;
	int edid_len;
	int ret;
	u32 bus_format = 0;
	const char *fmt;

	imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL);
	if (!imxpd)
		return -ENOMEM;

	/* port@1 is the output port */
	ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel,
					  &imxpd->next_bridge);
	if (ret && ret != -ENODEV)
		return ret;

	edidp = of_get_property(np, "edid", &edid_len);
	if (edidp)
		imxpd->edid = devm_kmemdup(dev, edidp, edid_len, GFP_KERNEL);

	ret = of_property_read_string(np, "interface-pix-fmt", &fmt);
	if (!ret) {
		/* NOTE: "bgr666"/"lvds666" map onto RGB666 bus codes here. */
		if (!strcmp(fmt, "rgb24"))
			bus_format = MEDIA_BUS_FMT_RGB888_1X24;
		else if (!strcmp(fmt, "rgb565"))
			bus_format = MEDIA_BUS_FMT_RGB565_1X16;
		else if (!strcmp(fmt, "bgr666"))
			bus_format = MEDIA_BUS_FMT_RGB666_1X18;
		else if (!strcmp(fmt, "lvds666"))
			bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
	}
	imxpd->bus_format = bus_format;

	imxpd->dev = dev;

	platform_set_drvdata(pdev, imxpd);

	return component_add(dev, &imx_pd_ops);
}

static int imx_pd_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &imx_pd_ops);
	return 0;
}

static const struct of_device_id imx_pd_dt_ids[] = {
	{ .compatible = "fsl,imx-parallel-display", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_pd_dt_ids);

static struct platform_driver imx_pd_driver = {
	.probe = imx_pd_probe,
	.remove = imx_pd_remove,
	.driver = {
		.of_match_table = imx_pd_dt_ids,
		.name = "imx-parallel-display",
	},
};

module_platform_driver(imx_pd_driver);

MODULE_DESCRIPTION("i.MX parallel display driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:imx-parallel-display");
linux-master
drivers/gpu/drm/imx/ipuv3/parallel-display.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * i.MX drm driver - Television Encoder (TVEv2)
 *
 * Copyright (C) 2013 Philipp Zabel, Pengutronix
 */

#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/videodev2.h>

#include <video/imx-ipu-v3.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>

#include "imx-drm.h"

/* TVEv2 register offsets */
#define TVE_COM_CONF_REG	0x00
#define TVE_TVDAC0_CONT_REG	0x28
#define TVE_TVDAC1_CONT_REG	0x2c
#define TVE_TVDAC2_CONT_REG	0x30
#define TVE_CD_CONT_REG		0x34
#define TVE_INT_CONT_REG	0x64
#define TVE_STAT_REG		0x68
#define TVE_TST_MODE_REG	0x6c
#define TVE_MV_CONT_REG		0xdc

/* TVE_COM_CONF_REG */
#define TVE_SYNC_CH_2_EN	BIT(22)
#define TVE_SYNC_CH_1_EN	BIT(21)
#define TVE_SYNC_CH_0_EN	BIT(20)
#define TVE_TV_OUT_MODE_MASK	(0x7 << 12)
#define TVE_TV_OUT_DISABLE	(0x0 << 12)
#define TVE_TV_OUT_CVBS_0	(0x1 << 12)
#define TVE_TV_OUT_CVBS_2	(0x2 << 12)
#define TVE_TV_OUT_CVBS_0_2	(0x3 << 12)
#define TVE_TV_OUT_SVIDEO_0_1	(0x4 << 12)
#define TVE_TV_OUT_SVIDEO_0_1_CVBS2_2	(0x5 << 12)
#define TVE_TV_OUT_YPBPR	(0x6 << 12)
#define TVE_TV_OUT_RGB		(0x7 << 12)
#define TVE_TV_STAND_MASK	(0xf << 8)
#define TVE_TV_STAND_HD_1080P30	(0xc << 8)
#define TVE_P2I_CONV_EN		BIT(7)
#define TVE_INP_VIDEO_FORM	BIT(6)
#define TVE_INP_YCBCR_422	(0x0 << 6)
#define TVE_INP_YCBCR_444	(0x1 << 6)
#define TVE_DATA_SOURCE_MASK	(0x3 << 4)
#define TVE_DATA_SOURCE_BUS1	(0x0 << 4)
#define TVE_DATA_SOURCE_BUS2	(0x1 << 4)
#define TVE_DATA_SOURCE_EXT	(0x2 << 4)
#define TVE_DATA_SOURCE_TESTGEN	(0x3 << 4)
#define TVE_IPU_CLK_EN_OFS	3
#define TVE_IPU_CLK_EN		BIT(3)
#define TVE_DAC_SAMP_RATE_OFS	1
#define TVE_DAC_SAMP_RATE_WIDTH	2
#define TVE_DAC_SAMP_RATE_MASK	(0x3 << 1)
#define TVE_DAC_FULL_RATE	(0x0 << 1)
#define TVE_DAC_DIV2_RATE	(0x1 << 1)
#define TVE_DAC_DIV4_RATE	(0x2 << 1)
#define TVE_EN			BIT(0)

/* TVE_TVDACx_CONT_REG */
#define TVE_TVDAC_GAIN_MASK	(0x3f << 0)

/* TVE_CD_CONT_REG */
#define TVE_CD_CH_2_SM_EN	BIT(22)
#define TVE_CD_CH_1_SM_EN	BIT(21)
#define TVE_CD_CH_0_SM_EN	BIT(20)
#define TVE_CD_CH_2_LM_EN	BIT(18)
#define TVE_CD_CH_1_LM_EN	BIT(17)
#define TVE_CD_CH_0_LM_EN	BIT(16)
#define TVE_CD_CH_2_REF_LVL	BIT(10)
#define TVE_CD_CH_1_REF_LVL	BIT(9)
#define TVE_CD_CH_0_REF_LVL	BIT(8)
#define TVE_CD_EN		BIT(0)

/* TVE_INT_CONT_REG */
#define TVE_FRAME_END_IEN	BIT(13)
#define TVE_CD_MON_END_IEN	BIT(2)
#define TVE_CD_SM_IEN		BIT(1)
#define TVE_CD_LM_IEN		BIT(0)

/* TVE_TST_MODE_REG */
#define TVE_TVDAC_TEST_MODE_MASK	(0x7 << 0)

/* Expected DAC supply voltage in microvolts. */
#define IMX_TVE_DAC_VOLTAGE	2750000

enum {
	TVE_MODE_TVOUT,
	TVE_MODE_VGA,
};

/* DRM-managed encoder/connector pair wrapping the device state. */
struct imx_tve_encoder {
	struct drm_connector connector;
	struct drm_encoder encoder;
	struct imx_tve *tve;
};

struct imx_tve {
	struct device *dev;
	int mode;		/* TVE_MODE_TVOUT or TVE_MODE_VGA */
	int di_hsync_pin;
	int di_vsync_pin;

	struct regmap *regmap;
	struct regulator *dac_reg;
	struct i2c_adapter *ddc;
	struct clk *clk;	/* high speed TVE clock */
	struct clk *di_sel_clk;	/* IPU DI clock input selector */
	struct clk_hw clk_hw_di;
	struct clk *di_clk;	/* "tve_di" clock we register ourselves */
};

static inline struct imx_tve *con_to_tve(struct drm_connector *c)
{
	return container_of(c, struct imx_tve_encoder, connector)->tve;
}

static inline struct imx_tve *enc_to_tve(struct drm_encoder *e)
{
	return container_of(e, struct imx_tve_encoder, encoder)->tve;
}

/* Power on: clock, enable bit, clear stale IRQ status, unmask IRQs. */
static void tve_enable(struct imx_tve *tve)
{
	clk_prepare_enable(tve->clk);
	regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, TVE_EN, TVE_EN);

	/* clear interrupt status register */
	regmap_write(tve->regmap, TVE_STAT_REG, 0xffffffff);

	/* cable detection irq disabled in VGA mode, enabled in TVOUT mode */
	if (tve->mode == TVE_MODE_VGA)
		regmap_write(tve->regmap, TVE_INT_CONT_REG, 0);
	else
		regmap_write(tve->regmap, TVE_INT_CONT_REG,
			     TVE_CD_SM_IEN |
			     TVE_CD_LM_IEN |
			     TVE_CD_MON_END_IEN);
}

static void tve_disable(struct imx_tve *tve)
{
	regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, TVE_EN, 0);
	clk_disable_unprepare(tve->clk);
}

/* TV-out mode is not implemented; probe rejects it as well. */
static int tve_setup_tvout(struct imx_tve *tve)
{
	return -ENOTSUPP;
}

/*
 * Program the TVE for VGA output: DAC gain for 0.7 Vpp, RGB output from
 * bus 2 in YCbCr 4:4:4 input form, and the documented test mode value.
 */
static int tve_setup_vga(struct imx_tve *tve)
{
	unsigned int mask;
	unsigned int val;
	int ret;

	/* set gain to (1 + 10/128) to provide 0.7V peak-to-peak amplitude */
	ret = regmap_update_bits(tve->regmap, TVE_TVDAC0_CONT_REG,
				 TVE_TVDAC_GAIN_MASK, 0x0a);
	if (ret)
		return ret;
	ret = regmap_update_bits(tve->regmap, TVE_TVDAC1_CONT_REG,
				 TVE_TVDAC_GAIN_MASK, 0x0a);
	if (ret)
		return ret;
	ret = regmap_update_bits(tve->regmap, TVE_TVDAC2_CONT_REG,
				 TVE_TVDAC_GAIN_MASK, 0x0a);
	if (ret)
		return ret;

	/* set configuration register */
	mask = TVE_DATA_SOURCE_MASK | TVE_INP_VIDEO_FORM;
	val  = TVE_DATA_SOURCE_BUS2 | TVE_INP_YCBCR_444;
	mask |= TVE_TV_STAND_MASK | TVE_P2I_CONV_EN;
	val  |= TVE_TV_STAND_HD_1080P30 | 0;
	mask |= TVE_TV_OUT_MODE_MASK | TVE_SYNC_CH_0_EN;
	val  |= TVE_TV_OUT_RGB | TVE_SYNC_CH_0_EN;
	ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, mask, val);
	if (ret)
		return ret;

	/* set test mode (as documented) */
	return regmap_update_bits(tve->regmap, TVE_TST_MODE_REG,
				  TVE_TVDAC_TEST_MODE_MASK, 1);
}

/* Read modes over DDC; returns 0 when no DDC adapter is available. */
static int imx_tve_connector_get_modes(struct drm_connector *connector)
{
	struct imx_tve *tve = con_to_tve(connector);
	struct edid *edid;
	int ret = 0;

	if (!tve->ddc)
		return 0;

	edid = drm_get_edid(connector, tve->ddc);
	if (edid) {
		drm_connector_update_edid_property(connector, edid);
		ret = drm_add_edid_modes(connector, edid);
		kfree(edid);
	}

	return ret;
}

/*
 * A mode is valid only if the TVE clock can hit its pixel clock exactly,
 * either with 2x oversampling or without.
 */
static enum drm_mode_status
imx_tve_connector_mode_valid(struct drm_connector *connector,
			     struct drm_display_mode *mode)
{
	struct imx_tve *tve = con_to_tve(connector);
	unsigned long rate;

	/* pixel clock with 2x oversampling */
	rate = clk_round_rate(tve->clk, 2000UL * mode->clock) / 2000;
	if (rate == mode->clock)
		return MODE_OK;

	/* pixel clock without oversampling */
	rate = clk_round_rate(tve->clk, 1000UL * mode->clock) / 1000;
	if (rate == mode->clock)
		return MODE_OK;

	dev_warn(tve->dev, "ignoring mode %dx%d\n",
		 mode->hdisplay, mode->vdisplay);

	return MODE_BAD;
}

/*
 * Set up clocks for the requested mode (2x oversampling, halved again
 * for the DI when possible), route the DI mux to our tve_di clock, and
 * program the mode-specific register configuration.
 */
static void imx_tve_encoder_mode_set(struct drm_encoder *encoder,
				     struct drm_display_mode *orig_mode,
				     struct drm_display_mode *mode)
{
	struct imx_tve *tve = enc_to_tve(encoder);
	unsigned long rounded_rate;
	unsigned long rate;
	int div = 1;
	int ret;

	/*
	 * FIXME
	 * we should try 4k * mode->clock first,
	 * and enable 4x oversampling for lower resolutions
	 */
	rate = 2000UL * mode->clock;
	clk_set_rate(tve->clk, rate);
	rounded_rate = clk_get_rate(tve->clk);
	if (rounded_rate >= rate)
		div = 2;
	clk_set_rate(tve->di_clk, rounded_rate / div);

	ret = clk_set_parent(tve->di_sel_clk, tve->di_clk);
	if (ret < 0) {
		/* non-fatal: continue with the previous parent */
		dev_err(tve->dev, "failed to set di_sel parent to tve_di: %d\n",
			ret);
	}

	regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
			   TVE_IPU_CLK_EN, TVE_IPU_CLK_EN);

	if (tve->mode == TVE_MODE_VGA)
		ret = tve_setup_vga(tve);
	else
		ret = tve_setup_tvout(tve);
	if (ret)
		dev_err(tve->dev, "failed to set configuration: %d\n", ret);
}

static void imx_tve_encoder_enable(struct drm_encoder *encoder)
{
	struct imx_tve *tve = enc_to_tve(encoder);

	tve_enable(tve);
}

static void imx_tve_encoder_disable(struct drm_encoder *encoder)
{
	struct imx_tve *tve = enc_to_tve(encoder);

	tve_disable(tve);
}

/* Hand the CRTC the bus format and sync pins this encoder needs. */
static int imx_tve_atomic_check(struct drm_encoder *encoder,
				struct drm_crtc_state *crtc_state,
				struct drm_connector_state *conn_state)
{
	struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
	struct imx_tve *tve = enc_to_tve(encoder);

	imx_crtc_state->bus_format = MEDIA_BUS_FMT_GBR888_1X24;
	imx_crtc_state->di_hsync_pin = tve->di_hsync_pin;
	imx_crtc_state->di_vsync_pin = tve->di_vsync_pin;

	return 0;
}

static const struct drm_connector_funcs imx_tve_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = imx_drm_connector_destroy,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static const struct drm_connector_helper_funcs imx_tve_connector_helper_funcs = {
	.get_modes = imx_tve_connector_get_modes,
	.mode_valid = imx_tve_connector_mode_valid,
};

static const struct drm_encoder_helper_funcs imx_tve_encoder_helper_funcs = {
	.mode_set = imx_tve_encoder_mode_set,
	.enable = imx_tve_encoder_enable,
	.disable = imx_tve_encoder_disable,
	.atomic_check = imx_tve_atomic_check,
};

/* Acknowledge-only IRQ handler: read and clear the status register. */
static irqreturn_t imx_tve_irq_handler(int irq, void *data)
{
	struct imx_tve *tve = data;
	unsigned int val;

	regmap_read(tve->regmap, TVE_STAT_REG, &val);

	/* clear interrupt status register */
	regmap_write(tve->regmap, TVE_STAT_REG, 0xffffffff);

	return IRQ_HANDLED;
}

/* tve_di clock: rate derives from the DAC sample-rate divider bits. */
static unsigned long clk_tve_di_recalc_rate(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	struct imx_tve *tve = container_of(hw, struct imx_tve, clk_hw_di);
	unsigned int val;
	int ret;

	ret = regmap_read(tve->regmap, TVE_COM_CONF_REG, &val);
	if (ret < 0)
		return 0;

	switch (val & TVE_DAC_SAMP_RATE_MASK) {
	case TVE_DAC_DIV4_RATE:
		return parent_rate / 4;
	case TVE_DAC_DIV2_RATE:
		return parent_rate / 2;
	case TVE_DAC_FULL_RATE:
	default:
		return parent_rate;
	}

	/* NOTE(review): unreachable — every switch path returns above. */
	return 0;
}

/* Only /1, /2 and /4 of the parent rate are achievable. */
static long clk_tve_di_round_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long *prate)
{
	unsigned long div;

	div = *prate / rate;
	if (div >= 4)
		return *prate / 4;
	else if (div >= 2)
		return *prate / 2;
	return *prate;
}

static int clk_tve_di_set_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long parent_rate)
{
	struct imx_tve *tve = container_of(hw, struct imx_tve, clk_hw_di);
	unsigned long div;
	u32 val;
	int ret;

	div = parent_rate / rate;
	if (div >= 4)
		val = TVE_DAC_DIV4_RATE;
	else if (div >= 2)
		val = TVE_DAC_DIV2_RATE;
	else
		val = TVE_DAC_FULL_RATE;

	ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
				 TVE_DAC_SAMP_RATE_MASK, val);
	if (ret < 0) {
		dev_err(tve->dev, "failed to set divider: %d\n", ret);
		return ret;
	}

	return 0;
}

static const struct clk_ops clk_tve_di_ops = {
	.round_rate = clk_tve_di_round_rate,
	.set_rate = clk_tve_di_set_rate,
	.recalc_rate = clk_tve_di_recalc_rate,
};

/*
 * Register the "tve_di" output clock, parented on the main TVE clock.
 * Note: "init" lives on the stack; the clk core copies what it needs
 * during registration.
 */
static int tve_clk_init(struct imx_tve *tve, void __iomem *base)
{
	const char *tve_di_parent[1];
	struct clk_init_data init = {
		.name = "tve_di",
		.ops = &clk_tve_di_ops,
		.num_parents = 1,
		.flags = 0,
	};

	tve_di_parent[0] = __clk_get_name(tve->clk);
	init.parent_names = (const char **)&tve_di_parent;

	tve->clk_hw_di.init = &init;
	tve->di_clk = devm_clk_register(tve->dev, &tve->clk_hw_di);
	if (IS_ERR(tve->di_clk)) {
		dev_err(tve->dev, "failed to register TVE output clock: %ld\n",
			PTR_ERR(tve->di_clk));
		return PTR_ERR(tve->di_clk);
	}

	return 0;
}

/* devm action: balance the regulator_enable() done in probe. */
static void imx_tve_disable_regulator(void *data)
{
	struct imx_tve *tve = data;

	regulator_disable(tve->dac_reg);
}

static bool imx_tve_readable_reg(struct device *dev, unsigned int reg)
{
	return (reg % 4 == 0) && (reg <= 0xdc);
}

static struct regmap_config tve_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,

	.readable_reg = imx_tve_readable_reg,

	.fast_io = true,

	.max_register = 0xdc,
};

static const char * const imx_tve_modes[] = {
	[TVE_MODE_TVOUT]  = "tvout",
	[TVE_MODE_VGA] = "vga",
};

/* Map the DT "fsl,tve-mode" string to a TVE_MODE_* index, or -errno. */
static int of_get_tve_mode(struct device_node *np)
{
	const char *bm;
	int ret, i;

	ret = of_property_read_string(np, "fsl,tve-mode", &bm);
	if (ret < 0)
		return ret;

	for (i = 0; i < ARRAY_SIZE(imx_tve_modes); i++)
		if (!strcasecmp(bm, imx_tve_modes[i]))
			return i;

	return -EINVAL;
}

/*
 * Component bind: allocate encoder/connector, parse the encoder's
 * possible CRTCs from DT, register helpers and attach the VGA connector
 * with its DDC adapter.
 */
static int imx_tve_bind(struct device *dev, struct device *master, void *data)
{
	struct drm_device *drm = data;
	struct imx_tve *tve = dev_get_drvdata(dev);
	struct imx_tve_encoder *tvee;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	int encoder_type;
	int ret;

	encoder_type = tve->mode == TVE_MODE_VGA ?
		       DRM_MODE_ENCODER_DAC : DRM_MODE_ENCODER_TVDAC;

	tvee = drmm_simple_encoder_alloc(drm, struct imx_tve_encoder, encoder,
					 encoder_type);
	if (IS_ERR(tvee))
		return PTR_ERR(tvee);

	tvee->tve = tve;
	encoder = &tvee->encoder;
	connector = &tvee->connector;

	ret = imx_drm_encoder_parse_of(drm, encoder, tve->dev->of_node);
	if (ret)
		return ret;

	drm_encoder_helper_add(encoder, &imx_tve_encoder_helper_funcs);

	drm_connector_helper_add(connector, &imx_tve_connector_helper_funcs);
	ret = drm_connector_init_with_ddc(drm, connector,
					  &imx_tve_connector_funcs,
					  DRM_MODE_CONNECTOR_VGA,
					  tve->ddc);
	if (ret)
		return ret;

	return drm_connector_attach_encoder(connector, encoder);
}

static const struct component_ops imx_tve_ops = {
	.bind	= imx_tve_bind,
};

/*
 * Platform probe: gather DDC adapter, mode and sync pins from DT, map
 * registers behind a regmap, request the IRQ, enable the DAC regulator,
 * acquire clocks, sanity-check the hardware ID and register with the
 * component framework.
 */
static int imx_tve_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct device_node *ddc_node;
	struct imx_tve *tve;
	void __iomem *base;
	unsigned int val;
	int irq;
	int ret;

	tve = devm_kzalloc(dev, sizeof(*tve), GFP_KERNEL);
	if (!tve)
		return -ENOMEM;

	tve->dev = dev;

	ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
	if (ddc_node) {
		tve->ddc = of_find_i2c_adapter_by_node(ddc_node);
		of_node_put(ddc_node);
	}

	tve->mode = of_get_tve_mode(np);
	if (tve->mode != TVE_MODE_VGA) {
		dev_err(dev, "only VGA mode supported, currently\n");
		return -EINVAL;
	}

	if (tve->mode == TVE_MODE_VGA) {
		ret = of_property_read_u32(np, "fsl,hsync-pin",
					   &tve->di_hsync_pin);

		if (ret < 0) {
			dev_err(dev, "failed to get hsync pin\n");
			return ret;
		}

		ret = of_property_read_u32(np, "fsl,vsync-pin",
					   &tve->di_vsync_pin);

		if (ret < 0) {
			dev_err(dev, "failed to get vsync pin\n");
			return ret;
		}
	}

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	tve_regmap_config.lock_arg = tve;
	tve->regmap = devm_regmap_init_mmio_clk(dev, "tve", base,
						&tve_regmap_config);
	if (IS_ERR(tve->regmap)) {
		dev_err(dev, "failed to init regmap: %ld\n",
			PTR_ERR(tve->regmap));
		return PTR_ERR(tve->regmap);
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_threaded_irq(dev, irq, NULL,
					imx_tve_irq_handler, IRQF_ONESHOT,
					"imx-tve", tve);
	if (ret < 0) {
		dev_err(dev, "failed to request irq: %d\n", ret);
		return ret;
	}

	/* The DAC regulator is optional; warn only on unexpected voltage. */
	tve->dac_reg = devm_regulator_get(dev, "dac");
	if (!IS_ERR(tve->dac_reg)) {
		if (regulator_get_voltage(tve->dac_reg) != IMX_TVE_DAC_VOLTAGE)
			dev_warn(dev, "dac voltage is not %d uV\n",
				 IMX_TVE_DAC_VOLTAGE);
		ret = regulator_enable(tve->dac_reg);
		if (ret)
			return ret;
		ret = devm_add_action_or_reset(dev, imx_tve_disable_regulator,
					       tve);
		if (ret)
			return ret;
	}

	tve->clk = devm_clk_get(dev, "tve");
	if (IS_ERR(tve->clk)) {
		dev_err(dev, "failed to get high speed tve clock: %ld\n",
			PTR_ERR(tve->clk));
		return PTR_ERR(tve->clk);
	}

	/* this is the IPU DI clock input selector, can be parented to tve_di */
	tve->di_sel_clk = devm_clk_get(dev, "di_sel");
	if (IS_ERR(tve->di_sel_clk)) {
		dev_err(dev, "failed to get ipu di mux clock: %ld\n",
			PTR_ERR(tve->di_sel_clk));
		return PTR_ERR(tve->di_sel_clk);
	}

	ret = tve_clk_init(tve, base);
	if (ret < 0)
		return ret;

	ret = regmap_read(tve->regmap, TVE_COM_CONF_REG, &val);
	if (ret < 0) {
		dev_err(dev, "failed to read configuration register: %d\n",
			ret);
		return ret;
	}
	if (val != 0x00100000) {
		dev_err(dev,
			"configuration register default value indicates this is not a TVEv2\n");
		return -ENODEV;
	}

	/* disable cable detection for VGA mode */
	ret = regmap_write(tve->regmap, TVE_CD_CONT_REG, 0);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, tve);

	return component_add(dev, &imx_tve_ops);
}

static int imx_tve_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &imx_tve_ops);
	return 0;
}

static const struct of_device_id imx_tve_dt_ids[] = {
	{ .compatible = "fsl,imx53-tve", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_tve_dt_ids);

static struct platform_driver imx_tve_driver = {
	.probe		= imx_tve_probe,
	.remove		= imx_tve_remove,
	.driver		= {
		.of_match_table = imx_tve_dt_ids,
		.name	= "imx-tve",
	},
};

module_platform_driver(imx_tve_driver);

MODULE_DESCRIPTION("i.MX Television Encoder driver");
MODULE_AUTHOR("Philipp Zabel, Pengutronix");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:imx-tve");
linux-master
drivers/gpu/drm/imx/ipuv3/imx-tve.c
// SPDX-License-Identifier: GPL-2.0 OR MIT /* Copyright 2018-2019 Qiang Yu <[email protected]> */ #include <linux/io.h> #include <linux/device.h> #include "lima_device.h" #include "lima_dlbu.h" #include "lima_vm.h" #include "lima_regs.h" #define dlbu_write(reg, data) writel(data, ip->iomem + reg) #define dlbu_read(reg) readl(ip->iomem + reg) void lima_dlbu_enable(struct lima_device *dev, int num_pp) { struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp; struct lima_ip *ip = dev->ip + lima_ip_dlbu; int i, mask = 0; for (i = 0; i < num_pp; i++) { struct lima_ip *pp = pipe->processor[i]; mask |= 1 << (pp->id - lima_ip_pp0); } dlbu_write(LIMA_DLBU_PP_ENABLE_MASK, mask); } void lima_dlbu_disable(struct lima_device *dev) { struct lima_ip *ip = dev->ip + lima_ip_dlbu; dlbu_write(LIMA_DLBU_PP_ENABLE_MASK, 0); } void lima_dlbu_set_reg(struct lima_ip *ip, u32 *reg) { dlbu_write(LIMA_DLBU_TLLIST_VBASEADDR, reg[0]); dlbu_write(LIMA_DLBU_FB_DIM, reg[1]); dlbu_write(LIMA_DLBU_TLLIST_CONF, reg[2]); dlbu_write(LIMA_DLBU_START_TILE_POS, reg[3]); } static int lima_dlbu_hw_init(struct lima_ip *ip) { struct lima_device *dev = ip->dev; dlbu_write(LIMA_DLBU_MASTER_TLLIST_PHYS_ADDR, dev->dlbu_dma | 1); dlbu_write(LIMA_DLBU_MASTER_TLLIST_VADDR, LIMA_VA_RESERVE_DLBU); return 0; } int lima_dlbu_resume(struct lima_ip *ip) { return lima_dlbu_hw_init(ip); } void lima_dlbu_suspend(struct lima_ip *ip) { } int lima_dlbu_init(struct lima_ip *ip) { return lima_dlbu_hw_init(ip); } void lima_dlbu_fini(struct lima_ip *ip) { }
linux-master
drivers/gpu/drm/lima/lima_dlbu.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <[email protected]> */

#include <linux/iopoll.h>
#include <linux/device.h>

#include "lima_device.h"
#include "lima_pmu.h"
#include "lima_regs.h"

#define pmu_write(reg, data) writel(data, ip->iomem + reg)
#define pmu_read(reg) readl(ip->iomem + reg)

/* Poll the PMU raw interrupt status until the command-done bit is set,
 * then acknowledge it.  Returns 0 on success or the poll-timeout error.
 */
static int lima_pmu_wait_cmd(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;
	int err;
	u32 v;

	/* 100us poll interval, 100ms total timeout */
	err = readl_poll_timeout(ip->iomem + LIMA_PMU_INT_RAWSTAT,
				 v, v & LIMA_PMU_INT_CMD_MASK,
				 100, 100000);
	if (err) {
		dev_err(dev->dev, "timeout wait pmu cmd\n");
		return err;
	}

	pmu_write(LIMA_PMU_INT_CLEAR, LIMA_PMU_INT_CMD_MASK);
	return 0;
}

/* Build the PMU power-domain mask covering GP0 plus every PP core that
 * is present.  Mali400 uses one bit per PP (plus a shared L2 bit);
 * Mali450 groups PP1-3 and PP4-7 behind single domain bits.
 */
static u32 lima_pmu_get_ip_mask(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;
	u32 ret = 0;
	int i;

	ret |= LIMA_PMU_POWER_GP0_MASK;

	if (dev->id == lima_gpu_mali400) {
		ret |= LIMA_PMU_POWER_L2_MASK;
		for (i = 0; i < 4; i++) {
			if (dev->ip[lima_ip_pp0 + i].present)
				ret |= LIMA_PMU_POWER_PP_MASK(i);
		}
	} else {
		if (dev->ip[lima_ip_pp0].present)
			ret |= LIMA450_PMU_POWER_PP0_MASK;
		/* one shared domain bit for the PP1-3 group */
		for (i = lima_ip_pp1; i <= lima_ip_pp3; i++) {
			if (dev->ip[i].present) {
				ret |= LIMA450_PMU_POWER_PP13_MASK;
				break;
			}
		}
		/* one shared domain bit for the PP4-7 group */
		for (i = lima_ip_pp4; i <= lima_ip_pp7; i++) {
			if (dev->ip[i].present) {
				ret |= LIMA450_PMU_POWER_PP47_MASK;
				break;
			}
		}
	}

	return ret;
}

/* Mask PMU interrupts, program the switch delay, then power up every
 * domain the status register reports as off.
 */
static int lima_pmu_hw_init(struct lima_ip *ip)
{
	int err;
	u32 stat;

	pmu_write(LIMA_PMU_INT_MASK, 0);

	/* If this value is too low, when in high GPU clk freq,
	 * GPU will be in unstable state.
	 */
	pmu_write(LIMA_PMU_SW_DELAY, 0xffff);

	/* status reg 1=off 0=on */
	stat = pmu_read(LIMA_PMU_STATUS);

	/* power up all ip */
	if (stat) {
		pmu_write(LIMA_PMU_POWER_UP, stat);
		err = lima_pmu_wait_cmd(ip);
		if (err)
			return err;
	}
	return 0;
}

/* Power down every currently-on domain in this device's mask.  The mask
 * is computed lazily on first use and cached in ip->data.mask.
 */
static void lima_pmu_hw_fini(struct lima_ip *ip)
{
	u32 stat;

	if (!ip->data.mask)
		ip->data.mask = lima_pmu_get_ip_mask(ip);

	/* status reg 1=off, so invert to get the domains still on */
	stat = ~pmu_read(LIMA_PMU_STATUS) & ip->data.mask;
	if (stat) {
		pmu_write(LIMA_PMU_POWER_DOWN, stat);

		/* Don't wait for interrupt on Mali400 if all domains are
		 * powered off because the HW won't generate an interrupt
		 * in this case.
		 */
		if (ip->dev->id == lima_gpu_mali400)
			pmu_write(LIMA_PMU_INT_CLEAR, LIMA_PMU_INT_CMD_MASK);
		else
			lima_pmu_wait_cmd(ip);
	}
}

int lima_pmu_resume(struct lima_ip *ip)
{
	return lima_pmu_hw_init(ip);
}

void lima_pmu_suspend(struct lima_ip *ip)
{
	lima_pmu_hw_fini(ip);
}

int lima_pmu_init(struct lima_ip *ip)
{
	return lima_pmu_hw_init(ip);
}

void lima_pmu_fini(struct lima_ip *ip)
{
	lima_pmu_hw_fini(ip);
}
linux-master
drivers/gpu/drm/lima/lima_pmu.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <[email protected]> */

#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/device.h>
#include <linux/slab.h>

#include <drm/lima_drm.h>

#include "lima_device.h"
#include "lima_gp.h"
#include "lima_regs.h"
#include "lima_gem.h"
#include "lima_vm.h"

#define gp_write(reg, data) writel(data, ip->iomem + reg)
#define gp_read(reg) readl(ip->iomem + reg)

/* GP interrupt handler: classify error vs. completion interrupts.
 * A PLBU out-of-memory error leaves the task recoverable (the heap can
 * be grown); any other error marks the task unrecoverable.  A normal
 * completion is only "done" when an end-of-command-list bit is set and
 * neither the VS nor the PLBU unit is still active.
 */
static irqreturn_t lima_gp_irq_handler(int irq, void *data)
{
	struct lima_ip *ip = data;
	struct lima_device *dev = ip->dev;
	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
	struct lima_sched_task *task = pipe->current_task;
	u32 state = gp_read(LIMA_GP_INT_STAT);
	u32 status = gp_read(LIMA_GP_STATUS);
	bool done = false;

	/* for shared irq case */
	if (!state)
		return IRQ_NONE;

	if (state & LIMA_GP_IRQ_MASK_ERROR) {
		if ((state & LIMA_GP_IRQ_MASK_ERROR) ==
		    LIMA_GP_IRQ_PLBU_OUT_OF_MEM) {
			dev_dbg(dev->dev, "gp out of heap irq status=%x\n",
				status);
		} else {
			dev_err(dev->dev, "gp error irq state=%x status=%x\n",
				state, status);
			if (task)
				task->recoverable = false;
		}

		/* mask all interrupts before hard reset */
		gp_write(LIMA_GP_INT_MASK, 0);

		pipe->error = true;
		done = true;
	} else {
		bool valid = state & (LIMA_GP_IRQ_VS_END_CMD_LST |
				      LIMA_GP_IRQ_PLBU_END_CMD_LST);
		bool active = status & (LIMA_GP_STATUS_VS_ACTIVE |
					LIMA_GP_STATUS_PLBU_ACTIVE);
		done = valid && !active;
		pipe->error = false;
	}

	gp_write(LIMA_GP_INT_CLEAR, state);

	if (done)
		lima_sched_pipe_task_done(pipe);

	return IRQ_HANDLED;
}

/* Kick off a soft reset without waiting for it to finish; the wait is
 * deferred to lima_gp_soft_reset_async_wait() before the next task runs.
 * Idempotent while a reset is already pending (tracked in data.async_reset).
 */
static void lima_gp_soft_reset_async(struct lima_ip *ip)
{
	if (ip->data.async_reset)
		return;

	gp_write(LIMA_GP_INT_MASK, 0);
	gp_write(LIMA_GP_INT_CLEAR, LIMA_GP_IRQ_RESET_COMPLETED);
	gp_write(LIMA_GP_CMD, LIMA_GP_CMD_SOFT_RESET);
	ip->data.async_reset = true;
}

/* Wait for a previously started async soft reset to complete, then
 * re-arm the interrupt mask.  No-op if no reset is pending.
 */
static int lima_gp_soft_reset_async_wait(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;
	int err;
	u32 v;

	if (!ip->data.async_reset)
		return 0;

	err = readl_poll_timeout(ip->iomem + LIMA_GP_INT_RAWSTAT, v,
				 v & LIMA_GP_IRQ_RESET_COMPLETED,
				 0, 100);
	if (err) {
		dev_err(dev->dev, "gp soft reset time out\n");
		return err;
	}

	gp_write(LIMA_GP_INT_CLEAR, LIMA_GP_IRQ_MASK_ALL);
	gp_write(LIMA_GP_INT_MASK, LIMA_GP_IRQ_MASK_USED);

	ip->data.async_reset = false;
	return 0;
}

/* Sanity-check the userspace frame: every command-list/heap range must
 * have start <= end, and at least one of the VS or PLBU command lists
 * must be non-empty.
 */
static int lima_gp_task_validate(struct lima_sched_pipe *pipe,
				 struct lima_sched_task *task)
{
	struct drm_lima_gp_frame *frame = task->frame;
	u32 *f = frame->frame;
	(void)pipe;

	if (f[LIMA_GP_VSCL_START_ADDR >> 2] >
	    f[LIMA_GP_VSCL_END_ADDR >> 2] ||
	    f[LIMA_GP_PLBUCL_START_ADDR >> 2] >
	    f[LIMA_GP_PLBUCL_END_ADDR >> 2] ||
	    f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2] >
	    f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2])
		return -EINVAL;

	if (f[LIMA_GP_VSCL_START_ADDR >> 2] ==
	    f[LIMA_GP_VSCL_END_ADDR >> 2] &&
	    f[LIMA_GP_PLBUCL_START_ADDR >> 2] ==
	    f[LIMA_GP_PLBUCL_END_ADDR >> 2])
		return -EINVAL;

	return 0;
}

/* Program the GP frame registers and start the VS and/or PLBU units.
 * If a heap BO backs the PLBU allocation range, patch the end address
 * to the real heap size and mark the task recoverable (out-of-memory
 * can then be handled by growing the heap).
 */
static void lima_gp_task_run(struct lima_sched_pipe *pipe,
			     struct lima_sched_task *task)
{
	struct lima_ip *ip = pipe->processor[0];
	struct drm_lima_gp_frame *frame = task->frame;
	u32 *f = frame->frame;
	u32 cmd = 0;
	int i;

	/* update real heap buffer size for GP */
	for (i = 0; i < task->num_bos; i++) {
		struct lima_bo *bo = task->bos[i];

		if (bo->heap_size &&
		    lima_vm_get_va(task->vm, bo) ==
		    f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2]) {
			f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2] =
				f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2] +
				bo->heap_size;
			task->recoverable = true;
			task->heap = bo;
			break;
		}
	}

	if (f[LIMA_GP_VSCL_START_ADDR >> 2] !=
	    f[LIMA_GP_VSCL_END_ADDR >> 2])
		cmd |= LIMA_GP_CMD_START_VS;
	if (f[LIMA_GP_PLBUCL_START_ADDR >> 2] !=
	    f[LIMA_GP_PLBUCL_END_ADDR >> 2])
		cmd |= LIMA_GP_CMD_START_PLBU;

	/* before any hw ops, wait last success task async soft reset */
	lima_gp_soft_reset_async_wait(ip);

	for (i = 0; i < LIMA_GP_FRAME_REG_NUM; i++)
		writel(f[i], ip->iomem + LIMA_GP_VSCL_START_ADDR + i * 4);

	gp_write(LIMA_GP_CMD, LIMA_GP_CMD_UPDATE_PLBU_ALLOC);
	gp_write(LIMA_GP_CMD, cmd);
}

/* Reset is complete once a scratch register write sticks; the magic
 * value is arbitrary, only the read-back match matters.
 */
static int lima_gp_hard_reset_poll(struct lima_ip *ip)
{
	gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC01A0000);
	return gp_read(LIMA_GP_PERF_CNT_0_LIMIT) == 0xC01A0000;
}

/* Full GP reset: mask interrupts, issue the reset command, poll until
 * the unit responds again, then restore the interrupt mask.
 */
static int lima_gp_hard_reset(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;
	int ret;

	gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC0FFE000);
	gp_write(LIMA_GP_INT_MASK, 0);
	gp_write(LIMA_GP_CMD, LIMA_GP_CMD_RESET);
	ret = lima_poll_timeout(ip, lima_gp_hard_reset_poll, 10, 100);
	if (ret) {
		dev_err(dev->dev, "gp hard reset timeout\n");
		return ret;
	}

	gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0);
	gp_write(LIMA_GP_INT_CLEAR, LIMA_GP_IRQ_MASK_ALL);
	gp_write(LIMA_GP_INT_MASK, LIMA_GP_IRQ_MASK_USED);
	return 0;
}

/* After a task finishes, start an async soft reset so the next task
 * only has to wait for completion, hiding the reset latency.
 */
static void lima_gp_task_fini(struct lima_sched_pipe *pipe)
{
	lima_gp_soft_reset_async(pipe->processor[0]);
}

static void lima_gp_task_error(struct lima_sched_pipe *pipe)
{
	struct lima_ip *ip = pipe->processor[0];

	dev_err(ip->dev->dev, "gp task error int_state=%x status=%x\n",
		gp_read(LIMA_GP_INT_STAT), gp_read(LIMA_GP_STATUS));

	lima_gp_hard_reset(ip);
}

static void lima_gp_task_mmu_error(struct lima_sched_pipe *pipe)
{
	lima_sched_pipe_task_done(pipe);
}

/* Recover from PLBU out-of-memory: grow the heap BO if the whole
 * current heap was consumed, then hand the GP a fresh allocation range
 * starting where the failed one ended, and resume tiling.
 */
static int lima_gp_task_recover(struct lima_sched_pipe *pipe)
{
	struct lima_ip *ip = pipe->processor[0];
	struct lima_sched_task *task = pipe->current_task;
	struct drm_lima_gp_frame *frame = task->frame;
	u32 *f = frame->frame;
	size_t fail_size =
		f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2] -
		f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2];

	if (fail_size == task->heap->heap_size) {
		int ret;

		ret = lima_heap_alloc(task->heap, task->vm);
		if (ret < 0)
			return ret;
	}

	gp_write(LIMA_GP_INT_MASK, LIMA_GP_IRQ_MASK_USED);
	/* Resume from where we stopped, i.e. new start is old end */
	gp_write(LIMA_GP_PLBU_ALLOC_START_ADDR,
		 f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2]);
	f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2] =
		f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2] + task->heap->heap_size;
	gp_write(LIMA_GP_PLBU_ALLOC_END_ADDR,
		 f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2]);
	gp_write(LIMA_GP_CMD, LIMA_GP_CMD_UPDATE_PLBU_ALLOC);
	return 0;
}

/* Decode the GP version register and log the core name and revision. */
static void lima_gp_print_version(struct lima_ip *ip)
{
	u32 version, major, minor;
	char *name;

	version = gp_read(LIMA_GP_VERSION);
	major = (version >> 8) & 0xFF;
	minor = version & 0xFF;
	switch (version >> 16) {
	case 0xA07:
	    name = "mali200";
		break;
	case 0xC07:
		name = "mali300";
		break;
	case 0xB07:
		name = "mali400";
		break;
	case 0xD07:
		name = "mali450";
		break;
	default:
		name = "unknown";
		break;
	}
	dev_info(ip->dev->dev, "%s - %s version major %d minor %d\n",
		 lima_ip_name(ip), name, major, minor);
}

/* Task slab shared across all GP pipe instances, refcounted so it is
 * only destroyed when the last pipe is torn down.
 */
static struct kmem_cache *lima_gp_task_slab;
static int lima_gp_task_slab_refcnt;

static int lima_gp_hw_init(struct lima_ip *ip)
{
	ip->data.async_reset = false;
	lima_gp_soft_reset_async(ip);
	return lima_gp_soft_reset_async_wait(ip);
}

int lima_gp_resume(struct lima_ip *ip)
{
	return lima_gp_hw_init(ip);
}

void lima_gp_suspend(struct lima_ip *ip)
{

}

int lima_gp_init(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;
	int err;

	lima_gp_print_version(ip);

	err = lima_gp_hw_init(ip);
	if (err)
		return err;

	err = devm_request_irq(dev->dev, ip->irq, lima_gp_irq_handler,
			       IRQF_SHARED, lima_ip_name(ip), ip);
	if (err) {
		dev_err(dev->dev, "gp %s fail to request irq\n",
			lima_ip_name(ip));
		return err;
	}

	dev->gp_version = gp_read(LIMA_GP_VERSION);

	return 0;
}

void lima_gp_fini(struct lima_ip *ip)
{

}

/* Register the GP scheduler pipe callbacks and (first caller only)
 * create the usercopy-whitelisted task slab; the frame part of each
 * task is copied from/to userspace.
 */
int lima_gp_pipe_init(struct lima_device *dev)
{
	int frame_size = sizeof(struct drm_lima_gp_frame);
	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;

	if (!lima_gp_task_slab) {
		lima_gp_task_slab = kmem_cache_create_usercopy(
			"lima_gp_task", sizeof(struct lima_sched_task) +
			frame_size, 0, SLAB_HWCACHE_ALIGN,
			sizeof(struct lima_sched_task), frame_size, NULL);
		if (!lima_gp_task_slab)
			return -ENOMEM;
	}

	lima_gp_task_slab_refcnt++;

	pipe->frame_size = frame_size;
	pipe->task_slab = lima_gp_task_slab;

	pipe->task_validate = lima_gp_task_validate;
	pipe->task_run = lima_gp_task_run;
	pipe->task_fini = lima_gp_task_fini;
	pipe->task_error = lima_gp_task_error;
	pipe->task_mmu_error = lima_gp_task_mmu_error;
	pipe->task_recover = lima_gp_task_recover;

	return 0;
}

void lima_gp_pipe_fini(struct lima_device *dev)
{
	if (!--lima_gp_task_slab_refcnt) {
		kmem_cache_destroy(lima_gp_task_slab);
		lima_gp_task_slab = NULL;
	}
}
linux-master
drivers/gpu/drm/lima/lima_gp.c
// SPDX-License-Identifier: GPL-2.0 OR MIT /* Copyright 2017-2019 Qiang Yu <[email protected]> */ #include <linux/iopoll.h> #include <linux/device.h> #include "lima_device.h" #include "lima_l2_cache.h" #include "lima_regs.h" #define l2_cache_write(reg, data) writel(data, ip->iomem + reg) #define l2_cache_read(reg) readl(ip->iomem + reg) static int lima_l2_cache_wait_idle(struct lima_ip *ip) { struct lima_device *dev = ip->dev; int err; u32 v; err = readl_poll_timeout(ip->iomem + LIMA_L2_CACHE_STATUS, v, !(v & LIMA_L2_CACHE_STATUS_COMMAND_BUSY), 0, 1000); if (err) { dev_err(dev->dev, "l2 cache wait command timeout\n"); return err; } return 0; } int lima_l2_cache_flush(struct lima_ip *ip) { int ret; spin_lock(&ip->data.lock); l2_cache_write(LIMA_L2_CACHE_COMMAND, LIMA_L2_CACHE_COMMAND_CLEAR_ALL); ret = lima_l2_cache_wait_idle(ip); spin_unlock(&ip->data.lock); return ret; } static int lima_l2_cache_hw_init(struct lima_ip *ip) { int err; err = lima_l2_cache_flush(ip); if (err) return err; l2_cache_write(LIMA_L2_CACHE_ENABLE, LIMA_L2_CACHE_ENABLE_ACCESS | LIMA_L2_CACHE_ENABLE_READ_ALLOCATE); l2_cache_write(LIMA_L2_CACHE_MAX_READS, 0x1c); return 0; } int lima_l2_cache_resume(struct lima_ip *ip) { return lima_l2_cache_hw_init(ip); } void lima_l2_cache_suspend(struct lima_ip *ip) { } int lima_l2_cache_init(struct lima_ip *ip) { int i; u32 size; struct lima_device *dev = ip->dev; /* l2_cache2 only exists when one of PP4-7 present */ if (ip->id == lima_ip_l2_cache2) { for (i = lima_ip_pp4; i <= lima_ip_pp7; i++) { if (dev->ip[i].present) break; } if (i > lima_ip_pp7) return -ENODEV; } spin_lock_init(&ip->data.lock); size = l2_cache_read(LIMA_L2_CACHE_SIZE); dev_info(dev->dev, "l2 cache %uK, %u-way, %ubyte cache line, %ubit external bus\n", 1 << (((size >> 16) & 0xff) - 10), 1 << ((size >> 8) & 0xff), 1 << (size & 0xff), 1 << ((size >> 24) & 0xff)); return lima_l2_cache_hw_init(ip); } void lima_l2_cache_fini(struct lima_ip *ip) { }
linux-master
drivers/gpu/drm/lima/lima_l2_cache.c
// SPDX-License-Identifier: GPL-2.0 OR MIT /* Copyright 2018-2019 Qiang Yu <[email protected]> */ #include <linux/io.h> #include <linux/device.h> #include "lima_device.h" #include "lima_bcast.h" #include "lima_regs.h" #define bcast_write(reg, data) writel(data, ip->iomem + reg) #define bcast_read(reg) readl(ip->iomem + reg) void lima_bcast_enable(struct lima_device *dev, int num_pp) { struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp; struct lima_ip *ip = dev->ip + lima_ip_bcast; int i, mask = bcast_read(LIMA_BCAST_BROADCAST_MASK) & 0xffff0000; for (i = 0; i < num_pp; i++) { struct lima_ip *pp = pipe->processor[i]; mask |= 1 << (pp->id - lima_ip_pp0); } bcast_write(LIMA_BCAST_BROADCAST_MASK, mask); } static int lima_bcast_hw_init(struct lima_ip *ip) { bcast_write(LIMA_BCAST_BROADCAST_MASK, ip->data.mask << 16); bcast_write(LIMA_BCAST_INTERRUPT_MASK, ip->data.mask); return 0; } int lima_bcast_resume(struct lima_ip *ip) { return lima_bcast_hw_init(ip); } void lima_bcast_suspend(struct lima_ip *ip) { } int lima_bcast_init(struct lima_ip *ip) { int i; for (i = lima_ip_pp0; i <= lima_ip_pp7; i++) { if (ip->dev->ip[i].present) ip->data.mask |= 1 << (i - lima_ip_pp0); } return lima_bcast_hw_init(ip); } void lima_bcast_fini(struct lima_ip *ip) { }
linux-master
drivers/gpu/drm/lima/lima_bcast.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <[email protected]> */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/slab.h>

#include <drm/lima_drm.h>

#include "lima_device.h"
#include "lima_pp.h"
#include "lima_dlbu.h"
#include "lima_bcast.h"
#include "lima_vm.h"
#include "lima_regs.h"

#define pp_write(reg, data) writel(data, ip->iomem + reg)
#define pp_read(reg) readl(ip->iomem + reg)

/* Common per-PP interrupt processing: flag pipe errors (masking further
 * interrupts ahead of the hard reset) and acknowledge the state bits.
 */
static void lima_pp_handle_irq(struct lima_ip *ip, u32 state)
{
	struct lima_device *dev = ip->dev;
	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;

	if (state & LIMA_PP_IRQ_MASK_ERROR) {
		u32 status = pp_read(LIMA_PP_STATUS);

		dev_err(dev->dev, "pp error irq state=%x status=%x\n",
			state, status);

		pipe->error = true;

		/* mask all interrupts before hard reset */
		pp_write(LIMA_PP_INT_MASK, 0);
	}

	pp_write(LIMA_PP_INT_CLEAR, state);
}

/* Per-core IRQ handler (Mali400 style): one interrupt per PP; the pipe
 * task counter tracks how many cores are still running.
 */
static irqreturn_t lima_pp_irq_handler(int irq, void *data)
{
	struct lima_ip *ip = data;
	struct lima_device *dev = ip->dev;
	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
	u32 state = pp_read(LIMA_PP_INT_STATUS);

	/* for shared irq case */
	if (!state)
		return IRQ_NONE;

	lima_pp_handle_irq(ip, state);

	if (atomic_dec_and_test(&pipe->task))
		lima_sched_pipe_task_done(pipe);

	return IRQ_HANDLED;
}

/* Broadcast IRQ handler (Mali450): a single interrupt line covers all
 * PPs, so walk every core of the current frame and process whichever
 * ones have pending state; pipe->done tracks cores already retired.
 */
static irqreturn_t lima_pp_bcast_irq_handler(int irq, void *data)
{
	int i;
	irqreturn_t ret = IRQ_NONE;
	struct lima_ip *pp_bcast = data;
	struct lima_device *dev = pp_bcast->dev;
	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
	struct drm_lima_m450_pp_frame *frame;

	/* for shared irq case */
	if (!pipe->current_task)
		return IRQ_NONE;

	frame = pipe->current_task->frame;

	for (i = 0; i < frame->num_pp; i++) {
		struct lima_ip *ip = pipe->processor[i];
		u32 status, state;

		if (pipe->done & (1 << i))
			continue;

		/* status read first in case int state change in the middle
		 * which may miss the interrupt handling
		 */
		status = pp_read(LIMA_PP_STATUS);
		state = pp_read(LIMA_PP_INT_STATUS);

		if (state) {
			lima_pp_handle_irq(ip, state);
			ret = IRQ_HANDLED;
		} else {
			if (status & LIMA_PP_STATUS_RENDERING_ACTIVE)
				continue;
		}

		pipe->done |= (1 << i);
		if (atomic_dec_and_test(&pipe->task))
			lima_sched_pipe_task_done(pipe);
	}

	return ret;
}

/* Kick off a soft reset without waiting; the completion wait happens in
 * lima_pp_soft_reset_async_wait() before the next task touches the core.
 */
static void lima_pp_soft_reset_async(struct lima_ip *ip)
{
	if (ip->data.async_reset)
		return;

	pp_write(LIMA_PP_INT_MASK, 0);
	pp_write(LIMA_PP_INT_RAWSTAT, LIMA_PP_IRQ_MASK_ALL);
	pp_write(LIMA_PP_CTRL, LIMA_PP_CTRL_SOFT_RESET);
	ip->data.async_reset = true;
}

/* Reset is done when rendering has stopped and only the reset-completed
 * bit remains in the raw interrupt status.
 */
static int lima_pp_soft_reset_poll(struct lima_ip *ip)
{
	return !(pp_read(LIMA_PP_STATUS) & LIMA_PP_STATUS_RENDERING_ACTIVE) &&
		pp_read(LIMA_PP_INT_RAWSTAT) == LIMA_PP_IRQ_RESET_COMPLETED;
}

/* Wait for one core's pending soft reset and re-arm its interrupts. */
static int lima_pp_soft_reset_async_wait_one(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;
	int ret;

	ret = lima_poll_timeout(ip, lima_pp_soft_reset_poll, 0, 100);
	if (ret) {
		dev_err(dev->dev, "pp %s reset time out\n", lima_ip_name(ip));
		return ret;
	}

	pp_write(LIMA_PP_INT_CLEAR, LIMA_PP_IRQ_MASK_ALL);
	pp_write(LIMA_PP_INT_MASK, LIMA_PP_IRQ_MASK_USED);
	return 0;
}

/* Wait for pending soft resets: on the broadcast IP wait for every PP
 * taking part in the current frame, otherwise just this core.
 */
static int lima_pp_soft_reset_async_wait(struct lima_ip *ip)
{
	int i, err = 0;

	if (!ip->data.async_reset)
		return 0;

	if (ip->id == lima_ip_pp_bcast) {
		struct lima_device *dev = ip->dev;
		struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
		struct drm_lima_m450_pp_frame *frame = pipe->current_task->frame;

		for (i = 0; i < frame->num_pp; i++)
			err |= lima_pp_soft_reset_async_wait_one(pipe->processor[i]);
	} else
		err = lima_pp_soft_reset_async_wait_one(ip);

	ip->data.async_reset = false;
	return err;
}

/* Program the frame registers and the three write-back unit register
 * banks from the flat frame/wb arrays.
 */
static void lima_pp_write_frame(struct lima_ip *ip, u32 *frame, u32 *wb)
{
	int i, j, n = 0;

	for (i = 0; i < LIMA_PP_FRAME_REG_NUM; i++)
		writel(frame[i], ip->iomem + LIMA_PP_FRAME + i * 4);

	for (i = 0; i < 3; i++) {
		for (j = 0; j < LIMA_PP_WB_REG_NUM; j++)
			writel(wb[n++], ip->iomem + LIMA_PP_WB(i) + j * 4);
	}
}

/* Reset is complete once a scratch register write sticks. */
static int lima_pp_hard_reset_poll(struct lima_ip *ip)
{
	pp_write(LIMA_PP_PERF_CNT_0_LIMIT, 0xC01A0000);
	return pp_read(LIMA_PP_PERF_CNT_0_LIMIT) == 0xC01A0000;
}

/* Full PP reset: mask interrupts, force reset, poll until the core
 * responds, then restore the interrupt mask.
 */
static int lima_pp_hard_reset(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;
	int ret;

	pp_write(LIMA_PP_PERF_CNT_0_LIMIT, 0xC0FFE000);
	pp_write(LIMA_PP_INT_MASK, 0);
	pp_write(LIMA_PP_CTRL, LIMA_PP_CTRL_FORCE_RESET);
	ret = lima_poll_timeout(ip, lima_pp_hard_reset_poll, 10, 100);
	if (ret) {
		dev_err(dev->dev, "pp hard reset timeout\n");
		return ret;
	}

	pp_write(LIMA_PP_PERF_CNT_0_LIMIT, 0);
	pp_write(LIMA_PP_INT_CLEAR, LIMA_PP_IRQ_MASK_ALL);
	pp_write(LIMA_PP_INT_MASK, LIMA_PP_IRQ_MASK_USED);
	return 0;
}

/* Decode the PP version register and log the core name and revision. */
static void lima_pp_print_version(struct lima_ip *ip)
{
	u32 version, major, minor;
	char *name;

	version = pp_read(LIMA_PP_VERSION);
	major = (version >> 8) & 0xFF;
	minor = version & 0xFF;
	switch (version >> 16) {
	case 0xC807:
		name = "mali200";
		break;
	case 0xCE07:
		name = "mali300";
		break;
	case 0xCD07:
		name = "mali400";
		break;
	case 0xCF07:
		name = "mali450";
		break;
	default:
		name = "unknown";
		break;
	}
	dev_info(ip->dev->dev, "%s - %s version major %d minor %d\n",
		 lima_ip_name(ip), name, major, minor);
}

static int lima_pp_hw_init(struct lima_ip *ip)
{
	ip->data.async_reset = false;
	lima_pp_soft_reset_async(ip);
	return lima_pp_soft_reset_async_wait(ip);
}

int lima_pp_resume(struct lima_ip *ip)
{
	return lima_pp_hw_init(ip);
}

void lima_pp_suspend(struct lima_ip *ip)
{

}

int lima_pp_init(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;
	int err;

	lima_pp_print_version(ip);

	err = lima_pp_hw_init(ip);
	if (err)
		return err;

	err = devm_request_irq(dev->dev, ip->irq, lima_pp_irq_handler,
			       IRQF_SHARED, lima_ip_name(ip), ip);
	if (err) {
		dev_err(dev->dev, "pp %s fail to request irq\n",
			lima_ip_name(ip));
		return err;
	}

	dev->pp_version = pp_read(LIMA_PP_VERSION);

	return 0;
}

void lima_pp_fini(struct lima_ip *ip)
{

}

int lima_pp_bcast_resume(struct lima_ip *ip)
{
	/* PP has been reset by individual PP resume */
	ip->data.async_reset = false;
	return 0;
}

void lima_pp_bcast_suspend(struct lima_ip *ip)
{

}

int lima_pp_bcast_init(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;
	int err;

	err = devm_request_irq(dev->dev, ip->irq, lima_pp_bcast_irq_handler,
			       IRQF_SHARED, lima_ip_name(ip), ip);
	if (err) {
		dev_err(dev->dev, "pp %s fail to request irq\n",
			lima_ip_name(ip));
		return err;
	}

	return 0;
}

void lima_pp_bcast_fini(struct lima_ip *ip)
{

}

/* Validate the userspace frame: the Mali450 frame's pad must be zero,
 * and num_pp must be within [1, number of processors in the pipe].
 */
static int lima_pp_task_validate(struct lima_sched_pipe *pipe,
				 struct lima_sched_task *task)
{
	u32 num_pp;

	if (pipe->bcast_processor) {
		struct drm_lima_m450_pp_frame *f = task->frame;

		num_pp = f->num_pp;

		if (f->_pad)
			return -EINVAL;
	} else {
		struct drm_lima_m400_pp_frame *f = task->frame;

		num_pp = f->num_pp;
	}

	if (num_pp == 0 || num_pp > pipe->num_processor)
		return -EINVAL;

	return 0;
}

/* Start a PP task.  Mali450 path: program the common registers once via
 * the broadcast unit (optionally routing tile lists through the DLBU),
 * set per-core stack/frame addresses, then kick all cores with one
 * broadcast write.  Mali400 path: program and start each core in turn
 * with its own PLBU array and stack addresses.
 */
static void lima_pp_task_run(struct lima_sched_pipe *pipe,
			     struct lima_sched_task *task)
{
	if (pipe->bcast_processor) {
		struct drm_lima_m450_pp_frame *frame = task->frame;
		struct lima_device *dev = pipe->bcast_processor->dev;
		struct lima_ip *ip = pipe->bcast_processor;
		int i;

		pipe->done = 0;
		atomic_set(&pipe->task, frame->num_pp);

		if (frame->use_dlbu) {
			lima_dlbu_enable(dev, frame->num_pp);

			frame->frame[LIMA_PP_FRAME >> 2] = LIMA_VA_RESERVE_DLBU;
			lima_dlbu_set_reg(dev->ip + lima_ip_dlbu, frame->dlbu_regs);
		} else
			lima_dlbu_disable(dev);

		lima_bcast_enable(dev, frame->num_pp);

		lima_pp_soft_reset_async_wait(ip);

		lima_pp_write_frame(ip, frame->frame, frame->wb);

		for (i = 0; i < frame->num_pp; i++) {
			struct lima_ip *ip = pipe->processor[i];

			pp_write(LIMA_PP_STACK, frame->fragment_stack_address[i]);
			if (!frame->use_dlbu)
				pp_write(LIMA_PP_FRAME, frame->plbu_array_address[i]);
		}

		pp_write(LIMA_PP_CTRL, LIMA_PP_CTRL_START_RENDERING);
	} else {
		struct drm_lima_m400_pp_frame *frame = task->frame;
		int i;

		atomic_set(&pipe->task, frame->num_pp);

		for (i = 0; i < frame->num_pp; i++) {
			struct lima_ip *ip = pipe->processor[i];

			frame->frame[LIMA_PP_FRAME >> 2] =
				frame->plbu_array_address[i];
			frame->frame[LIMA_PP_STACK >> 2] =
				frame->fragment_stack_address[i];

			lima_pp_soft_reset_async_wait(ip);

			lima_pp_write_frame(ip, frame->frame, frame->wb);

			pp_write(LIMA_PP_CTRL, LIMA_PP_CTRL_START_RENDERING);
		}
	}
}

/* After a task: queue async soft resets so the reset latency is hidden
 * behind the next task's setup.
 */
static void lima_pp_task_fini(struct lima_sched_pipe *pipe)
{
	if (pipe->bcast_processor)
		lima_pp_soft_reset_async(pipe->bcast_processor);
	else {
		int i;

		for (i = 0; i < pipe->num_processor; i++)
			lima_pp_soft_reset_async(pipe->processor[i]);
	}
}

static void lima_pp_task_error(struct lima_sched_pipe *pipe)
{
	int i;

	for (i = 0; i < pipe->num_processor; i++) {
		struct lima_ip *ip = pipe->processor[i];

		dev_err(ip->dev->dev, "pp task error %d int_state=%x status=%x\n",
			i, pp_read(LIMA_PP_INT_STATUS), pp_read(LIMA_PP_STATUS));

		lima_pp_hard_reset(ip);
	}
}

static void lima_pp_task_mmu_error(struct lima_sched_pipe *pipe)
{
	if (atomic_dec_and_test(&pipe->task))
		lima_sched_pipe_task_done(pipe);
}

/* Task slab shared across pipe instances, refcounted so it is only
 * destroyed when the last pipe is torn down.
 */
static struct kmem_cache *lima_pp_task_slab;
static int lima_pp_task_slab_refcnt;

/* Register the PP scheduler pipe callbacks; the frame size depends on
 * the GPU generation, and the slab whitelists the frame region for
 * usercopy.
 */
int lima_pp_pipe_init(struct lima_device *dev)
{
	int frame_size;
	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;

	if (dev->id == lima_gpu_mali400)
		frame_size = sizeof(struct drm_lima_m400_pp_frame);
	else
		frame_size = sizeof(struct drm_lima_m450_pp_frame);

	if (!lima_pp_task_slab) {
		lima_pp_task_slab = kmem_cache_create_usercopy(
			"lima_pp_task", sizeof(struct lima_sched_task) +
			frame_size, 0, SLAB_HWCACHE_ALIGN,
			sizeof(struct lima_sched_task), frame_size, NULL);
		if (!lima_pp_task_slab)
			return -ENOMEM;
	}

	lima_pp_task_slab_refcnt++;

	pipe->frame_size = frame_size;
	pipe->task_slab = lima_pp_task_slab;

	pipe->task_validate = lima_pp_task_validate;
	pipe->task_run = lima_pp_task_run;
	pipe->task_fini = lima_pp_task_fini;
	pipe->task_error = lima_pp_task_error;
	pipe->task_mmu_error = lima_pp_task_mmu_error;

	return 0;
}

void lima_pp_pipe_fini(struct lima_device *dev)
{
	if (!--lima_pp_task_slab_refcnt) {
		kmem_cache_destroy(lima_pp_task_slab);
		lima_pp_task_slab = NULL;
	}
}
linux-master
drivers/gpu/drm/lima/lima_pp.c
// SPDX-License-Identifier: GPL-2.0 OR MIT /* Copyright 2018-2019 Qiang Yu <[email protected]> */ #include <linux/slab.h> #include "lima_device.h" #include "lima_ctx.h" int lima_ctx_create(struct lima_device *dev, struct lima_ctx_mgr *mgr, u32 *id) { struct lima_ctx *ctx; int i, err; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->dev = dev; kref_init(&ctx->refcnt); for (i = 0; i < lima_pipe_num; i++) { err = lima_sched_context_init(dev->pipe + i, ctx->context + i, &ctx->guilty); if (err) goto err_out0; } err = xa_alloc(&mgr->handles, id, ctx, xa_limit_32b, GFP_KERNEL); if (err < 0) goto err_out0; ctx->pid = task_pid_nr(current); get_task_comm(ctx->pname, current); return 0; err_out0: for (i--; i >= 0; i--) lima_sched_context_fini(dev->pipe + i, ctx->context + i); kfree(ctx); return err; } static void lima_ctx_do_release(struct kref *ref) { struct lima_ctx *ctx = container_of(ref, struct lima_ctx, refcnt); int i; for (i = 0; i < lima_pipe_num; i++) lima_sched_context_fini(ctx->dev->pipe + i, ctx->context + i); kfree(ctx); } int lima_ctx_free(struct lima_ctx_mgr *mgr, u32 id) { struct lima_ctx *ctx; int ret = 0; mutex_lock(&mgr->lock); ctx = xa_erase(&mgr->handles, id); if (ctx) kref_put(&ctx->refcnt, lima_ctx_do_release); else ret = -EINVAL; mutex_unlock(&mgr->lock); return ret; } struct lima_ctx *lima_ctx_get(struct lima_ctx_mgr *mgr, u32 id) { struct lima_ctx *ctx; mutex_lock(&mgr->lock); ctx = xa_load(&mgr->handles, id); if (ctx) kref_get(&ctx->refcnt); mutex_unlock(&mgr->lock); return ctx; } void lima_ctx_put(struct lima_ctx *ctx) { kref_put(&ctx->refcnt, lima_ctx_do_release); } void lima_ctx_mgr_init(struct lima_ctx_mgr *mgr) { mutex_init(&mgr->lock); xa_init_flags(&mgr->handles, XA_FLAGS_ALLOC); } void lima_ctx_mgr_fini(struct lima_ctx_mgr *mgr) { struct lima_ctx *ctx; unsigned long id; xa_for_each(&mgr->handles, id, ctx) { kref_put(&ctx->refcnt, lima_ctx_do_release); } xa_destroy(&mgr->handles); mutex_destroy(&mgr->lock); }
linux-master
drivers/gpu/drm/lima/lima_ctx.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <[email protected]> */

#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

#include "lima_device.h"
#include "lima_gp.h"
#include "lima_pp.h"
#include "lima_mmu.h"
#include "lima_pmu.h"
#include "lima_l2_cache.h"
#include "lima_dlbu.h"
#include "lima_bcast.h"
#include "lima_vm.h"

/*
 * Device-level bring-up/teardown for the lima driver: clocks, reset,
 * regulator, register mapping, per-IP init and the GP/PP sched pipes.
 */

/* Static description of one hardware IP block: per-GPU-model register
 * offset (-1 = IP absent on that model), whether the IP is mandatory,
 * optional IRQ name, and the init/fini/resume/suspend ops.
 */
struct lima_ip_desc {
	char *name;
	char *irq_name;
	bool must_have[lima_gpu_num];
	int offset[lima_gpu_num];

	int (*init)(struct lima_ip *ip);
	void (*fini)(struct lima_ip *ip);
	int (*resume)(struct lima_ip *ip);
	void (*suspend)(struct lima_ip *ip);
};

#define LIMA_IP_DESC(ipname, mst0, mst1, off0, off1, func, irq) \
	[lima_ip_##ipname] = { \
		.name = #ipname, \
		.irq_name = irq, \
		.must_have = { \
			[lima_gpu_mali400] = mst0, \
			[lima_gpu_mali450] = mst1, \
		}, \
		.offset = { \
			[lima_gpu_mali400] = off0, \
			[lima_gpu_mali450] = off1, \
		}, \
		.init = lima_##func##_init, \
		.fini = lima_##func##_fini, \
		.resume = lima_##func##_resume, \
		.suspend = lima_##func##_suspend, \
	}

/* IP table indexed by enum lima_ip_id; offsets are Mali400/Mali450. */
static struct lima_ip_desc lima_ip_desc[lima_ip_num] = {
	LIMA_IP_DESC(pmu, false, false, 0x02000, 0x02000, pmu, "pmu"),
	LIMA_IP_DESC(l2_cache0, true, true, 0x01000, 0x10000, l2_cache, NULL),
	LIMA_IP_DESC(l2_cache1, false, true, -1, 0x01000, l2_cache, NULL),
	LIMA_IP_DESC(l2_cache2, false, false, -1, 0x11000, l2_cache, NULL),
	LIMA_IP_DESC(gp, true, true, 0x00000, 0x00000, gp, "gp"),
	LIMA_IP_DESC(pp0, true, true, 0x08000, 0x08000, pp, "pp0"),
	LIMA_IP_DESC(pp1, false, false, 0x0A000, 0x0A000, pp, "pp1"),
	LIMA_IP_DESC(pp2, false, false, 0x0C000, 0x0C000, pp, "pp2"),
	LIMA_IP_DESC(pp3, false, false, 0x0E000, 0x0E000, pp, "pp3"),
	LIMA_IP_DESC(pp4, false, false, -1, 0x28000, pp, "pp4"),
	LIMA_IP_DESC(pp5, false, false, -1, 0x2A000, pp, "pp5"),
	LIMA_IP_DESC(pp6, false, false, -1, 0x2C000, pp, "pp6"),
	LIMA_IP_DESC(pp7, false, false, -1, 0x2E000, pp, "pp7"),
	LIMA_IP_DESC(gpmmu, true, true, 0x03000, 0x03000, mmu, "gpmmu"),
	LIMA_IP_DESC(ppmmu0, true, true, 0x04000, 0x04000, mmu, "ppmmu0"),
	LIMA_IP_DESC(ppmmu1, false, false, 0x05000, 0x05000, mmu, "ppmmu1"),
	LIMA_IP_DESC(ppmmu2, false, false, 0x06000, 0x06000, mmu, "ppmmu2"),
	LIMA_IP_DESC(ppmmu3, false, false, 0x07000, 0x07000, mmu, "ppmmu3"),
	LIMA_IP_DESC(ppmmu4, false, false, -1, 0x1C000, mmu, "ppmmu4"),
	LIMA_IP_DESC(ppmmu5, false, false, -1, 0x1D000, mmu, "ppmmu5"),
	LIMA_IP_DESC(ppmmu6, false, false, -1, 0x1E000, mmu, "ppmmu6"),
	LIMA_IP_DESC(ppmmu7, false, false, -1, 0x1F000, mmu, "ppmmu7"),
	LIMA_IP_DESC(dlbu, false, true, -1, 0x14000, dlbu, NULL),
	LIMA_IP_DESC(bcast, false, true, -1, 0x13000, bcast, NULL),
	LIMA_IP_DESC(pp_bcast, false, true, -1, 0x16000, pp_bcast, "pp"),
	LIMA_IP_DESC(ppmmu_bcast, false, true, -1, 0x15000, mmu, NULL),
};

/* Human-readable name of an IP, for logging. */
const char *lima_ip_name(struct lima_ip *ip)
{
	return lima_ip_desc[ip->id].name;
}

/* Enable bus then core clock, then deassert reset; unwinds on failure. */
static int lima_clk_enable(struct lima_device *dev)
{
	int err;

	err = clk_prepare_enable(dev->clk_bus);
	if (err)
		return err;

	err = clk_prepare_enable(dev->clk_gpu);
	if (err)
		goto error_out0;

	if (dev->reset) {
		err = reset_control_deassert(dev->reset);
		if (err) {
			dev_err(dev->dev, "reset controller deassert failed %d\n", err);
			goto error_out1;
		}
	}

	return 0;

error_out1:
	clk_disable_unprepare(dev->clk_gpu);
error_out0:
	clk_disable_unprepare(dev->clk_bus);
	return err;
}

/* Reverse of lima_clk_enable(): assert reset, then disable clocks. */
static void lima_clk_disable(struct lima_device *dev)
{
	if (dev->reset)
		reset_control_assert(dev->reset);
	clk_disable_unprepare(dev->clk_gpu);
	clk_disable_unprepare(dev->clk_bus);
}

/* Acquire "bus"/"core" clocks and the optional shared reset, then enable.
 * Errors other than -EPROBE_DEFER are logged; the stale ERR_PTR is
 * cleared so later teardown never dereferences it.
 */
static int lima_clk_init(struct lima_device *dev)
{
	int err;

	dev->clk_bus = devm_clk_get(dev->dev, "bus");
	if (IS_ERR(dev->clk_bus)) {
		err = PTR_ERR(dev->clk_bus);
		if (err != -EPROBE_DEFER)
			dev_err(dev->dev, "get bus clk failed %d\n", err);
		dev->clk_bus = NULL;
		return err;
	}

	dev->clk_gpu = devm_clk_get(dev->dev, "core");
	if (IS_ERR(dev->clk_gpu)) {
		err = PTR_ERR(dev->clk_gpu);
		if (err != -EPROBE_DEFER)
			dev_err(dev->dev, "get core clk failed %d\n", err);
		dev->clk_gpu = NULL;
		return err;
	}

	dev->reset = devm_reset_control_array_get_optional_shared(dev->dev);
	if (IS_ERR(dev->reset)) {
		err = PTR_ERR(dev->reset);
		if (err != -EPROBE_DEFER)
			dev_err(dev->dev, "get reset controller failed %d\n", err);
		dev->reset = NULL;
		return err;
	}

	return lima_clk_enable(dev);
}

static void lima_clk_fini(struct lima_device *dev)
{
	lima_clk_disable(dev);
}

/* Enable the (optional) "mali" regulator; no-op when absent. */
static int lima_regulator_enable(struct lima_device *dev)
{
	int ret;

	if (!dev->regulator)
		return 0;

	ret = regulator_enable(dev->regulator);
	if (ret < 0) {
		dev_err(dev->dev, "failed to enable regulator: %d\n", ret);
		return ret;
	}

	return 0;
}

static void lima_regulator_disable(struct lima_device *dev)
{
	if (dev->regulator)
		regulator_disable(dev->regulator);
}

/* Get the optional "mali" regulator; -ENODEV (not present) is not an
 * error, the driver simply runs without regulator control.
 */
static int lima_regulator_init(struct lima_device *dev)
{
	int ret;

	dev->regulator = devm_regulator_get_optional(dev->dev, "mali");
	if (IS_ERR(dev->regulator)) {
		ret = PTR_ERR(dev->regulator);
		dev->regulator = NULL;
		if (ret == -ENODEV)
			return 0;
		if (ret != -EPROBE_DEFER)
			dev_err(dev->dev, "failed to get regulator: %d\n", ret);
		return ret;
	}

	return lima_regulator_enable(dev);
}

static void lima_regulator_fini(struct lima_device *dev)
{
	lima_regulator_disable(dev);
}

/* Initialize one IP from the descriptor table.  IPs absent on this GPU
 * model (offset < 0) are skipped; failures of optional IPs are ignored
 * (return 0), failures of mandatory IPs propagate.
 */
static int lima_init_ip(struct lima_device *dev, int index)
{
	struct platform_device *pdev = to_platform_device(dev->dev);
	struct lima_ip_desc *desc = lima_ip_desc + index;
	struct lima_ip *ip = dev->ip + index;
	const char *irq_name = desc->irq_name;
	int offset = desc->offset[dev->id];
	bool must = desc->must_have[dev->id];
	int err;

	if (offset < 0)
		return 0;

	ip->dev = dev;
	ip->id = index;
	ip->iomem = dev->iomem + offset;
	if (irq_name) {
		/* mandatory IPs must have their IRQ; optional ones may not */
		err = must ? platform_get_irq_byname(pdev, irq_name) :
			     platform_get_irq_byname_optional(pdev, irq_name);
		if (err < 0)
			goto out;
		ip->irq = err;
	}

	err = desc->init(ip);
	if (!err) {
		ip->present = true;
		return 0;
	}

out:
	return must ? err : 0;
}

/* Tear down one IP if it was successfully initialized. */
static void lima_fini_ip(struct lima_device *ldev, int index)
{
	struct lima_ip_desc *desc = lima_ip_desc + index;
	struct lima_ip *ip = ldev->ip + index;

	if (ip->present)
		desc->fini(ip);
}

/* Re-program one IP after a runtime resume; 0 for absent IPs. */
static int lima_resume_ip(struct lima_device *ldev, int index)
{
	struct lima_ip_desc *desc = lima_ip_desc + index;
	struct lima_ip *ip = ldev->ip + index;
	int ret = 0;

	if (ip->present)
		ret = desc->resume(ip);

	return ret;
}

/* Quiesce one IP before a runtime suspend. */
static void lima_suspend_ip(struct lima_device *ldev, int index)
{
	struct lima_ip_desc *desc = lima_ip_desc + index;
	struct lima_ip *ip = ldev->ip + index;

	if (ip->present)
		desc->suspend(ip);
}

/* Create the geometry-processor scheduler pipe (single GP + its MMU). */
static int lima_init_gp_pipe(struct lima_device *dev)
{
	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
	int err;

	pipe->ldev = dev;

	err = lima_sched_pipe_init(pipe, "gp");
	if (err)
		return err;

	pipe->l2_cache[pipe->num_l2_cache++] = dev->ip + lima_ip_l2_cache0;
	pipe->mmu[pipe->num_mmu++] = dev->ip + lima_ip_gpmmu;
	pipe->processor[pipe->num_processor++] = dev->ip + lima_ip_gp;

	err = lima_gp_pipe_init(dev);
	if (err) {
		lima_sched_pipe_fini(pipe);
		return err;
	}

	return 0;
}

static void lima_fini_gp_pipe(struct lima_device *dev)
{
	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;

	lima_gp_pipe_fini(dev);
	lima_sched_pipe_fini(pipe);
}

/* Create the pixel-processor pipe: collect every PP whose MMU and L2
 * cache are also present.  On Mali450 each group of 4 PPs shares an L2
 * (hence the i >> 2 indexing); Mali400 has a single L2 for all PPs.
 */
static int lima_init_pp_pipe(struct lima_device *dev)
{
	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
	int err, i;

	pipe->ldev = dev;

	err = lima_sched_pipe_init(pipe, "pp");
	if (err)
		return err;

	for (i = 0; i < LIMA_SCHED_PIPE_MAX_PROCESSOR; i++) {
		struct lima_ip *pp = dev->ip + lima_ip_pp0 + i;
		struct lima_ip *ppmmu = dev->ip + lima_ip_ppmmu0 + i;
		struct lima_ip *l2_cache;

		if (dev->id == lima_gpu_mali400)
			l2_cache = dev->ip + lima_ip_l2_cache0;
		else
			l2_cache = dev->ip + lima_ip_l2_cache1 + (i >> 2);

		if (pp->present && ppmmu->present && l2_cache->present) {
			pipe->mmu[pipe->num_mmu++] = ppmmu;
			pipe->processor[pipe->num_processor++] = pp;
			if (!pipe->l2_cache[i >> 2])
				pipe->l2_cache[pipe->num_l2_cache++] = l2_cache;
		}
	}

	/* Mali450 broadcast unit drives all PPs/MMUs at once if present */
	if (dev->ip[lima_ip_bcast].present) {
		pipe->bcast_processor = dev->ip + lima_ip_pp_bcast;
		pipe->bcast_mmu = dev->ip + lima_ip_ppmmu_bcast;
	}

	err = lima_pp_pipe_init(dev);
	if (err) {
		lima_sched_pipe_fini(pipe);
		return err;
	}

	return 0;
}

static void lima_fini_pp_pipe(struct lima_device *dev)
{
	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;

	lima_pp_pipe_fini(dev);
	lima_sched_pipe_fini(pipe);
}

/*
 * lima_device_init - full device bring-up, called from probe.
 *
 * Order: DMA masks -> clocks/reset -> regulator -> empty VM -> VA range
 * (+ DLBU page on Mali450) -> register mapping -> per-IP init -> GP and
 * PP pipes -> error-dump bookkeeping.  Each err_out label unwinds
 * exactly the steps completed before the failure.
 */
int lima_device_init(struct lima_device *ldev)
{
	struct platform_device *pdev = to_platform_device(ldev->dev);
	int err, i;

	/* GPU addresses are 32 bit; no scatter-gather segment limit */
	dma_set_coherent_mask(ldev->dev, DMA_BIT_MASK(32));
	dma_set_max_seg_size(ldev->dev, UINT_MAX);

	err = lima_clk_init(ldev);
	if (err)
		return err;

	err = lima_regulator_init(ldev);
	if (err)
		goto err_out0;

	ldev->empty_vm = lima_vm_create(ldev);
	if (!ldev->empty_vm) {
		err = -ENOMEM;
		goto err_out1;
	}

	ldev->va_start = 0;
	if (ldev->id == lima_gpu_mali450) {
		ldev->va_end = LIMA_VA_RESERVE_START;
		/* one WC page for the dynamic load balancing unit */
		ldev->dlbu_cpu = dma_alloc_wc(
			ldev->dev, LIMA_PAGE_SIZE,
			&ldev->dlbu_dma, GFP_KERNEL | __GFP_NOWARN);
		if (!ldev->dlbu_cpu) {
			err = -ENOMEM;
			goto err_out2;
		}
	} else
		ldev->va_end = LIMA_VA_RESERVE_END;

	ldev->iomem = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ldev->iomem)) {
		dev_err(ldev->dev, "fail to ioremap iomem\n");
		err = PTR_ERR(ldev->iomem);
		goto err_out3;
	}

	for (i = 0; i < lima_ip_num; i++) {
		err = lima_init_ip(ldev, i);
		if (err)
			goto err_out4;
	}

	err = lima_init_gp_pipe(ldev);
	if (err)
		goto err_out4;

	err = lima_init_pp_pipe(ldev);
	if (err)
		goto err_out5;

	ldev->dump.magic = LIMA_DUMP_MAGIC;
	ldev->dump.version_major = LIMA_DUMP_MAJOR;
	ldev->dump.version_minor = LIMA_DUMP_MINOR;
	INIT_LIST_HEAD(&ldev->error_task_list);
	mutex_init(&ldev->error_task_list_lock);

	dev_info(ldev->dev, "bus rate = %lu\n", clk_get_rate(ldev->clk_bus));
	/* NOTE(review): format string lacks a trailing \n — confirm intent */
	dev_info(ldev->dev, "mod rate = %lu", clk_get_rate(ldev->clk_gpu));

	return 0;

err_out5:
	lima_fini_gp_pipe(ldev);
err_out4:
	while (--i >= 0)
		lima_fini_ip(ldev, i);
err_out3:
	if (ldev->dlbu_cpu)
		dma_free_wc(ldev->dev, LIMA_PAGE_SIZE,
			    ldev->dlbu_cpu, ldev->dlbu_dma);
err_out2:
	lima_vm_put(ldev->empty_vm);
err_out1:
	lima_regulator_fini(ldev);
err_out0:
	lima_clk_fini(ldev);
	return err;
}

/* Full teardown, mirror image of lima_device_init(); also frees any
 * saved error-dump tasks.
 */
void lima_device_fini(struct lima_device *ldev)
{
	int i;
	struct lima_sched_error_task *et, *tmp;

	list_for_each_entry_safe(et, tmp, &ldev->error_task_list, list) {
		list_del(&et->list);
		kvfree(et);
	}
	mutex_destroy(&ldev->error_task_list_lock);

	lima_fini_pp_pipe(ldev);
	lima_fini_gp_pipe(ldev);

	for (i = lima_ip_num - 1; i >= 0; i--)
		lima_fini_ip(ldev, i);

	if (ldev->dlbu_cpu)
		dma_free_wc(ldev->dev, LIMA_PAGE_SIZE,
			    ldev->dlbu_cpu, ldev->dlbu_dma);

	lima_vm_put(ldev->empty_vm);

	lima_regulator_fini(ldev);

	lima_clk_fini(ldev);
}

/* Runtime-PM resume: clocks -> regulator -> every IP -> devfreq.
 * Failures unwind the already-resumed IPs in reverse.
 */
int lima_device_resume(struct device *dev)
{
	struct lima_device *ldev = dev_get_drvdata(dev);
	int i, err;

	err = lima_clk_enable(ldev);
	if (err) {
		dev_err(dev, "resume clk fail %d\n", err);
		return err;
	}

	err = lima_regulator_enable(ldev);
	if (err) {
		dev_err(dev, "resume regulator fail %d\n", err);
		goto err_out0;
	}

	for (i = 0; i < lima_ip_num; i++) {
		err = lima_resume_ip(ldev, i);
		if (err) {
			dev_err(dev, "resume ip %d fail\n", i);
			goto err_out1;
		}
	}

	err = lima_devfreq_resume(&ldev->devfreq);
	if (err) {
		dev_err(dev, "devfreq resume fail\n");
		goto err_out1;
	}

	return 0;

err_out1:
	while (--i >= 0)
		lima_suspend_ip(ldev, i);
	lima_regulator_disable(ldev);
err_out0:
	lima_clk_disable(ldev);
	return err;
}

/* Runtime-PM suspend: refuse with -EBUSY while any job is on the
 * hardware, then devfreq -> IPs (reverse order) -> regulator -> clocks.
 */
int lima_device_suspend(struct device *dev)
{
	struct lima_device *ldev = dev_get_drvdata(dev);
	int i, err;

	/* check any task running */
	for (i = 0; i < lima_pipe_num; i++) {
		if (atomic_read(&ldev->pipe[i].base.hw_rq_count))
			return -EBUSY;
	}

	err = lima_devfreq_suspend(&ldev->devfreq);
	if (err) {
		dev_err(dev, "devfreq suspend fail\n");
		return err;
	}

	for (i = lima_ip_num - 1; i >= 0; i--)
		lima_suspend_ip(ldev, i);

	lima_regulator_disable(ldev);

	lima_clk_disable(ldev);

	return 0;
}
linux-master
drivers/gpu/drm/lima/lima_device.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2020 Martin Blumenstingl <[email protected]>
 *
 * Based on panfrost_devfreq.c:
 * Copyright 2019 Collabora ltd.
 */
#include <linux/clk.h>
#include <linux/devfreq.h>
#include <linux/devfreq_cooling.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/property.h>

#include "lima_device.h"
#include "lima_devfreq.h"

/*
 * devfreq integration: tracks busy vs idle GPU time so the
 * simple_ondemand governor can scale the "core" clock via OPPs.
 */

/* Charge the time since the last update to busy or idle, depending on
 * whether any job is currently on the GPU.  Caller holds devfreq->lock.
 */
static void lima_devfreq_update_utilization(struct lima_devfreq *devfreq)
{
	ktime_t now, last;

	now = ktime_get();
	last = devfreq->time_last_update;

	if (devfreq->busy_count > 0)
		devfreq->busy_time += ktime_sub(now, last);
	else
		devfreq->idle_time += ktime_sub(now, last);

	devfreq->time_last_update = now;
}

/* devfreq .target op: clamp to the nearest OPP and set the clock rate. */
static int lima_devfreq_target(struct device *dev, unsigned long *freq,
			       u32 flags)
{
	struct dev_pm_opp *opp;

	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp))
		return PTR_ERR(opp);
	dev_pm_opp_put(opp);

	return dev_pm_opp_set_rate(dev, *freq);
}

/* Restart the busy/idle accounting window.  Caller holds devfreq->lock
 * (or no concurrency is possible yet, as in init).
 */
static void lima_devfreq_reset(struct lima_devfreq *devfreq)
{
	devfreq->busy_time = 0;
	devfreq->idle_time = 0;
	devfreq->time_last_update = ktime_get();
}

/* devfreq .get_dev_status op: report busy/total time since the last
 * call and the current clock rate, then reset the window.
 */
static int lima_devfreq_get_dev_status(struct device *dev,
				       struct devfreq_dev_status *status)
{
	struct lima_device *ldev = dev_get_drvdata(dev);
	struct lima_devfreq *devfreq = &ldev->devfreq;
	unsigned long irqflags;

	status->current_frequency = clk_get_rate(ldev->clk_gpu);

	spin_lock_irqsave(&devfreq->lock, irqflags);

	lima_devfreq_update_utilization(devfreq);

	status->total_time = ktime_to_ns(ktime_add(devfreq->busy_time,
						   devfreq->idle_time));
	status->busy_time = ktime_to_ns(devfreq->busy_time);

	lima_devfreq_reset(devfreq);

	spin_unlock_irqrestore(&devfreq->lock, irqflags);

	/* NOTE(review): total_time / 100 can be 0 for a sub-100ns window,
	 * making this a divide-by-zero — confirm whether that is reachable.
	 */
	dev_dbg(ldev->dev, "busy %lu total %lu %lu %% freq %lu MHz\n",
		status->busy_time, status->total_time,
		status->busy_time / (status->total_time / 100),
		status->current_frequency / 1000 / 1000);

	return 0;
}

static struct devfreq_dev_profile lima_devfreq_profile = {
	.timer = DEVFREQ_TIMER_DELAYED,
	.polling_ms = 50, /* ~3 frames */
	.target = lima_devfreq_target,
	.get_dev_status = lima_devfreq_get_dev_status,
};

/* Unregister cooling device and devfreq; safe to call when init was
 * skipped (both pointers stay NULL then).
 */
void lima_devfreq_fini(struct lima_device *ldev)
{
	struct lima_devfreq *devfreq = &ldev->devfreq;

	if (devfreq->cooling) {
		devfreq_cooling_unregister(devfreq->cooling);
		devfreq->cooling = NULL;
	}

	if (devfreq->devfreq) {
		devm_devfreq_remove_device(ldev->dev, devfreq->devfreq);
		devfreq->devfreq = NULL;
	}
}

/*
 * lima_devfreq_init - set up OPPs, the devfreq device and the thermal
 * cooling device.  Entirely optional: without an operating-points-v2
 * property the function returns 0 and the driver runs at a fixed clock.
 */
int lima_devfreq_init(struct lima_device *ldev)
{
	struct thermal_cooling_device *cooling;
	struct device *dev = ldev->dev;
	struct devfreq *devfreq;
	struct lima_devfreq *ldevfreq = &ldev->devfreq;
	struct dev_pm_opp *opp;
	unsigned long cur_freq;
	int ret;
	const char *regulator_names[] = { "mali", NULL };

	if (!device_property_present(dev, "operating-points-v2"))
		/* Optional, continue without devfreq */
		return 0;

	spin_lock_init(&ldevfreq->lock);

	/*
	 * clkname is set separately so it is not affected by the optional
	 * regulator setting which may return error.
	 */
	ret = devm_pm_opp_set_clkname(dev, "core");
	if (ret)
		return ret;

	ret = devm_pm_opp_set_regulators(dev, regulator_names);
	if (ret) {
		/* Continue if the optional regulator is missing */
		if (ret != -ENODEV)
			return ret;
	}

	ret = devm_pm_opp_of_add_table(dev);
	if (ret)
		return ret;

	lima_devfreq_reset(ldevfreq);

	cur_freq = clk_get_rate(ldev->clk_gpu);

	opp = devfreq_recommended_opp(dev, &cur_freq, 0);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	lima_devfreq_profile.initial_freq = cur_freq;
	dev_pm_opp_put(opp);

	/*
	 * Setup default thresholds for the simple_ondemand governor.
	 * The values are chosen based on experiments.
	 */
	ldevfreq->gov_data.upthreshold = 30;
	ldevfreq->gov_data.downdifferential = 5;

	devfreq = devm_devfreq_add_device(dev, &lima_devfreq_profile,
					  DEVFREQ_GOV_SIMPLE_ONDEMAND,
					  &ldevfreq->gov_data);
	if (IS_ERR(devfreq)) {
		dev_err(dev, "Couldn't initialize GPU devfreq\n");
		return PTR_ERR(devfreq);
	}

	ldevfreq->devfreq = devfreq;

	/* cooling registration failure is non-fatal, just log it */
	cooling = of_devfreq_cooling_register(dev->of_node, devfreq);
	if (IS_ERR(cooling))
		dev_info(dev, "Failed to register cooling device\n");
	else
		ldevfreq->cooling = cooling;

	return 0;
}

/* Mark the start of a GPU job for utilization accounting. */
void lima_devfreq_record_busy(struct lima_devfreq *devfreq)
{
	unsigned long irqflags;

	if (!devfreq->devfreq)
		return;

	spin_lock_irqsave(&devfreq->lock, irqflags);

	lima_devfreq_update_utilization(devfreq);

	devfreq->busy_count++;

	spin_unlock_irqrestore(&devfreq->lock, irqflags);
}

/* Mark the end of a GPU job; warns on busy/idle imbalance. */
void lima_devfreq_record_idle(struct lima_devfreq *devfreq)
{
	unsigned long irqflags;

	if (!devfreq->devfreq)
		return;

	spin_lock_irqsave(&devfreq->lock, irqflags);

	lima_devfreq_update_utilization(devfreq);

	WARN_ON(--devfreq->busy_count < 0);

	spin_unlock_irqrestore(&devfreq->lock, irqflags);
}

/* Reset accounting and resume the devfreq device after runtime resume. */
int lima_devfreq_resume(struct lima_devfreq *devfreq)
{
	unsigned long irqflags;

	if (!devfreq->devfreq)
		return 0;

	spin_lock_irqsave(&devfreq->lock, irqflags);

	lima_devfreq_reset(devfreq);

	spin_unlock_irqrestore(&devfreq->lock, irqflags);

	return devfreq_resume_device(devfreq->devfreq);
}

/* Suspend the devfreq device before runtime suspend. */
int lima_devfreq_suspend(struct lima_devfreq *devfreq)
{
	if (!devfreq->devfreq)
		return 0;

	return devfreq_suspend_device(devfreq->devfreq);
}
linux-master
drivers/gpu/drm/lima/lima_devfreq.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <[email protected]> */

#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include "lima_device.h"
#include "lima_vm.h"
#include "lima_gem.h"
#include "lima_regs.h"

/*
 * GPU virtual memory: a two-level page table (page directory + page
 * tables, grouped into "big tables" of several PTs allocated together)
 * plus a drm_mm range allocator for VA assignment.
 */

/* Per-(BO, VM) mapping record, refcounted so a BO can be attached to
 * the same VM multiple times; lives on the BO's va list.
 */
struct lima_bo_va {
	struct list_head list;
	unsigned int ref_count;

	struct drm_mm_node node;

	struct lima_vm *vm;
};

#define LIMA_VM_PD_SHIFT 22
#define LIMA_VM_PT_SHIFT 12
#define LIMA_VM_PB_SHIFT (LIMA_VM_PD_SHIFT + LIMA_VM_NUM_PT_PER_BT_SHIFT)
#define LIMA_VM_BT_SHIFT LIMA_VM_PT_SHIFT

#define LIMA_VM_PT_MASK ((1 << LIMA_VM_PD_SHIFT) - 1)
#define LIMA_VM_BT_MASK ((1 << LIMA_VM_PB_SHIFT) - 1)

/* index helpers: PDE/PTE for the hardware layout, PBE/BTE for the
 * driver's big-table grouping of page tables
 */
#define LIMA_PDE(va) (va >> LIMA_VM_PD_SHIFT)
#define LIMA_PTE(va) ((va & LIMA_VM_PT_MASK) >> LIMA_VM_PT_SHIFT)
#define LIMA_PBE(va) (va >> LIMA_VM_PB_SHIFT)
#define LIMA_BTE(va) ((va & LIMA_VM_BT_MASK) >> LIMA_VM_BT_SHIFT)


/* Clear PTEs for [start, end] (inclusive).  Assumes the covering big
 * tables exist.  Caller holds vm->lock.
 */
static void lima_vm_unmap_range(struct lima_vm *vm, u32 start, u32 end)
{
	u32 addr;

	for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) {
		u32 pbe = LIMA_PBE(addr);
		u32 bte = LIMA_BTE(addr);

		vm->bts[pbe].cpu[bte] = 0;
	}
}

/* Map one physical page at GPU VA @va, allocating the covering big
 * table (and wiring its PDEs) on demand.  Caller holds vm->lock.
 */
static int lima_vm_map_page(struct lima_vm *vm, dma_addr_t pa, u32 va)
{
	u32 pbe = LIMA_PBE(va);
	u32 bte = LIMA_BTE(va);

	if (!vm->bts[pbe].cpu) {
		dma_addr_t pts;
		u32 *pd;
		int j;

		vm->bts[pbe].cpu = dma_alloc_wc(
			vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
			&vm->bts[pbe].dma, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
		if (!vm->bts[pbe].cpu)
			return -ENOMEM;

		/* point every PDE of this big table at its page tables */
		pts = vm->bts[pbe].dma;
		pd = vm->pd.cpu + (pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT);
		for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
			pd[j] = pts | LIMA_VM_FLAG_PRESENT;
			pts += LIMA_PAGE_SIZE;
		}
	}

	vm->bts[pbe].cpu[bte] = pa | LIMA_VM_FLAGS_CACHE;

	return 0;
}

/* Find this VM's mapping record on the BO's va list, or NULL.
 * Caller holds bo->lock.
 */
static struct lima_bo_va *
lima_vm_bo_find(struct lima_vm *vm, struct lima_bo *bo)
{
	struct lima_bo_va *bo_va, *ret = NULL;

	list_for_each_entry(bo_va, &bo->va, list) {
		if (bo_va->vm == vm) {
			ret = bo_va;
			break;
		}
	}

	return ret;
}

/*
 * lima_vm_bo_add - attach a BO to a VM.
 *
 * Takes an extra reference if already attached; otherwise (when @create
 * is true) allocates a VA range and maps every BO page.  Returns
 * -ENOENT if not attached and @create is false.  On mapping failure any
 * partially mapped range is unmapped again.
 */
int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
{
	struct lima_bo_va *bo_va;
	struct sg_dma_page_iter sg_iter;
	int offset = 0, err;

	mutex_lock(&bo->lock);

	bo_va = lima_vm_bo_find(vm, bo);
	if (bo_va) {
		bo_va->ref_count++;
		mutex_unlock(&bo->lock);
		return 0;
	}

	/* should not create new bo_va if not asked by caller */
	if (!create) {
		mutex_unlock(&bo->lock);
		return -ENOENT;
	}

	bo_va = kzalloc(sizeof(*bo_va), GFP_KERNEL);
	if (!bo_va) {
		err = -ENOMEM;
		goto err_out0;
	}

	bo_va->vm = vm;
	bo_va->ref_count = 1;

	mutex_lock(&vm->lock);

	err = drm_mm_insert_node(&vm->mm, &bo_va->node, lima_bo_size(bo));
	if (err)
		goto err_out1;

	for_each_sgtable_dma_page(bo->base.sgt, &sg_iter, 0) {
		err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
				       bo_va->node.start + offset);
		if (err)
			goto err_out2;

		offset += PAGE_SIZE;
	}

	mutex_unlock(&vm->lock);

	list_add_tail(&bo_va->list, &bo->va);

	mutex_unlock(&bo->lock);

	return 0;

err_out2:
	if (offset)
		lima_vm_unmap_range(vm, bo_va->node.start,
				    bo_va->node.start + offset - 1);
	drm_mm_remove_node(&bo_va->node);
err_out1:
	mutex_unlock(&vm->lock);
	kfree(bo_va);
err_out0:
	mutex_unlock(&bo->lock);
	return err;
}

/* Drop one attachment reference; on the last one unmap the whole range
 * (heap BOs may be mapped beyond node.size, hence the heap_size check)
 * and free the VA node and record.
 */
void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo)
{
	struct lima_bo_va *bo_va;
	u32 size;

	mutex_lock(&bo->lock);

	bo_va = lima_vm_bo_find(vm, bo);

	if (--bo_va->ref_count > 0) {
		mutex_unlock(&bo->lock);
		return;
	}

	mutex_lock(&vm->lock);

	size = bo->heap_size ? bo->heap_size : bo_va->node.size;
	lima_vm_unmap_range(vm, bo_va->node.start,
			    bo_va->node.start + size - 1);

	drm_mm_remove_node(&bo_va->node);

	mutex_unlock(&vm->lock);

	list_del(&bo_va->list);

	mutex_unlock(&bo->lock);

	kfree(bo_va);
}

/* GPU VA of a BO in @vm; the BO must already be attached. */
u32 lima_vm_get_va(struct lima_vm *vm, struct lima_bo *bo)
{
	struct lima_bo_va *bo_va;
	u32 ret;

	mutex_lock(&bo->lock);

	bo_va = lima_vm_bo_find(vm, bo);
	ret = bo_va->node.start;

	mutex_unlock(&bo->lock);

	return ret;
}

/*
 * lima_vm_create - allocate a VM with an empty page directory.
 *
 * On Mali450 the DLBU page is premapped at its reserved VA.  Returns
 * NULL on allocation failure.
 */
struct lima_vm *lima_vm_create(struct lima_device *dev)
{
	struct lima_vm *vm;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return NULL;

	vm->dev = dev;
	mutex_init(&vm->lock);
	kref_init(&vm->refcount);

	vm->pd.cpu = dma_alloc_wc(dev->dev, LIMA_PAGE_SIZE, &vm->pd.dma,
				  GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
	if (!vm->pd.cpu)
		goto err_out0;

	if (dev->dlbu_cpu) {
		int err = lima_vm_map_page(
			vm, dev->dlbu_dma, LIMA_VA_RESERVE_DLBU);
		if (err)
			goto err_out1;
	}

	drm_mm_init(&vm->mm, dev->va_start, dev->va_end - dev->va_start);

	return vm;

err_out1:
	dma_free_wc(dev->dev, LIMA_PAGE_SIZE, vm->pd.cpu, vm->pd.dma);
err_out0:
	kfree(vm);
	return NULL;
}

/* kref release: free all big tables, the page directory and the VM. */
void lima_vm_release(struct kref *kref)
{
	struct lima_vm *vm = container_of(kref, struct lima_vm, refcount);
	int i;

	drm_mm_takedown(&vm->mm);

	for (i = 0; i < LIMA_VM_NUM_BT; i++) {
		if (vm->bts[i].cpu)
			dma_free_wc(vm->dev->dev,
				    LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
				    vm->bts[i].cpu, vm->bts[i].dma);
	}

	if (vm->pd.cpu)
		dma_free_wc(vm->dev->dev, LIMA_PAGE_SIZE, vm->pd.cpu, vm->pd.dma);

	kfree(vm);
}

/* Debug helper: dump all non-empty PD and PT entries to the kernel log. */
void lima_vm_print(struct lima_vm *vm)
{
	int i, j, k;
	u32 *pd, *pt;

	if (!vm->pd.cpu)
		return;

	pd = vm->pd.cpu;
	for (i = 0; i < LIMA_VM_NUM_BT; i++) {
		if (!vm->bts[i].cpu)
			continue;

		pt = vm->bts[i].cpu;
		for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
			int idx = (i << LIMA_VM_NUM_PT_PER_BT_SHIFT) + j;

			printk(KERN_INFO "lima vm pd %03x:%08x\n", idx, pd[idx]);

			for (k = 0; k < LIMA_PAGE_ENT_NUM; k++) {
				u32 pte = *pt++;

				if (pte)
					printk(KERN_INFO "  pt %03x:%08x\n", k, pte);
			}
		}
	}
}

/*
 * lima_vm_map_bo - map a BO's pages starting at page @pageoff into its
 * already-reserved VA range (used to grow heap BOs).  The BO must be
 * attached to @vm.  Partially mapped pages are unmapped on failure.
 */
int lima_vm_map_bo(struct lima_vm *vm, struct lima_bo *bo, int pageoff)
{
	struct lima_bo_va *bo_va;
	struct sg_dma_page_iter sg_iter;
	int offset = 0, err;
	u32 base;

	mutex_lock(&bo->lock);

	bo_va = lima_vm_bo_find(vm, bo);
	if (!bo_va) {
		err = -ENOENT;
		goto err_out0;
	}

	mutex_lock(&vm->lock);

	base = bo_va->node.start + (pageoff << PAGE_SHIFT);
	for_each_sgtable_dma_page(bo->base.sgt, &sg_iter, pageoff) {
		err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
				       base + offset);
		if (err)
			goto err_out1;

		offset += PAGE_SIZE;
	}

	mutex_unlock(&vm->lock);

	mutex_unlock(&bo->lock);
	return 0;

err_out1:
	if (offset)
		lima_vm_unmap_range(vm, base, base + offset - 1);
	mutex_unlock(&vm->lock);
err_out0:
	mutex_unlock(&bo->lock);
	return err;
}
linux-master
drivers/gpu/drm/lima/lima_vm.c
// SPDX-License-Identifier: GPL-2.0 OR MIT /* Copyright 2020 Qiang Yu <[email protected]> */ #include "lima_sched.h" #define CREATE_TRACE_POINTS #include "lima_trace.h"
linux-master
drivers/gpu/drm/lima/lima_trace.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <[email protected]> */

#include <linux/iosys-map.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/pm_runtime.h>

#include "lima_devfreq.h"
#include "lima_drv.h"
#include "lima_sched.h"
#include "lima_vm.h"
#include "lima_mmu.h"
#include "lima_l2_cache.h"
#include "lima_gem.h"
#include "lima_trace.h"

/*
 * DRM GPU scheduler glue: per-pipe (GP/PP) job submission, fences,
 * timeout/recovery handling and error-state capture.
 */

/* Hardware fence signaled when a job finishes on a pipe. */
struct lima_fence {
	struct dma_fence base;
	struct lima_sched_pipe *pipe;
};

/* Fence slab is shared across devices; refcounted so multiple probed
 * devices can init/fini independently.
 */
static struct kmem_cache *lima_fence_slab;
static int lima_fence_slab_refcnt;

/* Create (or take a reference on) the shared fence slab. */
int lima_sched_slab_init(void)
{
	if (!lima_fence_slab) {
		lima_fence_slab = kmem_cache_create(
			"lima_fence", sizeof(struct lima_fence), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!lima_fence_slab)
			return -ENOMEM;
	}

	lima_fence_slab_refcnt++;
	return 0;
}

/* Drop a reference on the fence slab, destroying it on the last one. */
void lima_sched_slab_fini(void)
{
	if (!--lima_fence_slab_refcnt) {
		kmem_cache_destroy(lima_fence_slab);
		lima_fence_slab = NULL;
	}
}

static inline struct lima_fence *to_lima_fence(struct dma_fence *fence)
{
	return container_of(fence, struct lima_fence, base);
}

static const char *lima_fence_get_driver_name(struct dma_fence *fence)
{
	return "lima";
}

static const char *lima_fence_get_timeline_name(struct dma_fence *fence)
{
	struct lima_fence *f = to_lima_fence(fence);

	return f->pipe->base.name;
}

/* RCU callback: actually free the fence after the grace period. */
static void lima_fence_release_rcu(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct lima_fence *fence = to_lima_fence(f);

	kmem_cache_free(lima_fence_slab, fence);
}

/* dma_fence .release: defer freeing via RCU (fences may be accessed
 * under rcu_read_lock by other dma-fence users).
 */
static void lima_fence_release(struct dma_fence *fence)
{
	struct lima_fence *f = to_lima_fence(fence);

	call_rcu(&f->base.rcu, lima_fence_release_rcu);
}

static const struct dma_fence_ops lima_fence_ops = {
	.get_driver_name = lima_fence_get_driver_name,
	.get_timeline_name = lima_fence_get_timeline_name,
	.release = lima_fence_release,
};

/* Allocate and init a hardware fence on the pipe's fence timeline. */
static struct lima_fence *lima_fence_create(struct lima_sched_pipe *pipe)
{
	struct lima_fence *fence;

	fence = kmem_cache_zalloc(lima_fence_slab, GFP_KERNEL);
	if (!fence)
		return NULL;

	fence->pipe = pipe;
	dma_fence_init(&fence->base, &lima_fence_ops, &pipe->fence_lock,
		       pipe->fence_context, ++pipe->fence_seqno);

	return fence;
}

static inline struct lima_sched_task *to_lima_task(struct drm_sched_job *job)
{
	return container_of(job, struct lima_sched_task, base);
}

static inline struct lima_sched_pipe *to_lima_pipe(struct drm_gpu_scheduler *sched)
{
	return container_of(sched, struct lima_sched_pipe, base);
}

/*
 * lima_sched_task_init - prepare a task for submission.
 *
 * Copies the BO array, takes a GEM reference on each BO, initializes
 * and arms the drm_sched job and references the VM.
 *
 * NOTE(review): on drm_sched_job_init() failure the GEM references
 * taken above are not dropped here — confirm the caller's error path
 * accounts for them.
 */
int lima_sched_task_init(struct lima_sched_task *task,
			 struct lima_sched_context *context,
			 struct lima_bo **bos, int num_bos,
			 struct lima_vm *vm)
{
	int err, i;

	task->bos = kmemdup(bos, sizeof(*bos) * num_bos, GFP_KERNEL);
	if (!task->bos)
		return -ENOMEM;

	for (i = 0; i < num_bos; i++)
		drm_gem_object_get(&bos[i]->base.base);

	err = drm_sched_job_init(&task->base, &context->base, vm);
	if (err) {
		kfree(task->bos);
		return err;
	}

	drm_sched_job_arm(&task->base);

	task->num_bos = num_bos;
	task->vm = lima_vm_get(vm);

	return 0;
}

/* Release everything lima_sched_task_init() acquired. */
void lima_sched_task_fini(struct lima_sched_task *task)
{
	int i;

	drm_sched_job_cleanup(&task->base);

	if (task->bos) {
		for (i = 0; i < task->num_bos; i++)
			drm_gem_object_put(&task->bos[i]->base.base);
		kfree(task->bos);
	}

	lima_vm_put(task->vm);
}

/* Create a scheduler entity on the pipe for one userspace context. */
int lima_sched_context_init(struct lima_sched_pipe *pipe,
			    struct lima_sched_context *context,
			    atomic_t *guilty)
{
	struct drm_gpu_scheduler *sched = &pipe->base;

	return drm_sched_entity_init(&context->base, DRM_SCHED_PRIORITY_NORMAL,
				     &sched, 1, guilty);
}

void lima_sched_context_fini(struct lima_sched_pipe *pipe,
			     struct lima_sched_context *context)
{
	drm_sched_entity_destroy(&context->base);
}

/* Push a prepared task to the scheduler; returns a reference to its
 * finished fence for the caller.
 */
struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task)
{
	struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished);

	trace_lima_task_submit(task);
	drm_sched_entity_push_job(&task->base);
	return fence;
}

/* Runtime-PM get + devfreq busy accounting before running a job. */
static int lima_pm_busy(struct lima_device *ldev)
{
	int ret;

	/* resume GPU if it has been suspended by runtime PM */
	ret = pm_runtime_resume_and_get(ldev->dev);
	if (ret < 0)
		return ret;

	lima_devfreq_record_busy(&ldev->devfreq);
	return 0;
}

/* Devfreq idle accounting + runtime-PM autosuspend after a job. */
static void lima_pm_idle(struct lima_device *ldev)
{
	lima_devfreq_record_idle(&ldev->devfreq);

	/* GPU can do auto runtime suspend */
	pm_runtime_mark_last_busy(ldev->dev);
	pm_runtime_put_autosuspend(ldev->dev);
}

/* drm_sched .run_job: power up, flush L2, switch the MMU(s) to the
 * task's VM and start the hardware.  Returns the HW fence (an extra
 * reference is held for the irq handler race noted below).
 */
static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
{
	struct lima_sched_task *task = to_lima_task(job);
	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
	struct lima_device *ldev = pipe->ldev;
	struct lima_fence *fence;
	int i, err;

	/* after GPU reset */
	if (job->s_fence->finished.error < 0)
		return NULL;

	fence = lima_fence_create(pipe);
	if (!fence)
		return NULL;

	err = lima_pm_busy(ldev);
	if (err < 0) {
		dma_fence_put(&fence->base);
		return NULL;
	}

	task->fence = &fence->base;

	/* for caller usage of the fence, otherwise irq handler
	 * may consume the fence before caller use it
	 */
	dma_fence_get(task->fence);

	pipe->current_task = task;

	/* this is needed for MMU to work correctly, otherwise GP/PP
	 * will hang or page fault for unknown reason after running for
	 * a while.
	 *
	 * Need to investigate:
	 * 1. is it related to TLB
	 * 2. how much performance will be affected by L2 cache flush
	 * 3. can we reduce the calling of this function because all
	 *    GP/PP use the same L2 cache on mali400
	 *
	 * TODO:
	 * 1. move this to task fini to save some wait time?
	 * 2. when GP/PP use different l2 cache, need PP wait GP l2
	 *    cache flush?
	 */
	for (i = 0; i < pipe->num_l2_cache; i++)
		lima_l2_cache_flush(pipe->l2_cache[i]);

	lima_vm_put(pipe->current_vm);
	pipe->current_vm = lima_vm_get(task->vm);

	if (pipe->bcast_mmu)
		lima_mmu_switch_vm(pipe->bcast_mmu, pipe->current_vm);
	else {
		for (i = 0; i < pipe->num_mmu; i++)
			lima_mmu_switch_vm(pipe->mmu[i], pipe->current_vm);
	}

	trace_lima_task_run(task);

	pipe->error = false;
	pipe->task_run(pipe, task);

	return task->fence;
}

/* Serialize a failed task's state (frame data, process name/pid and
 * every BO's contents) into a lima_dump record appended to the
 * device's error_task_list, bounded by lima_max_error_tasks.
 */
static void lima_sched_build_error_task_list(struct lima_sched_task *task)
{
	struct lima_sched_error_task *et;
	struct lima_sched_pipe *pipe = to_lima_pipe(task->base.sched);
	struct lima_ip *ip = pipe->processor[0];
	int pipe_id = ip->id == lima_ip_gp ? lima_pipe_gp : lima_pipe_pp;
	struct lima_device *dev = ip->dev;
	struct lima_sched_context *sched_ctx =
		container_of(task->base.entity,
			     struct lima_sched_context, base);
	struct lima_ctx *ctx =
		container_of(sched_ctx, struct lima_ctx, context[pipe_id]);
	struct lima_dump_task *dt;
	struct lima_dump_chunk *chunk;
	struct lima_dump_chunk_pid *pid_chunk;
	struct lima_dump_chunk_buffer *buffer_chunk;
	u32 size, task_size, mem_size;
	int i;
	struct iosys_map map;
	int ret;

	mutex_lock(&dev->error_task_list_lock);

	if (dev->dump.num_tasks >= lima_max_error_tasks) {
		dev_info(dev->dev, "fail to save task state from %s pid %d: "
			 "error task list is full\n", ctx->pname, ctx->pid);
		goto out;
	}

	/* frame chunk */
	size = sizeof(struct lima_dump_chunk) + pipe->frame_size;
	/* process name chunk */
	size += sizeof(struct lima_dump_chunk) + sizeof(ctx->pname);
	/* pid chunk */
	size += sizeof(struct lima_dump_chunk);
	/* buffer chunks */
	for (i = 0; i < task->num_bos; i++) {
		struct lima_bo *bo = task->bos[i];

		size += sizeof(struct lima_dump_chunk);
		size += bo->heap_size ? bo->heap_size : lima_bo_size(bo);
	}

	task_size = size + sizeof(struct lima_dump_task);
	mem_size = task_size + sizeof(*et);
	et = kvmalloc(mem_size, GFP_KERNEL);
	if (!et) {
		dev_err(dev->dev, "fail to alloc task dump buffer of size %x\n",
			mem_size);
		goto out;
	}

	et->data = et + 1;
	et->size = task_size;

	dt = et->data;
	memset(dt, 0, sizeof(*dt));
	dt->id = pipe_id;
	dt->size = size;

	chunk = (struct lima_dump_chunk *)(dt + 1);
	memset(chunk, 0, sizeof(*chunk));
	chunk->id = LIMA_DUMP_CHUNK_FRAME;
	chunk->size = pipe->frame_size;
	memcpy(chunk + 1, task->frame, pipe->frame_size);
	dt->num_chunks++;

	chunk = (void *)(chunk + 1) + chunk->size;
	memset(chunk, 0, sizeof(*chunk));
	chunk->id = LIMA_DUMP_CHUNK_PROCESS_NAME;
	chunk->size = sizeof(ctx->pname);
	memcpy(chunk + 1, ctx->pname, sizeof(ctx->pname));
	dt->num_chunks++;

	pid_chunk = (void *)(chunk + 1) + chunk->size;
	memset(pid_chunk, 0, sizeof(*pid_chunk));
	pid_chunk->id = LIMA_DUMP_CHUNK_PROCESS_ID;
	pid_chunk->pid = ctx->pid;
	dt->num_chunks++;

	buffer_chunk = (void *)(pid_chunk + 1) + pid_chunk->size;
	for (i = 0; i < task->num_bos; i++) {
		struct lima_bo *bo = task->bos[i];
		void *data;

		memset(buffer_chunk, 0, sizeof(*buffer_chunk));
		buffer_chunk->id = LIMA_DUMP_CHUNK_BUFFER;
		buffer_chunk->va = lima_vm_get_va(task->vm, bo);

		if (bo->heap_size) {
			buffer_chunk->size = bo->heap_size;

			/* heap BOs have no sgt-backed vmap; map pages directly */
			data = vmap(bo->base.pages, bo->heap_size >> PAGE_SHIFT,
				    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
			if (!data) {
				kvfree(et);
				goto out;
			}

			memcpy(buffer_chunk + 1, data, buffer_chunk->size);

			vunmap(data);
		} else {
			buffer_chunk->size = lima_bo_size(bo);

			ret = drm_gem_vmap_unlocked(&bo->base.base, &map);
			if (ret) {
				kvfree(et);
				goto out;
			}

			memcpy(buffer_chunk + 1, map.vaddr, buffer_chunk->size);

			drm_gem_vunmap_unlocked(&bo->base.base, &map);
		}

		buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size;
		dt->num_chunks++;
	}

	list_add(&et->list, &dev->error_task_list);
	dev->dump.size += et->size;
	dev->dump.num_tasks++;

	dev_info(dev->dev, "save error task state success\n");

out:
	mutex_unlock(&dev->error_task_list_lock);
}

/* drm_sched .timedout_job: stop the scheduler, optionally capture the
 * task state, reset the hardware, resume the MMU(s), drop the current
 * VM, then resubmit remaining jobs and restart.
 */
static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job)
{
	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
	struct lima_sched_task *task = to_lima_task(job);
	struct lima_device *ldev = pipe->ldev;

	if (!pipe->error)
		DRM_ERROR("lima job timeout\n");

	drm_sched_stop(&pipe->base, &task->base);

	drm_sched_increase_karma(&task->base);

	if (lima_max_error_tasks)
		lima_sched_build_error_task_list(task);

	pipe->task_error(pipe);

	if (pipe->bcast_mmu)
		lima_mmu_page_fault_resume(pipe->bcast_mmu);
	else {
		int i;

		for (i = 0; i < pipe->num_mmu; i++)
			lima_mmu_page_fault_resume(pipe->mmu[i]);
	}

	lima_vm_put(pipe->current_vm);
	pipe->current_vm = NULL;
	pipe->current_task = NULL;

	lima_pm_idle(ldev);

	drm_sched_resubmit_jobs(&pipe->base);
	drm_sched_start(&pipe->base, true);

	return DRM_GPU_SCHED_STAT_NOMINAL;
}

/* drm_sched .free_job: drop the HW fence, detach BOs from the VM and
 * release the task back to the pipe's slab.
 */
static void lima_sched_free_job(struct drm_sched_job *job)
{
	struct lima_sched_task *task = to_lima_task(job);
	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
	struct lima_vm *vm = task->vm;
	struct lima_bo **bos = task->bos;
	int i;

	dma_fence_put(task->fence);

	for (i = 0; i < task->num_bos; i++)
		lima_vm_bo_del(vm, bos[i]);

	lima_sched_task_fini(task);
	kmem_cache_free(pipe->task_slab, task);
}

static const struct drm_sched_backend_ops lima_sched_ops = {
	.run_job = lima_sched_run_job,
	.timedout_job = lima_sched_timedout_job,
	.free_job = lima_sched_free_job,
};

/* Workqueue handler for soft recovery: flush caches/TLBs and retry the
 * task; escalate to a scheduler fault (full reset) if that fails.
 */
static void lima_sched_recover_work(struct work_struct *work)
{
	struct lima_sched_pipe *pipe =
		container_of(work, struct lima_sched_pipe, recover_work);
	int i;

	for (i = 0; i < pipe->num_l2_cache; i++)
		lima_l2_cache_flush(pipe->l2_cache[i]);

	if (pipe->bcast_mmu) {
		lima_mmu_flush_tlb(pipe->bcast_mmu);
	} else {
		for (i = 0; i < pipe->num_mmu; i++)
			lima_mmu_flush_tlb(pipe->mmu[i]);
	}

	if (pipe->task_recover(pipe))
		drm_sched_fault(&pipe->base);
}

/* Initialize one scheduler pipe; timeout falls back to 500 ms when the
 * module parameter is unset or non-positive.
 */
int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
{
	unsigned int timeout = lima_sched_timeout_ms > 0 ?
			       lima_sched_timeout_ms : 500;

	pipe->fence_context = dma_fence_context_alloc(1);
	spin_lock_init(&pipe->fence_lock);

	INIT_WORK(&pipe->recover_work, lima_sched_recover_work);

	return drm_sched_init(&pipe->base, &lima_sched_ops, 1,
			      lima_job_hang_limit,
			      msecs_to_jiffies(timeout), NULL,
			      NULL, name, pipe->ldev->dev);
}

void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
{
	drm_sched_fini(&pipe->base);
}

/* Called from the pipe's irq path when the hardware finishes: on error
 * either schedule soft recovery (recoverable tasks) or raise a
 * scheduler fault; on success signal the fence and go idle.
 */
void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
{
	struct lima_sched_task *task = pipe->current_task;
	struct lima_device *ldev = pipe->ldev;

	if (pipe->error) {
		if (task && task->recoverable)
			schedule_work(&pipe->recover_work);
		else
			drm_sched_fault(&pipe->base);
	} else {
		pipe->task_fini(pipe);
		dma_fence_signal(task->fence);

		lima_pm_idle(ldev);
	}
}
linux-master
drivers/gpu/drm/lima/lima_sched.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <[email protected]> */

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/lima_drm.h>

#include "lima_device.h"
#include "lima_drv.h"
#include "lima_gem.h"
#include "lima_vm.h"

/* Module parameters; see MODULE_PARM_DESC strings below for meaning. */
int lima_sched_timeout_ms;
uint lima_heap_init_nr_pages = 8;
uint lima_max_error_tasks;
uint lima_job_hang_limit;

MODULE_PARM_DESC(sched_timeout_ms, "task run timeout in ms");
module_param_named(sched_timeout_ms, lima_sched_timeout_ms, int, 0444);

MODULE_PARM_DESC(heap_init_nr_pages, "heap buffer init number of pages");
module_param_named(heap_init_nr_pages, lima_heap_init_nr_pages, uint, 0444);

MODULE_PARM_DESC(max_error_tasks, "max number of error tasks to save");
module_param_named(max_error_tasks, lima_max_error_tasks, uint, 0644);

MODULE_PARM_DESC(job_hang_limit, "number of times to allow a job to hang before dropping it (default 0)");
module_param_named(job_hang_limit, lima_job_hang_limit, uint, 0444);

/*
 * DRM_IOCTL_LIMA_GET_PARAM: report GPU identification and version
 * information to userspace (Mesa).  Rejects unknown params and
 * non-zero padding with -EINVAL.
 */
static int lima_ioctl_get_param(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_lima_get_param *args = data;
	struct lima_device *ldev = to_lima_dev(dev);

	if (args->pad)
		return -EINVAL;

	switch (args->param) {
	case DRM_LIMA_PARAM_GPU_ID:
		switch (ldev->id) {
		case lima_gpu_mali400:
			args->value = DRM_LIMA_PARAM_GPU_ID_MALI400;
			break;
		case lima_gpu_mali450:
			args->value = DRM_LIMA_PARAM_GPU_ID_MALI450;
			break;
		default:
			args->value = DRM_LIMA_PARAM_GPU_ID_UNKNOWN;
			break;
		}
		break;

	case DRM_LIMA_PARAM_NUM_PP:
		args->value = ldev->pipe[lima_pipe_pp].num_processor;
		break;

	case DRM_LIMA_PARAM_GP_VERSION:
		args->value = ldev->gp_version;
		break;

	case DRM_LIMA_PARAM_PP_VERSION:
		args->value = ldev->pp_version;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * DRM_IOCTL_LIMA_GEM_CREATE: allocate a GEM BO and return a handle.
 * Only the HEAP flag is accepted; zero size and padding are rejected.
 */
static int lima_ioctl_gem_create(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_lima_gem_create *args = data;

	if (args->pad)
		return -EINVAL;

	if (args->flags & ~(LIMA_BO_FLAG_HEAP))
		return -EINVAL;

	if (args->size == 0)
		return -EINVAL;

	return lima_gem_create_handle(dev, file, args->size, args->flags, &args->handle);
}

/*
 * DRM_IOCTL_LIMA_GEM_INFO: look up a BO's GPU virtual address in the
 * caller's VM and its mmap fake offset.
 */
static int lima_ioctl_gem_info(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_lima_gem_info *args = data;

	return lima_gem_get_info(file, args->handle, &args->va, &args->offset);
}

/*
 * DRM_IOCTL_LIMA_GEM_SUBMIT: queue a GP or PP task.  Copies the BO list
 * and frame descriptor from userspace, validates the frame against the
 * pipe, resolves the context and hands everything to lima_gem_submit().
 *
 * Memory layout notes:
 *  - one kvcalloc'd buffer holds both the user BO array and the internal
 *    lima_bo pointer array (submit.lbos points past the user part);
 *  - the frame descriptor lives directly after the task struct, which is
 *    why the task slab objects are sized task + frame_size.
 * On success, task ownership passes to the scheduler; on failure it is
 * freed here.
 */
static int lima_ioctl_gem_submit(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_lima_gem_submit *args = data;
	struct lima_device *ldev = to_lima_dev(dev);
	struct lima_drm_priv *priv = file->driver_priv;
	struct drm_lima_gem_submit_bo *bos;
	struct lima_sched_pipe *pipe;
	struct lima_sched_task *task;
	struct lima_ctx *ctx;
	struct lima_submit submit = {0};
	size_t size;
	int err = 0;

	if (args->pipe >= lima_pipe_num || args->nr_bos == 0)
		return -EINVAL;

	if (args->flags & ~(LIMA_SUBMIT_FLAG_EXPLICIT_FENCE))
		return -EINVAL;

	pipe = ldev->pipe + args->pipe;
	if (args->frame_size != pipe->frame_size)
		return -EINVAL;

	bos = kvcalloc(args->nr_bos, sizeof(*submit.bos) + sizeof(*submit.lbos), GFP_KERNEL);
	if (!bos)
		return -ENOMEM;

	size = args->nr_bos * sizeof(*submit.bos);
	if (copy_from_user(bos, u64_to_user_ptr(args->bos), size)) {
		err = -EFAULT;
		goto out0;
	}

	task = kmem_cache_zalloc(pipe->task_slab, GFP_KERNEL);
	if (!task) {
		err = -ENOMEM;
		goto out0;
	}

	task->frame = task + 1;
	if (copy_from_user(task->frame, u64_to_user_ptr(args->frame), args->frame_size)) {
		err = -EFAULT;
		goto out1;
	}

	err = pipe->task_validate(pipe, task);
	if (err)
		goto out1;

	ctx = lima_ctx_get(&priv->ctx_mgr, args->ctx);
	if (!ctx) {
		err = -ENOENT;
		goto out1;
	}

	submit.pipe = args->pipe;
	submit.bos = bos;
	submit.lbos = (void *)bos + size;
	submit.nr_bos = args->nr_bos;
	submit.task = task;
	submit.ctx = ctx;
	submit.flags = args->flags;
	submit.in_sync[0] = args->in_sync[0];
	submit.in_sync[1] = args->in_sync[1];
	submit.out_sync = args->out_sync;

	err = lima_gem_submit(file, &submit);

	lima_ctx_put(ctx);
out1:
	/* On success the task is owned by the scheduler now. */
	if (err)
		kmem_cache_free(pipe->task_slab, task);
out0:
	kvfree(bos);
	return err;
}

/*
 * DRM_IOCTL_LIMA_GEM_WAIT: wait on a BO's reservation fences for
 * read and/or write access.  op == 0 is a no-op.
 */
static int lima_ioctl_gem_wait(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_lima_gem_wait *args = data;

	if (args->op & ~(LIMA_GEM_WAIT_READ|LIMA_GEM_WAIT_WRITE))
		return -EINVAL;

	return lima_gem_wait(file, args->handle, args->op, args->timeout_ns);
}

/* DRM_IOCTL_LIMA_CTX_CREATE: create a scheduler context for this client. */
static int lima_ioctl_ctx_create(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_lima_ctx_create *args = data;
	struct lima_drm_priv *priv = file->driver_priv;
	struct lima_device *ldev = to_lima_dev(dev);

	if (args->_pad)
		return -EINVAL;

	return lima_ctx_create(ldev, &priv->ctx_mgr, &args->id);
}

/*
 * DRM_IOCTL_LIMA_CTX_FREE: destroy a context by id.
 * Uses drm_lima_ctx_create for args since the two uapi structs share the
 * same id/_pad layout.
 */
static int lima_ioctl_ctx_free(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_lima_ctx_create *args = data;
	struct lima_drm_priv *priv = file->driver_priv;

	if (args->_pad)
		return -EINVAL;

	return lima_ctx_free(&priv->ctx_mgr, args->id);
}

/*
 * Per-open-file setup: allocate the client's private data with its own
 * GPU VM and context manager.
 */
static int lima_drm_driver_open(struct drm_device *dev, struct drm_file *file)
{
	int err;
	struct lima_drm_priv *priv;
	struct lima_device *ldev = to_lima_dev(dev);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->vm = lima_vm_create(ldev);
	if (!priv->vm) {
		err = -ENOMEM;
		goto err_out0;
	}

	lima_ctx_mgr_init(&priv->ctx_mgr);

	file->driver_priv = priv;
	return 0;

err_out0:
	kfree(priv);
	return err;
}

/* Per-open-file teardown, mirror of lima_drm_driver_open(). */
static void lima_drm_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct lima_drm_priv *priv = file->driver_priv;

	lima_ctx_mgr_fini(&priv->ctx_mgr);
	lima_vm_put(priv->vm);
	kfree(priv);
}

static const struct drm_ioctl_desc lima_drm_driver_ioctls[] = {
	DRM_IOCTL_DEF_DRV(LIMA_GET_PARAM, lima_ioctl_get_param, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(LIMA_GEM_CREATE, lima_ioctl_gem_create, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(LIMA_GEM_INFO, lima_ioctl_gem_info, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(LIMA_GEM_SUBMIT, lima_ioctl_gem_submit, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(LIMA_GEM_WAIT, lima_ioctl_gem_wait, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(LIMA_CTX_CREATE, lima_ioctl_ctx_create, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(LIMA_CTX_FREE, lima_ioctl_ctx_free, DRM_RENDER_ALLOW),
};

DEFINE_DRM_GEM_FOPS(lima_drm_driver_fops);

/*
 * Changelog:
 *
 * - 1.1.0 - add heap buffer support
 */

static const struct drm_driver lima_drm_driver = {
	.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
	.open = lima_drm_driver_open,
	.postclose = lima_drm_driver_postclose,
	.ioctls = lima_drm_driver_ioctls,
	.num_ioctls = ARRAY_SIZE(lima_drm_driver_ioctls),
	.fops = &lima_drm_driver_fops,
	.name = "lima",
	.desc = "lima DRM",
	.date = "20191231",
	.major = 1,
	.minor = 1,
	.patchlevel = 0,

	.gem_create_object = lima_gem_create_object,
	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
};

/*
 * Cursor for streaming a sequence of variable-size memory blocks out
 * through a sysfs bin_attribute read.  'base' tracks the running offset
 * of the current source block within the virtual file.
 */
struct lima_block_reader {
	void *dst;
	size_t base;
	size_t count;
	size_t off;
	ssize_t read;
};

/*
 * Copy the part of [src, src + src_size) that overlaps the reader's
 * requested window.  Returns true while the reader still wants more
 * bytes (so callers can keep iterating source blocks).
 */
static bool lima_read_block(struct lima_block_reader *reader,
			    void *src, size_t src_size)
{
	size_t max_off = reader->base + src_size;

	if (reader->off < max_off) {
		size_t size = min_t(size_t, max_off - reader->off,
				    reader->count);

		memcpy(reader->dst, src + (reader->off - reader->base), size);

		reader->dst += size;
		reader->off += size;
		reader->read += size;
		reader->count -= size;
	}

	reader->base = max_off;
	return !!reader->count;
}

/*
 * sysfs "error" read: stream the dump header followed by every saved
 * error task, under the error task list lock.
 */
static ssize_t lima_error_state_read(struct file *filp, struct kobject *kobj,
				     struct bin_attribute *attr, char *buf,
				     loff_t off, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct lima_device *ldev = dev_get_drvdata(dev);
	struct lima_sched_error_task *et;
	struct lima_block_reader reader = {
		.dst = buf,
		.count = count,
		.off = off,
	};

	mutex_lock(&ldev->error_task_list_lock);

	if (lima_read_block(&reader, &ldev->dump, sizeof(ldev->dump))) {
		list_for_each_entry(et, &ldev->error_task_list, list) {
			if (!lima_read_block(&reader, et->data, et->size))
				break;
		}
	}

	mutex_unlock(&ldev->error_task_list_lock);
	return reader.read;
}

/*
 * sysfs "error" write: any write discards all saved error tasks and
 * resets the dump header (the written data itself is ignored).
 */
static ssize_t lima_error_state_write(struct file *file, struct kobject *kobj,
				      struct bin_attribute *attr, char *buf,
				      loff_t off, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct lima_device *ldev = dev_get_drvdata(dev);
	struct lima_sched_error_task *et, *tmp;

	mutex_lock(&ldev->error_task_list_lock);

	list_for_each_entry_safe(et, tmp, &ldev->error_task_list, list) {
		list_del(&et->list);
		kvfree(et);
	}

	ldev->dump.size = 0;
	ldev->dump.num_tasks = 0;

	mutex_unlock(&ldev->error_task_list_lock);

	return count;
}

static const struct bin_attribute lima_error_state_attr = {
	.attr.name = "error",
	.attr.mode = 0600,
	.size = 0,
	.read = lima_error_state_read,
	.write = lima_error_state_write,
};

/*
 * Platform probe: bring up the task slab, the lima device, devfreq and
 * runtime PM, then register the DRM device.  The error ladder unwinds
 * in strict reverse order of initialization.
 */
static int lima_pdev_probe(struct platform_device *pdev)
{
	struct lima_device *ldev;
	struct drm_device *ddev;
	int err;

	err = lima_sched_slab_init();
	if (err)
		return err;

	ldev = devm_kzalloc(&pdev->dev, sizeof(*ldev), GFP_KERNEL);
	if (!ldev) {
		err = -ENOMEM;
		goto err_out0;
	}

	ldev->dev = &pdev->dev;
	ldev->id = (enum lima_gpu_id)of_device_get_match_data(&pdev->dev);

	platform_set_drvdata(pdev, ldev);

	/* Allocate and initialize the DRM device. */
	ddev = drm_dev_alloc(&lima_drm_driver, &pdev->dev);
	if (IS_ERR(ddev)) {
		err = PTR_ERR(ddev);
		goto err_out0;
	}

	ddev->dev_private = ldev;
	ldev->ddev = ddev;

	err = lima_device_init(ldev);
	if (err)
		goto err_out1;

	err = lima_devfreq_init(ldev);
	if (err) {
		dev_err(&pdev->dev, "Fatal error during devfreq init\n");
		goto err_out2;
	}

	pm_runtime_set_active(ldev->dev);
	pm_runtime_mark_last_busy(ldev->dev);
	pm_runtime_set_autosuspend_delay(ldev->dev, 200);
	pm_runtime_use_autosuspend(ldev->dev);
	pm_runtime_enable(ldev->dev);

	/*
	 * Register the DRM device with the core and the connectors with
	 * sysfs.
	 */
	err = drm_dev_register(ddev, 0);
	if (err < 0)
		goto err_out3;

	/* Error-dump interface is best effort; warn but don't fail probe. */
	if (sysfs_create_bin_file(&ldev->dev->kobj, &lima_error_state_attr))
		dev_warn(ldev->dev, "fail to create error state sysfs\n");

	return 0;

err_out3:
	pm_runtime_disable(ldev->dev);
	lima_devfreq_fini(ldev);
err_out2:
	lima_device_fini(ldev);
err_out1:
	drm_dev_put(ddev);
err_out0:
	lima_sched_slab_fini();
	return err;
}

/* Platform remove: mirror of probe, in reverse order. */
static void lima_pdev_remove(struct platform_device *pdev)
{
	struct lima_device *ldev = platform_get_drvdata(pdev);
	struct drm_device *ddev = ldev->ddev;

	sysfs_remove_bin_file(&ldev->dev->kobj, &lima_error_state_attr);

	drm_dev_unregister(ddev);

	/* stop autosuspend to make sure device is in active state */
	pm_runtime_set_autosuspend_delay(ldev->dev, -1);
	pm_runtime_disable(ldev->dev);

	lima_devfreq_fini(ldev);
	lima_device_fini(ldev);

	drm_dev_put(ddev);
	lima_sched_slab_fini();
}

static const struct of_device_id dt_match[] = {
	{ .compatible = "arm,mali-400", .data = (void *)lima_gpu_mali400 },
	{ .compatible = "arm,mali-450", .data = (void *)lima_gpu_mali450 },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);

static const struct dev_pm_ops lima_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(lima_device_suspend, lima_device_resume, NULL)
};

static struct platform_driver lima_platform_driver = {
	.probe      = lima_pdev_probe,
	.remove_new = lima_pdev_remove,
	.driver     = {
		.name           = "lima",
		.pm             = &lima_pm_ops,
		.of_match_table = dt_match,
	},
};

module_platform_driver(lima_platform_driver);

MODULE_AUTHOR("Lima Project Developers");
MODULE_DESCRIPTION("Lima DRM Driver");
MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/lima/lima_drv.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <[email protected]> */

#include <linux/mm.h>
#include <linux/iosys-map.h>
#include <linux/sync_file.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-mapping.h>

#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>

#include <drm/lima_drm.h>

#include "lima_drv.h"
#include "lima_gem.h"
#include "lima_vm.h"

/*
 * Grow a heap BO's backing store.  Heap BOs start with
 * lima_heap_init_nr_pages pages and double on each growth request
 * (triggered by a PP page fault), capped at the BO's full size.
 *
 * The page array is sized for the full BO up front; only pages
 * [old_size, new_size) are faulted in here.  A new sg_table is built
 * for the whole populated range and swapped in for the old one, then
 * the new range is mapped into @vm (if given).
 *
 * Returns 0 on success, -ENOSPC when already at full size, or a
 * negative errno.
 */
int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
{
	struct page **pages;
	struct address_space *mapping = bo->base.base.filp->f_mapping;
	struct device *dev = bo->base.base.dev->dev;
	size_t old_size = bo->heap_size;
	size_t new_size = bo->heap_size ? bo->heap_size * 2 :
		(lima_heap_init_nr_pages << PAGE_SHIFT);
	struct sg_table sgt;
	int i, ret;

	if (bo->heap_size >= bo->base.base.size)
		return -ENOSPC;

	new_size = min(new_size, bo->base.base.size);

	dma_resv_lock(bo->base.base.resv, NULL);

	if (bo->base.pages) {
		pages = bo->base.pages;
	} else {
		/* First growth: allocate the page array for the whole BO. */
		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
				       sizeof(*pages), GFP_KERNEL | __GFP_ZERO);
		if (!pages) {
			dma_resv_unlock(bo->base.base.resv);
			return -ENOMEM;
		}

		bo->base.pages = pages;
		bo->base.pages_use_count = 1;

		mapping_set_unevictable(mapping);
	}

	/* Fault in only the newly added page range. */
	for (i = old_size >> PAGE_SHIFT; i < new_size >> PAGE_SHIFT; i++) {
		struct page *page = shmem_read_mapping_page(mapping, i);

		if (IS_ERR(page)) {
			dma_resv_unlock(bo->base.base.resv);
			return PTR_ERR(page);
		}
		pages[i] = page;
	}

	dma_resv_unlock(bo->base.base.resv);

	/* Build a fresh sg_table covering all populated pages (0..i). */
	ret = sg_alloc_table_from_pages(&sgt, pages, i, 0,
					new_size, GFP_KERNEL);
	if (ret)
		return ret;

	if (bo->base.sgt) {
		/* Tear down the old, smaller DMA mapping before replacing. */
		dma_unmap_sgtable(dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
		sg_free_table(bo->base.sgt);
	} else {
		bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL);
		if (!bo->base.sgt) {
			sg_free_table(&sgt);
			return -ENOMEM;
		}
	}

	ret = dma_map_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0);
	if (ret) {
		sg_free_table(&sgt);
		kfree(bo->base.sgt);
		bo->base.sgt = NULL;
		return ret;
	}

	*bo->base.sgt = sgt;

	if (vm) {
		ret = lima_vm_map_bo(vm, bo, old_size >> PAGE_SHIFT);
		if (ret)
			return ret;
	}

	bo->heap_size = new_size;
	return 0;
}

/*
 * Create a shmem-backed GEM BO and return a handle for it.  The backing
 * mapping is restricted to DMA32 because the Mali Utgard GPU only has a
 * 32-bit address space.  Heap BOs get their initial pages allocated
 * immediately; regular BOs get a full sg_table up front.
 */
int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
			   u32 size, u32 flags, u32 *handle)
{
	int err;
	gfp_t mask;
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	struct lima_bo *bo;
	bool is_heap = flags & LIMA_BO_FLAG_HEAP;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	obj = &shmem->base;

	/* Mali Utgard GPU can only support 32bit address space */
	mask = mapping_gfp_mask(obj->filp->f_mapping);
	mask &= ~__GFP_HIGHMEM;
	mask |= __GFP_DMA32;
	mapping_set_gfp_mask(obj->filp->f_mapping, mask);

	if (is_heap) {
		bo = to_lima_bo(obj);
		err = lima_heap_alloc(bo, NULL);
		if (err)
			goto out;
	} else {
		struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(shmem);

		if (IS_ERR(sgt)) {
			err = PTR_ERR(sgt);
			goto out;
		}
	}

	err = drm_gem_handle_create(file, obj, handle);

out:
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return err;
}

/* GEM free callback; a BO must have no remaining VA mappings by now. */
static void lima_gem_free_object(struct drm_gem_object *obj)
{
	struct lima_bo *bo = to_lima_bo(obj);

	if (!list_empty(&bo->va))
		dev_err(obj->dev->dev, "lima gem free bo still has va\n");

	drm_gem_shmem_free(&bo->base);
}

/* GEM open callback: map the BO into the opening client's VM. */
static int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
{
	struct lima_bo *bo = to_lima_bo(obj);
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;

	return lima_vm_bo_add(vm, bo, true);
}

/* GEM close callback: drop the BO's mapping in the client's VM. */
static void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
{
	struct lima_bo *bo = to_lima_bo(obj);
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;

	lima_vm_bo_del(vm, bo);
}

/* Heap BOs grow dynamically, so they cannot be pinned for export. */
static int lima_gem_pin(struct drm_gem_object *obj)
{
	struct lima_bo *bo = to_lima_bo(obj);

	if (bo->heap_size)
		return -EINVAL;

	return drm_gem_shmem_pin(&bo->base);
}

/* Heap BOs cannot be kernel-vmapped either (backing store may change). */
static int lima_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	struct lima_bo *bo = to_lima_bo(obj);

	if (bo->heap_size)
		return -EINVAL;

	return drm_gem_shmem_vmap(&bo->base, map);
}

/* Heap BOs are GPU-fault-filled only; userspace mmap is not allowed. */
static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct lima_bo *bo = to_lima_bo(obj);

	if (bo->heap_size)
		return -EINVAL;

	return drm_gem_shmem_mmap(&bo->base, vma);
}

static const struct drm_gem_object_funcs lima_gem_funcs = {
	.free = lima_gem_free_object,
	.open = lima_gem_object_open,
	.close = lima_gem_object_close,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = lima_gem_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = lima_gem_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = lima_gem_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

/*
 * drm_driver.gem_create_object hook: allocate the lima-specific BO
 * wrapper around the shmem GEM object.  Mappings default to
 * write-combined (map_wc) since the GPU is not cache coherent.
 */
struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size)
{
	struct lima_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	mutex_init(&bo->lock);
	INIT_LIST_HEAD(&bo->va);
	bo->base.map_wc = true;
	bo->base.base.funcs = &lima_gem_funcs;

	return &bo->base.base;
}

/* Report a BO's GPU VA (in the caller's VM) and its mmap offset. */
int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
{
	struct drm_gem_object *obj;
	struct lima_bo *bo;
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	bo = to_lima_bo(obj);

	*va = lima_vm_get_va(vm, bo);

	*offset = drm_vma_node_offset_addr(&obj->vma_node);

	drm_gem_object_put(obj);
	return 0;
}

/*
 * Per-BO submit-time synchronization: reserve a fence slot, and for
 * implicit sync also add the BO's existing fences as job dependencies.
 */
static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
			    bool write, bool explicit)
{
	int err;

	err = dma_resv_reserve_fences(lima_bo_resv(bo), 1);
	if (err)
		return err;

	/* explicit sync use user passed dep fence */
	if (explicit)
		return 0;

	return drm_sched_job_add_implicit_dependencies(&task->base,
						       &bo->base.base,
						       write);
}

/* Add the (up to two) user-supplied in-fence syncobjs as job deps. */
static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(submit->in_sync); i++) {
		if (!submit->in_sync[i])
			continue;

		err = drm_sched_job_add_syncobj_dependency(&submit->task->base, file,
							   submit->in_sync[i], 0);
		if (err)
			return err;
	}

	return 0;
}

/*
 * Core submit path: look up and VM-pin all BOs, lock their reservations,
 * init the scheduler task, attach dependencies, queue the task, publish
 * its fence on every BO's reservation, then unlock and drop lookup refs.
 *
 * The VM-map refs taken here (lima_vm_bo_add) are dropped in
 * lima_sched_free_job() when the task completes; on error they are
 * dropped in err_out0 below.
 */
int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
{
	int i, err = 0;
	struct ww_acquire_ctx ctx;
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;
	struct drm_syncobj *out_sync = NULL;
	struct dma_fence *fence;
	struct lima_bo **bos = submit->lbos;

	if (submit->out_sync) {
		out_sync = drm_syncobj_find(file, submit->out_sync);
		if (!out_sync)
			return -ENOENT;
	}

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj;
		struct lima_bo *bo;

		obj = drm_gem_object_lookup(file, submit->bos[i].handle);
		if (!obj) {
			err = -ENOENT;
			goto err_out0;
		}

		bo = to_lima_bo(obj);

		/* increase refcnt of gpu va map to prevent unmapped when executing,
		 * will be decreased when task done
		 */
		err = lima_vm_bo_add(vm, bo, false);
		if (err) {
			drm_gem_object_put(obj);
			goto err_out0;
		}

		bos[i] = bo;
	}

	err = drm_gem_lock_reservations((struct drm_gem_object **)bos,
					submit->nr_bos, &ctx);
	if (err)
		goto err_out0;

	err = lima_sched_task_init(
		submit->task, submit->ctx->context + submit->pipe,
		bos, submit->nr_bos, vm);
	if (err)
		goto err_out1;

	err = lima_gem_add_deps(file, submit);
	if (err)
		goto err_out2;

	for (i = 0; i < submit->nr_bos; i++) {
		err = lima_gem_sync_bo(
			submit->task, bos[i],
			submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE,
			submit->flags & LIMA_SUBMIT_FLAG_EXPLICIT_FENCE);
		if (err)
			goto err_out2;
	}

	fence = lima_sched_context_queue_task(submit->task);

	for (i = 0; i < submit->nr_bos; i++) {
		dma_resv_add_fence(lima_bo_resv(bos[i]), fence,
				   submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE ?
				   DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
	}

	drm_gem_unlock_reservations((struct drm_gem_object **)bos,
				    submit->nr_bos, &ctx);

	for (i = 0; i < submit->nr_bos; i++)
		drm_gem_object_put(&bos[i]->base.base);

	if (out_sync) {
		drm_syncobj_replace_fence(out_sync, fence);
		drm_syncobj_put(out_sync);
	}

	dma_fence_put(fence);

	return 0;

err_out2:
	lima_sched_task_fini(submit->task);
err_out1:
	drm_gem_unlock_reservations((struct drm_gem_object **)bos,
				    submit->nr_bos, &ctx);
err_out0:
	/* bos[] is zeroed past the last successful lookup, so stop there. */
	for (i = 0; i < submit->nr_bos; i++) {
		if (!bos[i])
			break;
		lima_vm_bo_del(vm, bos[i]);
		drm_gem_object_put(&bos[i]->base.base);
	}
	if (out_sync)
		drm_syncobj_put(out_sync);
	return err;
}

/*
 * Wait for a BO's reservation fences.  op selects read and/or write
 * waiting; op == 0 returns immediately.  -ETIME from the resv wait is
 * translated to -ETIMEDOUT (or -EBUSY for a zero timeout poll).
 */
int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns)
{
	bool write = op & LIMA_GEM_WAIT_WRITE;
	long ret, timeout;

	if (!op)
		return 0;

	timeout = drm_timeout_abs_to_jiffies(timeout_ns);

	ret = drm_gem_dma_resv_wait(file, handle, write, timeout);
	if (ret == -ETIME)
		ret = timeout ? -ETIMEDOUT : -EBUSY;

	return ret;
}
linux-master
drivers/gpu/drm/lima/lima_gem.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <[email protected]> */

#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/device.h>

#include "lima_device.h"
#include "lima_mmu.h"
#include "lima_vm.h"
#include "lima_regs.h"

/* MMIO accessors for the MMU instance bound to 'ip' in the local scope. */
#define mmu_write(reg, data) writel(data, ip->iomem + reg)
#define mmu_read(reg) readl(ip->iomem + reg)

/*
 * Issue an MMU command then poll register 'addr' (up to 100 us) until
 * 'cond' holds.  Evaluates to 0 on success or the readl_poll_timeout
 * errno; logs on timeout.  Requires 'ip' and 'dev' in scope.
 */
#define lima_mmu_send_command(cmd, addr, val, cond)	     \
({							     \
	int __ret;					     \
							     \
	mmu_write(LIMA_MMU_COMMAND, cmd);		     \
	__ret = readl_poll_timeout(ip->iomem + (addr), val,  \
				  cond, 0, 100);	     \
	if (__ret)					     \
		dev_err(dev->dev,			     \
			"mmu command %x timeout\n", cmd);    \
	__ret;						     \
})

/*
 * MMU interrupt handler: log page faults / bus errors, mask and clear
 * the interrupt, then notify the owning sched pipe so the fault is
 * handled (recovery or reset) from non-IRQ context.
 */
static irqreturn_t lima_mmu_irq_handler(int irq, void *data)
{
	struct lima_ip *ip = data;
	struct lima_device *dev = ip->dev;
	u32 status = mmu_read(LIMA_MMU_INT_STATUS);
	struct lima_sched_pipe *pipe;

	/* for shared irq case */
	if (!status)
		return IRQ_NONE;

	if (status & LIMA_MMU_INT_PAGE_FAULT) {
		u32 fault = mmu_read(LIMA_MMU_PAGE_FAULT_ADDR);

		dev_err(dev->dev, "mmu page fault at 0x%x from bus id %d of type %s on %s\n",
			fault, LIMA_MMU_STATUS_BUS_ID(status),
			status & LIMA_MMU_STATUS_PAGE_FAULT_IS_WRITE ? "write" : "read",
			lima_ip_name(ip));
	}

	if (status & LIMA_MMU_INT_READ_BUS_ERROR)
		dev_err(dev->dev, "mmu %s irq bus error\n", lima_ip_name(ip));

	/* mask all interrupts before resume */
	mmu_write(LIMA_MMU_INT_MASK, 0);
	mmu_write(LIMA_MMU_INT_CLEAR, status);

	pipe = dev->pipe + (ip->id == lima_ip_gpmmu ? lima_pipe_gp : lima_pipe_pp);
	lima_sched_pipe_mmu_error(pipe);

	return IRQ_HANDLED;
}

/*
 * Hardware (re)init sequence: hard reset, unmask fault/bus-error
 * interrupts, point the MMU at the empty VM's page directory, and
 * enable paging.  Returns 0 or a command-timeout errno.
 */
static int lima_mmu_hw_init(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;
	int err;
	u32 v;

	mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_HARD_RESET);
	err = lima_mmu_send_command(LIMA_MMU_COMMAND_HARD_RESET,
				    LIMA_MMU_DTE_ADDR, v, v == 0);
	if (err)
		return err;

	mmu_write(LIMA_MMU_INT_MASK,
		  LIMA_MMU_INT_PAGE_FAULT | LIMA_MMU_INT_READ_BUS_ERROR);
	mmu_write(LIMA_MMU_DTE_ADDR, dev->empty_vm->pd.dma);
	return lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_PAGING,
				     LIMA_MMU_STATUS, v,
				     v & LIMA_MMU_STATUS_PAGING_ENABLED);
}

/* The broadcast PP MMU has no per-instance state; only real MMUs init. */
int lima_mmu_resume(struct lima_ip *ip)
{
	if (ip->id == lima_ip_ppmmu_bcast)
		return 0;

	return lima_mmu_hw_init(ip);
}

void lima_mmu_suspend(struct lima_ip *ip)
{

}

/*
 * Probe-time init: sanity-check the DTE_ADDR register (low bits must
 * read back as zero), install the (possibly shared) IRQ handler, then
 * run the hardware init sequence.
 */
int lima_mmu_init(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;
	int err;

	if (ip->id == lima_ip_ppmmu_bcast)
		return 0;

	mmu_write(LIMA_MMU_DTE_ADDR, 0xCAFEBABE);
	if (mmu_read(LIMA_MMU_DTE_ADDR) != 0xCAFEB000) {
		dev_err(dev->dev, "mmu %s dte write test fail\n",
			lima_ip_name(ip));
		return -EIO;
	}

	err = devm_request_irq(dev->dev, ip->irq, lima_mmu_irq_handler,
			       IRQF_SHARED, lima_ip_name(ip), ip);
	if (err) {
		dev_err(dev->dev, "mmu %s fail to request irq\n",
			lima_ip_name(ip));
		return err;
	}

	return lima_mmu_hw_init(ip);
}

void lima_mmu_fini(struct lima_ip *ip)
{

}

void lima_mmu_flush_tlb(struct lima_ip *ip)
{
	mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_ZAP_CACHE);
}

/*
 * Switch the MMU to a new VM's page directory: stall the MMU, swap the
 * directory pointer, zap the TLB, then unstall.  Command timeouts are
 * logged by the macro but not propagated here.
 */
void lima_mmu_switch_vm(struct lima_ip *ip, struct lima_vm *vm)
{
	struct lima_device *dev = ip->dev;
	u32 v;

	lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_STALL,
			      LIMA_MMU_STATUS, v,
			      v & LIMA_MMU_STATUS_STALL_ACTIVE);

	mmu_write(LIMA_MMU_DTE_ADDR, vm->pd.dma);

	/* flush the TLB */
	mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_ZAP_CACHE);

	lima_mmu_send_command(LIMA_MMU_COMMAND_DISABLE_STALL,
			      LIMA_MMU_STATUS, v,
			      !(v & LIMA_MMU_STATUS_STALL_ACTIVE));
}

/*
 * Bring a faulted MMU back to life after the reset path has run: if a
 * page fault is still active, hard-reset and re-run the enable sequence
 * against the empty VM.  Interrupts were masked in the IRQ handler.
 */
void lima_mmu_page_fault_resume(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;
	u32 status = mmu_read(LIMA_MMU_STATUS);
	u32 v;

	if (status & LIMA_MMU_STATUS_PAGE_FAULT_ACTIVE) {
		dev_info(dev->dev, "mmu resume\n");

		mmu_write(LIMA_MMU_INT_MASK, 0);
		mmu_write(LIMA_MMU_DTE_ADDR, 0xCAFEBABE);
		lima_mmu_send_command(LIMA_MMU_COMMAND_HARD_RESET,
				      LIMA_MMU_DTE_ADDR, v, v == 0);
		mmu_write(LIMA_MMU_INT_MASK, LIMA_MMU_INT_PAGE_FAULT | LIMA_MMU_INT_READ_BUS_ERROR);
		mmu_write(LIMA_MMU_DTE_ADDR, dev->empty_vm->pd.dma);
		lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_PAGING,
				      LIMA_MMU_STATUS, v,
				      v & LIMA_MMU_STATUS_PAGING_ENABLED);
	}
}
linux-master
drivers/gpu/drm/lima/lima_mmu.c
// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 *  Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <[email protected]>
 */

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/shmem_fs.h>

#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>

#include <xen/balloon.h>
#include <xen/xen.h>

#include "xen_drm_front.h"
#include "xen_drm_front_gem.h"

struct xen_gem_object {
	struct drm_gem_object base;

	size_t num_pages;
	struct page **pages;

	/* set for buffers allocated by the backend */
	bool be_alloc;

	/* this is for imported PRIME buffer */
	struct sg_table *sgt_imported;
};

static inline struct xen_gem_object *
to_xen_gem_obj(struct drm_gem_object *gem_obj)
{
	return container_of(gem_obj, struct xen_gem_object, base);
}

/* Size and allocate the page-pointer array for a buffer of buf_size bytes. */
static int gem_alloc_pages_array(struct xen_gem_object *xen_obj,
				 size_t buf_size)
{
	xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
	xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
					sizeof(struct page *), GFP_KERNEL);
	return !xen_obj->pages ? -ENOMEM : 0;
}

/* Free only the pointer array, not the pages it references. */
static void gem_free_pages_array(struct xen_gem_object *xen_obj)
{
	kvfree(xen_obj->pages);
	xen_obj->pages = NULL;
}

/*
 * mmap callback: map the whole buffer's pages into the VMA eagerly,
 * so no .fault handler is ever needed (CPU and backend both see the
 * same pages).
 */
static int xen_drm_front_gem_object_mmap(struct drm_gem_object *gem_obj,
					 struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
	int ret;

	vma->vm_ops = gem_obj->funcs->vm_ops;

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vm_flags_mod(vma, VM_MIXEDMAP | VM_DONTEXPAND, VM_PFNMAP);
	vma->vm_pgoff = 0;

	/*
	 * According to Xen on ARM ABI (xen/include/public/arch-arm.h):
	 * all memory which is shared with other entities in the system
	 * (including the hypervisor and other guests) must reside in memory
	 * which is mapped as Normal Inner Write-Back Outer Write-Back
	 * Inner-Shareable.
	 */
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	/*
	 * vm_operations_struct.fault handler will be called if CPU access
	 * to VM is here. For GPUs this isn't the case, because CPU doesn't
	 * touch the memory. Insert pages now, so both CPU and GPU are happy.
	 *
	 * FIXME: as we insert all the pages now then no .fault handler must
	 * be called, so don't provide one
	 */
	ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
	if (ret < 0)
		DRM_ERROR("Failed to map pages into vma: %d\n", ret);

	return ret;
}

static const struct vm_operations_struct xen_drm_drv_vm_ops = {
	.open           = drm_gem_vm_open,
	.close          = drm_gem_vm_close,
};

/*
 * NOTE(review): .free points at xen_drm_front_gem_object_free, which is
 * not defined in this file — presumably declared/defined elsewhere in
 * the driver (xen_drm_front_gem.h / xen_drm_front.c); verify against
 * the rest of the tree.
 */
static const struct drm_gem_object_funcs xen_drm_front_gem_object_funcs = {
	.free = xen_drm_front_gem_object_free,
	.get_sg_table = xen_drm_front_gem_get_sg_table,
	.vmap = xen_drm_front_gem_prime_vmap,
	.vunmap = xen_drm_front_gem_prime_vunmap,
	.mmap = xen_drm_front_gem_object_mmap,
	.vm_ops = &xen_drm_drv_vm_ops,
};

/* Allocate and register the bare GEM object wrapper (no pages yet). */
static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
					     size_t size)
{
	struct xen_gem_object *xen_obj;
	int ret;

	xen_obj = kzalloc(sizeof(*xen_obj), GFP_KERNEL);
	if (!xen_obj)
		return ERR_PTR(-ENOMEM);

	xen_obj->base.funcs = &xen_drm_front_gem_object_funcs;

	ret = drm_gem_object_init(dev, &xen_obj->base, size);
	if (ret < 0) {
		kfree(xen_obj);
		return ERR_PTR(ret);
	}

	return xen_obj;
}

/*
 * Create a buffer object.  Two backing strategies, chosen by the
 * backend config:
 *  - be_alloc: the backend owns the memory; we allocate ballooned
 *    (unpopulated) pages that will later be grant-mapped to it;
 *  - otherwise: allocate real shmem pages locally and share them with
 *    the backend.
 */
static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR(xen_obj))
		return xen_obj;

	if (drm_info->front_info->cfg.be_alloc) {
		/*
		 * backend will allocate space for this buffer, so
		 * only allocate array of pointers to pages
		 */
		ret = gem_alloc_pages_array(xen_obj, size);
		if (ret < 0)
			goto fail;

		/*
		 * allocate ballooned pages which will be used to map
		 * grant references provided by the backend
		 */
		ret = xen_alloc_unpopulated_pages(xen_obj->num_pages,
						  xen_obj->pages);
		if (ret < 0) {
			DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
				  xen_obj->num_pages, ret);
			gem_free_pages_array(xen_obj);
			goto fail;
		}

		xen_obj->be_alloc = true;
		return xen_obj;
	}
	/*
	 * need to allocate backing pages now, so we can share those
	 * with the backend
	 */
	xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
	if (IS_ERR(xen_obj->pages)) {
		ret = PTR_ERR(xen_obj->pages);
		xen_obj->pages = NULL;
		goto fail;
	}

	return xen_obj;

fail:
	DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
	return ERR_PTR(ret);
}

/* Public entry point used by the KMS code to create a display buffer. */
struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
						size_t size)
{
	struct xen_gem_object *xen_obj;

	xen_obj = gem_create(dev, size);
	if (IS_ERR(xen_obj))
		return ERR_CAST(xen_obj);

	return &xen_obj->base;
}

/*
 * Free a buffer object, undoing whichever backing strategy was used:
 * imported PRIME buffers release their sg_table; be_alloc buffers free
 * their ballooned pages; locally backed buffers put their shmem pages.
 */
void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (xen_obj->base.import_attach) {
		drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt_imported);
		gem_free_pages_array(xen_obj);
	} else {
		if (xen_obj->pages) {
			if (xen_obj->be_alloc) {
				xen_free_unpopulated_pages(xen_obj->num_pages,
							   xen_obj->pages);
				gem_free_pages_array(xen_obj);
			} else {
				drm_gem_put_pages(&xen_obj->base,
						  xen_obj->pages, true, false);
			}
		}
	}
	drm_gem_object_release(gem_obj);
	kfree(xen_obj);
}

struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	return xen_obj->pages;
}

/* Build an sg_table for PRIME export; fails if pages were never set up. */
struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return ERR_PTR(-ENOMEM);

	return drm_prime_pages_to_sg(gem_obj->dev,
				     xen_obj->pages, xen_obj->num_pages);
}

/*
 * PRIME import: wrap the importer-provided sg_table in a GEM object and
 * announce the buffer (with its pages) to the Xen backend.  On the error
 * paths the partially constructed object is left to be reclaimed via the
 * normal GEM free path once the caller drops its reference.
 */
struct drm_gem_object *
xen_drm_front_gem_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	size_t size;
	int ret;

	size = attach->dmabuf->size;
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR(xen_obj))
		return ERR_CAST(xen_obj);

	ret = gem_alloc_pages_array(xen_obj, size);
	if (ret < 0)
		return ERR_PTR(ret);

	xen_obj->sgt_imported = sgt;

	ret = drm_prime_sg_to_page_array(sgt, xen_obj->pages,
					 xen_obj->num_pages);
	if (ret < 0)
		return ERR_PTR(ret);

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(&xen_obj->base),
					0, 0, 0, size, sgt->sgl->offset,
					xen_obj->pages);
	if (ret < 0)
		return ERR_PTR(ret);

	DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
		  size, sgt->orig_nents);

	return &xen_obj->base;
}

/*
 * PRIME vmap: build a contiguous kernel mapping over the buffer pages.
 * See the mmap comment above for the required memory attributes.
 */
int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj,
				 struct iosys_map *map)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
	void *vaddr;

	if (!xen_obj->pages)
		return -ENOMEM;

	/* Please see comment in gem_mmap_obj on mapping and attributes. */
	vaddr = vmap(xen_obj->pages, xen_obj->num_pages,
		     VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return -ENOMEM;
	iosys_map_set_vaddr(map, vaddr);

	return 0;
}

void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
				    struct iosys_map *map)
{
	vunmap(map->vaddr);
}
linux-master
drivers/gpu/drm/xen/xen_drm_front_gem.c
// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <[email protected]>
 */

#include <linux/errno.h>
#include <linux/irq.h>

#include <drm/drm_print.h>

#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/grant_table.h>

#include "xen_drm_front.h"
#include "xen_drm_front_evtchnl.h"

/*
 * Interrupt handler for the control (request/response) event channel:
 * walks the shared ring, matches responses against the last sent request id
 * and completes the waiter with the backend's status.
 *
 * Runs in interrupt context under front_info->io_lock.
 */
static irqreturn_t evtchnl_interrupt_ctrl(int irq, void *dev_id)
{
	struct xen_drm_front_evtchnl *evtchnl = dev_id;
	struct xen_drm_front_info *front_info = evtchnl->front_info;
	struct xendispl_resp *resp;
	RING_IDX i, rp;
	unsigned long flags;

	/* Ignore spurious interrupts while the channel is (dis)connecting. */
	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
		return IRQ_HANDLED;

	spin_lock_irqsave(&front_info->io_lock, flags);

again:
	rp = evtchnl->u.req.ring.sring->rsp_prod;
	/* ensure we see queued responses up to rp */
	virt_rmb();

	for (i = evtchnl->u.req.ring.rsp_cons; i != rp; i++) {
		resp = RING_GET_RESPONSE(&evtchnl->u.req.ring, i);
		/* Only the response to the most recent request is of interest. */
		if (unlikely(resp->id != evtchnl->evt_id))
			continue;

		switch (resp->operation) {
		case XENDISPL_OP_PG_FLIP:
		case XENDISPL_OP_FB_ATTACH:
		case XENDISPL_OP_FB_DETACH:
		case XENDISPL_OP_DBUF_CREATE:
		case XENDISPL_OP_DBUF_DESTROY:
		case XENDISPL_OP_SET_CONFIG:
			/* Hand the status to the synchronous waiter. */
			evtchnl->u.req.resp_status = resp->status;
			complete(&evtchnl->u.req.completion);
			break;

		default:
			DRM_ERROR("Operation %d is not supported\n",
				  resp->operation);
			break;
		}
	}

	evtchnl->u.req.ring.rsp_cons = i;

	if (i != evtchnl->u.req.ring.req_prod_pvt) {
		int more_to_do;

		/* More responses may have arrived while we were processing. */
		RING_FINAL_CHECK_FOR_RESPONSES(&evtchnl->u.req.ring,
					       more_to_do);
		if (more_to_do)
			goto again;
	} else {
		/* All caught up: ask for notification on the next response. */
		evtchnl->u.req.ring.sring->rsp_event = i + 1;
	}

	spin_unlock_irqrestore(&front_info->io_lock, flags);
	return IRQ_HANDLED;
}

/*
 * Interrupt handler for the in-event channel: consumes backend events from
 * the shared event page and dispatches page-flip completions to the KMS code.
 *
 * Runs in interrupt context under front_info->io_lock.
 */
static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
{
	struct xen_drm_front_evtchnl *evtchnl = dev_id;
	struct xen_drm_front_info *front_info = evtchnl->front_info;
	struct xendispl_event_page *page = evtchnl->u.evt.page;
	u32 cons, prod;
	unsigned long flags;

	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
		return IRQ_HANDLED;

	spin_lock_irqsave(&front_info->io_lock, flags);

	prod = page->in_prod;
	/* ensure we see ring contents up to prod */
	virt_rmb();
	if (prod == page->in_cons)
		goto out;

	for (cons = page->in_cons; cons != prod; cons++) {
		struct xendispl_evt *event;

		event = &XENDISPL_IN_RING_REF(page, cons);
		/* Skip events with unexpected ids; evt_id tracks the sequence. */
		if (unlikely(event->id != evtchnl->evt_id++))
			continue;

		switch (event->type) {
		case XENDISPL_EVT_PG_FLIP:
			xen_drm_front_on_frame_done(front_info, evtchnl->index,
						    event->op.pg_flip.fb_cookie);
			break;
		}
	}
	page->in_cons = cons;
	/* ensure ring contents */
	virt_wmb();

out:
	spin_unlock_irqrestore(&front_info->io_lock, flags);
	return IRQ_HANDLED;
}

/*
 * Tear down a single event channel: wake any waiter with -EIO, unbind the
 * IRQ, free the Xen event channel and the shared ring/event page, and wipe
 * the descriptor so it can be reused.
 */
static void evtchnl_free(struct xen_drm_front_info *front_info,
			 struct xen_drm_front_evtchnl *evtchnl)
{
	void *page = NULL;

	if (evtchnl->type == EVTCHNL_TYPE_REQ)
		page = evtchnl->u.req.ring.sring;
	else if (evtchnl->type == EVTCHNL_TYPE_EVT)
		page = evtchnl->u.evt.page;

	/* Nothing was ever allocated for this channel. */
	if (!page)
		return;

	evtchnl->state = EVTCHNL_STATE_DISCONNECTED;

	if (evtchnl->type == EVTCHNL_TYPE_REQ) {
		/* release all who still waits for response if any */
		evtchnl->u.req.resp_status = -EIO;
		complete_all(&evtchnl->u.req.completion);
	}

	if (evtchnl->irq)
		unbind_from_irqhandler(evtchnl->irq, evtchnl);

	if (evtchnl->port)
		xenbus_free_evtchn(front_info->xb_dev, evtchnl->port);

	/* end access and free the page */
	xenbus_teardown_ring(&page, 1, &evtchnl->gref);

	memset(evtchnl, 0, sizeof(*evtchnl));
}

/*
 * Allocate one event channel of the given type for connector @index:
 * sets up a granted shared page (a request ring or an event page), allocates
 * a Xen event channel and binds the matching interrupt handler to it.
 *
 * Returns 0 on success or a negative errno; on failure partially allocated
 * resources are released by the caller via evtchnl_free() (through
 * xen_drm_front_evtchnl_free_all()).
 */
static int evtchnl_alloc(struct xen_drm_front_info *front_info, int index,
			 struct xen_drm_front_evtchnl *evtchnl,
			 enum xen_drm_front_evtchnl_type type)
{
	struct xenbus_device *xb_dev = front_info->xb_dev;
	void *page;
	irq_handler_t handler;
	int ret;

	memset(evtchnl, 0, sizeof(*evtchnl));
	evtchnl->type = type;
	evtchnl->index = index;
	evtchnl->front_info = front_info;
	evtchnl->state = EVTCHNL_STATE_DISCONNECTED;

	ret = xenbus_setup_ring(xb_dev, GFP_NOIO | __GFP_HIGH, &page,
				1, &evtchnl->gref);
	if (ret)
		goto fail;

	if (type == EVTCHNL_TYPE_REQ) {
		struct xen_displif_sring *sring;

		init_completion(&evtchnl->u.req.completion);
		mutex_init(&evtchnl->u.req.req_io_lock);
		sring = page;
		XEN_FRONT_RING_INIT(&evtchnl->u.req.ring, sring, XEN_PAGE_SIZE);

		handler = evtchnl_interrupt_ctrl;
	} else {
		evtchnl->u.evt.page = page;
		handler = evtchnl_interrupt_evt;
	}

	ret = xenbus_alloc_evtchn(xb_dev, &evtchnl->port);
	if (ret < 0)
		goto fail;

	ret = bind_evtchn_to_irqhandler(evtchnl->port,
					handler, 0, xb_dev->devicetype,
					evtchnl);
	if (ret < 0)
		goto fail;

	/* bind_evtchn_to_irqhandler() returns the IRQ number on success. */
	evtchnl->irq = ret;
	return 0;

fail:
	DRM_ERROR("Failed to allocate ring: %d\n", ret);
	return ret;
}

/*
 * Allocate a request/event channel pair for every configured connector.
 * On any failure all previously allocated pairs are freed.
 */
int xen_drm_front_evtchnl_create_all(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_cfg *cfg;
	int ret, conn;

	cfg = &front_info->cfg;

	front_info->evt_pairs =
			kcalloc(cfg->num_connectors,
				sizeof(struct xen_drm_front_evtchnl_pair),
				GFP_KERNEL);
	if (!front_info->evt_pairs) {
		ret = -ENOMEM;
		goto fail;
	}

	for (conn = 0; conn < cfg->num_connectors; conn++) {
		ret = evtchnl_alloc(front_info, conn,
				    &front_info->evt_pairs[conn].req,
				    EVTCHNL_TYPE_REQ);
		if (ret < 0) {
			DRM_ERROR("Error allocating control channel\n");
			goto fail;
		}

		ret = evtchnl_alloc(front_info, conn,
				    &front_info->evt_pairs[conn].evt,
				    EVTCHNL_TYPE_EVT);
		if (ret < 0) {
			DRM_ERROR("Error allocating in-event channel\n");
			goto fail;
		}
	}

	front_info->num_evt_pairs = cfg->num_connectors;
	return 0;

fail:
	xen_drm_front_evtchnl_free_all(front_info);
	return ret;
}

/*
 * Write one channel's grant reference and event-channel port into XenStore
 * under @path as part of transaction @xbt, so the backend can connect.
 */
static int evtchnl_publish(struct xenbus_transaction xbt,
			   struct xen_drm_front_evtchnl *evtchnl,
			   const char *path, const char *node_ring,
			   const char *node_chnl)
{
	struct xenbus_device *xb_dev = evtchnl->front_info->xb_dev;
	int ret;

	/* write control channel ring reference */
	ret = xenbus_printf(xbt, path, node_ring, "%u", evtchnl->gref);
	if (ret < 0) {
		xenbus_dev_error(xb_dev, ret, "writing ring-ref");
		return ret;
	}

	/* write event channel ring reference */
	ret = xenbus_printf(xbt, path, node_chnl, "%u", evtchnl->port);
	if (ret < 0) {
		xenbus_dev_error(xb_dev, ret, "writing event channel");
		return ret;
	}

	return 0;
}

/*
 * Publish all connectors' channel parameters to XenStore in a single
 * transaction, retrying the whole transaction on -EAGAIN as the xenbus
 * protocol requires.
 */
int xen_drm_front_evtchnl_publish_all(struct xen_drm_front_info *front_info)
{
	struct xenbus_transaction xbt;
	struct xen_drm_front_cfg *plat_data;
	int ret, conn;

	plat_data = &front_info->cfg;

again:
	ret = xenbus_transaction_start(&xbt);
	if (ret < 0) {
		xenbus_dev_fatal(front_info->xb_dev, ret,
				 "starting transaction");
		return ret;
	}

	for (conn = 0; conn < plat_data->num_connectors; conn++) {
		ret = evtchnl_publish(xbt, &front_info->evt_pairs[conn].req,
				      plat_data->connectors[conn].xenstore_path,
				      XENDISPL_FIELD_REQ_RING_REF,
				      XENDISPL_FIELD_REQ_CHANNEL);
		if (ret < 0)
			goto fail;

		ret = evtchnl_publish(xbt, &front_info->evt_pairs[conn].evt,
				      plat_data->connectors[conn].xenstore_path,
				      XENDISPL_FIELD_EVT_RING_REF,
				      XENDISPL_FIELD_EVT_CHANNEL);
		if (ret < 0)
			goto fail;
	}

	ret = xenbus_transaction_end(xbt, 0);
	if (ret < 0) {
		/* Another concurrent transaction won; retry from scratch. */
		if (ret == -EAGAIN)
			goto again;

		xenbus_dev_fatal(front_info->xb_dev, ret,
				 "completing transaction");
		goto fail_to_end;
	}

	return 0;

fail:
	/* Abort the transaction before reporting the error. */
	xenbus_transaction_end(xbt, 1);

fail_to_end:
	xenbus_dev_fatal(front_info->xb_dev, ret, "writing Xen store");
	return ret;
}

/*
 * Push the already-prepared request onto the shared ring and notify the
 * backend if it is waiting. Caller holds front_info->io_lock.
 */
void xen_drm_front_evtchnl_flush(struct xen_drm_front_evtchnl *evtchnl)
{
	int notify;

	evtchnl->u.req.ring.req_prod_pvt++;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&evtchnl->u.req.ring, notify);
	if (notify)
		notify_remote_via_irq(evtchnl->irq);
}

/*
 * Atomically (under io_lock) switch every request and event channel to
 * @state; used on connect/disconnect transitions.
 */
void xen_drm_front_evtchnl_set_state(struct xen_drm_front_info *front_info,
				     enum xen_drm_front_evtchnl_state state)
{
	unsigned long flags;
	int i;

	if (!front_info->evt_pairs)
		return;

	spin_lock_irqsave(&front_info->io_lock, flags);

	for (i = 0; i < front_info->num_evt_pairs; i++) {
		front_info->evt_pairs[i].req.state = state;
		front_info->evt_pairs[i].evt.state = state;
	}

	spin_unlock_irqrestore(&front_info->io_lock, flags);
}

/* Free every allocated channel pair and the pair array itself. */
void xen_drm_front_evtchnl_free_all(struct xen_drm_front_info *front_info)
{
	int i;

	if (!front_info->evt_pairs)
		return;

	for (i = 0; i < front_info->num_evt_pairs; i++) {
		evtchnl_free(front_info, &front_info->evt_pairs[i].req);
		evtchnl_free(front_info, &front_info->evt_pairs[i].evt);
	}

	kfree(front_info->evt_pairs);
	front_info->evt_pairs = NULL;
}
linux-master
drivers/gpu/drm/xen/xen_drm_front_evtchnl.c
// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <[email protected]>
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>

#include <xen/platform_pci.h>
#include <xen/xen.h>
#include <xen/xenbus.h>

#include <xen/xen-front-pgdir-shbuf.h>
#include <xen/interface/io/displif.h>

#include "xen_drm_front.h"
#include "xen_drm_front_cfg.h"
#include "xen_drm_front_evtchnl.h"
#include "xen_drm_front_gem.h"
#include "xen_drm_front_kms.h"

/*
 * Per-display-buffer bookkeeping: links a GEM-object cookie to the shared
 * buffer (grant page directory) that backs it. Kept on
 * front_info->dbuf_list.
 */
struct xen_drm_front_dbuf {
	struct list_head list;
	u64 dbuf_cookie;
	u64 fb_cookie;

	struct xen_front_pgdir_shbuf shbuf;
};

/* Register @dbuf on the front-end's buffer list under @dbuf_cookie. */
static void dbuf_add_to_list(struct xen_drm_front_info *front_info,
			     struct xen_drm_front_dbuf *dbuf, u64 dbuf_cookie)
{
	dbuf->dbuf_cookie = dbuf_cookie;
	list_add(&dbuf->list, &front_info->dbuf_list);
}

/* Look up a buffer by cookie; returns NULL when not found. */
static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
					   u64 dbuf_cookie)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list)
		if (buf->dbuf_cookie == dbuf_cookie)
			return buf;

	return NULL;
}

/*
 * Unlink the buffer with @dbuf_cookie (if present), unmap and free its
 * shared pages and release the descriptor.
 */
static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list)
		if (buf->dbuf_cookie == dbuf_cookie) {
			list_del(&buf->list);
			xen_front_pgdir_shbuf_unmap(&buf->shbuf);
			xen_front_pgdir_shbuf_free(&buf->shbuf);
			kfree(buf);
			break;
		}
}

/* Release every buffer on the list; used on driver teardown. */
static void dbuf_free_all(struct list_head *dbuf_list)
{
	struct xen_drm_front_dbuf *buf, *q;

	list_for_each_entry_safe(buf, q, dbuf_list, list) {
		list_del(&buf->list);
		xen_front_pgdir_shbuf_unmap(&buf->shbuf);
		xen_front_pgdir_shbuf_free(&buf->shbuf);
		kfree(buf);
	}
}

/*
 * Grab the next free request slot on the ring and stamp it with @operation
 * and a fresh id; evt_id remembers which response to wait for.
 * Caller holds front_info->io_lock.
 */
static struct xendispl_req *
be_prepare_req(struct xen_drm_front_evtchnl *evtchnl, u8 operation)
{
	struct xendispl_req *req;

	req = RING_GET_REQUEST(&evtchnl->u.req.ring,
			       evtchnl->u.req.ring.req_prod_pvt);
	req->operation = operation;
	req->id = evtchnl->evt_next_id++;
	evtchnl->evt_id = req->id;
	return req;
}

/*
 * Arm the completion and push the request to the backend.
 * Returns -EIO if the channel is not connected. Caller holds io_lock.
 */
static int be_stream_do_io(struct xen_drm_front_evtchnl *evtchnl,
			   struct xendispl_req *req)
{
	reinit_completion(&evtchnl->u.req.completion);
	if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
		return -EIO;

	xen_drm_front_evtchnl_flush(evtchnl);
	return 0;
}

/*
 * Wait (with timeout) for the backend's response to the request sent via
 * be_stream_do_io(); returns -ETIMEDOUT or the backend status.
 */
static int be_stream_wait_io(struct xen_drm_front_evtchnl *evtchnl)
{
	if (wait_for_completion_timeout(&evtchnl->u.req.completion,
			msecs_to_jiffies(XEN_DRM_FRONT_WAIT_BACK_MS)) <= 0)
		return -ETIMEDOUT;

	return evtchnl->u.req.resp_status;
}

/*
 * Send a synchronous XENDISPL_OP_SET_CONFIG request to the backend for the
 * pipeline's connector. req_io_lock serializes whole request/response
 * cycles; io_lock protects ring manipulation against the IRQ handler.
 */
int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
			   u32 x, u32 y, u32 width, u32 height,
			   u32 bpp, u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_info *front_info;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	front_info = pipeline->drm_info->front_info;
	evtchnl = &front_info->evt_pairs[pipeline->index].req;
	if (unlikely(!evtchnl))
		return -EIO;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_SET_CONFIG);
	req->op.set_config.x = x;
	req->op.set_config.y = y;
	req->op.set_config.width = width;
	req->op.set_config.height = height;
	req->op.set_config.bpp = bpp;
	req->op.set_config.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

/*
 * Create a display buffer shared with the backend: allocate the grant page
 * directory over @pages, send XENDISPL_OP_DBUF_CREATE and, on success, map
 * the shared buffer. On any failure the local buffer is released.
 */
int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
			      u64 dbuf_cookie, u32 width, u32 height,
			      u32 bpp, u64 size, u32 offset,
			      struct page **pages)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_dbuf *dbuf;
	struct xendispl_req *req;
	struct xen_front_pgdir_shbuf_cfg buf_cfg;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
	if (!dbuf)
		return -ENOMEM;

	dbuf_add_to_list(front_info, dbuf, dbuf_cookie);

	memset(&buf_cfg, 0, sizeof(buf_cfg));
	buf_cfg.xb_dev = front_info->xb_dev;
	buf_cfg.num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	buf_cfg.pages = pages;
	buf_cfg.pgdir = &dbuf->shbuf;
	buf_cfg.be_alloc = front_info->cfg.be_alloc;

	ret = xen_front_pgdir_shbuf_alloc(&buf_cfg);
	if (ret < 0)
		goto fail_shbuf_alloc;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE);
	req->op.dbuf_create.gref_directory =
			xen_front_pgdir_shbuf_get_dir_start(&dbuf->shbuf);
	req->op.dbuf_create.buffer_sz = size;
	req->op.dbuf_create.data_ofs = offset;
	req->op.dbuf_create.dbuf_cookie = dbuf_cookie;
	req->op.dbuf_create.width = width;
	req->op.dbuf_create.height = height;
	req->op.dbuf_create.bpp = bpp;
	if (buf_cfg.be_alloc)
		req->op.dbuf_create.flags |= XENDISPL_DBUF_FLG_REQ_ALLOC;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret < 0)
		goto fail;

	ret = be_stream_wait_io(evtchnl);
	if (ret < 0)
		goto fail;

	ret = xen_front_pgdir_shbuf_map(&dbuf->shbuf);
	if (ret < 0)
		goto fail;

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return 0;

fail:
	mutex_unlock(&evtchnl->u.req.req_io_lock);
fail_shbuf_alloc:
	dbuf_free(&front_info->dbuf_list, dbuf_cookie);
	return ret;
}

/*
 * Ask the backend to destroy a display buffer and release the local
 * descriptor. The order of the local dbuf_free() relative to the backend
 * request depends on who allocated the pages (see comments below).
 */
static int xen_drm_front_dbuf_destroy(struct xen_drm_front_info *front_info,
				      u64 dbuf_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	bool be_alloc;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	be_alloc = front_info->cfg.be_alloc;

	/*
	 * For the backend allocated buffer release references now, so backend
	 * can free the buffer.
	 */
	if (be_alloc)
		dbuf_free(&front_info->dbuf_list, dbuf_cookie);

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_DESTROY);
	req->op.dbuf_destroy.dbuf_cookie = dbuf_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	/*
	 * Do this regardless of communication status with the backend:
	 * if we cannot remove remote resources remove what we can locally.
	 */
	if (!be_alloc)
		dbuf_free(&front_info->dbuf_list, dbuf_cookie);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

/*
 * Attach a framebuffer to an existing display buffer via
 * XENDISPL_OP_FB_ATTACH. Returns -EINVAL if @dbuf_cookie is unknown.
 */
int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info,
			    u64 dbuf_cookie, u64 fb_cookie, u32 width,
			    u32 height, u32 pixel_format)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xen_drm_front_dbuf *buf;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	buf = dbuf_get(&front_info->dbuf_list, dbuf_cookie);
	if (!buf)
		return -EINVAL;

	buf->fb_cookie = fb_cookie;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_ATTACH);
	req->op.fb_attach.dbuf_cookie = dbuf_cookie;
	req->op.fb_attach.fb_cookie = fb_cookie;
	req->op.fb_attach.width = width;
	req->op.fb_attach.height = height;
	req->op.fb_attach.pixel_format = pixel_format;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

/* Detach a framebuffer from the backend via XENDISPL_OP_FB_DETACH. */
int xen_drm_front_fb_detach(struct xen_drm_front_info *front_info,
			    u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
	if (unlikely(!evtchnl))
		return -EIO;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_FB_DETACH);
	req->op.fb_detach.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

/*
 * Request a page flip on connector @conn_idx via XENDISPL_OP_PG_FLIP.
 * The frame-done event arrives asynchronously on the event channel.
 */
int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
			    int conn_idx, u64 fb_cookie)
{
	struct xen_drm_front_evtchnl *evtchnl;
	struct xendispl_req *req;
	unsigned long flags;
	int ret;

	if (unlikely(conn_idx >= front_info->num_evt_pairs))
		return -EINVAL;

	evtchnl = &front_info->evt_pairs[conn_idx].req;

	mutex_lock(&evtchnl->u.req.req_io_lock);

	spin_lock_irqsave(&front_info->io_lock, flags);
	req = be_prepare_req(evtchnl, XENDISPL_OP_PG_FLIP);
	req->op.pg_flip.fb_cookie = fb_cookie;

	ret = be_stream_do_io(evtchnl, req);
	spin_unlock_irqrestore(&front_info->io_lock, flags);

	if (ret == 0)
		ret = be_stream_wait_io(evtchnl);

	mutex_unlock(&evtchnl->u.req.req_io_lock);
	return ret;
}

/*
 * Frame-done event from the backend (called from the event-channel IRQ
 * handler); forwards to the KMS layer after a bounds check.
 */
void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info,
				 int conn_idx, u64 fb_cookie)
{
	struct xen_drm_front_drm_info *drm_info = front_info->drm_info;

	if (unlikely(conn_idx >= front_info->cfg.num_connectors))
		return;

	xen_drm_front_kms_on_frame_done(&drm_info->pipeline[conn_idx],
					fb_cookie);
}

/*
 * GEM free callback: if the device is still alive, tell the backend to
 * destroy the buffer; otherwise just drop the local bookkeeping. Finally
 * free the GEM object itself.
 */
void xen_drm_front_gem_object_free(struct drm_gem_object *obj)
{
	struct xen_drm_front_drm_info *drm_info = obj->dev->dev_private;
	int idx;

	if (drm_dev_enter(obj->dev, &idx)) {
		xen_drm_front_dbuf_destroy(drm_info->front_info,
					   xen_drm_front_dbuf_to_cookie(obj));
		drm_dev_exit(idx);
	} else {
		dbuf_free(&drm_info->front_info->dbuf_list,
			  xen_drm_front_dbuf_to_cookie(obj));
	}

	xen_drm_front_gem_free_object_unlocked(obj);
}

/*
 * drm_driver.dumb_create implementation: allocate the GEM object, share it
 * with the backend and only then publish a handle to user-space.
 */
static int xen_drm_drv_dumb_create(struct drm_file *filp,
				   struct drm_device *dev,
				   struct drm_mode_create_dumb *args)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct drm_gem_object *obj;
	int ret;

	/*
	 * Dumb creation is a two stage process: first we create a fully
	 * constructed GEM object which is communicated to the backend, and
	 * only after that we can create GEM's handle. This is done so,
	 * because of the possible races: once you create a handle it becomes
	 * immediately visible to user-space, so the latter can try accessing
	 * object without pages etc.
	 * For details also see drm_gem_handle_create
	 */
	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	obj = xen_drm_front_gem_create(dev, args->size);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail;
	}

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(obj),
					args->width, args->height, args->bpp,
					args->size, 0,
					xen_drm_front_gem_get_pages(obj));
	if (ret)
		goto fail_backend;

	/* This is the tail of GEM object creation */
	ret = drm_gem_handle_create(filp, obj, &args->handle);
	if (ret)
		goto fail_handle;

	/* Drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);
	return 0;

fail_handle:
	xen_drm_front_dbuf_destroy(drm_info->front_info,
				   xen_drm_front_dbuf_to_cookie(obj));
fail_backend:
	/* drop reference from allocate */
	drm_gem_object_put(obj);
fail:
	DRM_ERROR("Failed to create dumb buffer: %d\n", ret);
	return ret;
}

/*
 * drm_driver.release: final teardown once all references are gone; signals
 * the backend (via XenbusStateInitialising) only in the backend-allocated
 * buffers case — see the matching comment in xen_drm_drv_fini().
 */
static void xen_drm_drv_release(struct drm_device *dev)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_drm_front_info *front_info = drm_info->front_info;

	xen_drm_front_kms_fini(drm_info);

	drm_atomic_helper_shutdown(dev);
	drm_mode_config_cleanup(dev);

	if (front_info->cfg.be_alloc)
		xenbus_switch_state(front_info->xb_dev,
				    XenbusStateInitialising);

	kfree(drm_info);
}

DEFINE_DRM_GEM_FOPS(xen_drm_dev_fops);

static const struct drm_driver xen_drm_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.release = xen_drm_drv_release,
	.gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
	.dumb_create = xen_drm_drv_dumb_create,
	.fops = &xen_drm_dev_fops,
	.name = "xendrm-du",
	.desc = "Xen PV DRM Display Unit",
	.date = "20180221",
	.major = 1,
	.minor = 0,
};

/*
 * Allocate and register the DRM device once the backend connection is up.
 * On failure all partially initialized state is unwound.
 */
static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
{
	struct device *dev = &front_info->xb_dev->dev;
	struct xen_drm_front_drm_info *drm_info;
	struct drm_device *drm_dev;
	int ret;

	if (drm_firmware_drivers_only())
		return -ENODEV;

	DRM_INFO("Creating %s\n", xen_drm_driver.desc);

	drm_info = kzalloc(sizeof(*drm_info), GFP_KERNEL);
	if (!drm_info) {
		ret = -ENOMEM;
		goto fail;
	}

	drm_info->front_info = front_info;
	front_info->drm_info = drm_info;

	drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
	if (IS_ERR(drm_dev)) {
		ret = PTR_ERR(drm_dev);
		goto fail_dev;
	}

	drm_info->drm_dev = drm_dev;

	drm_dev->dev_private = drm_info;

	ret = xen_drm_front_kms_init(drm_info);
	if (ret) {
		DRM_ERROR("Failed to initialize DRM/KMS, ret %d\n", ret);
		goto fail_modeset;
	}

	ret = drm_dev_register(drm_dev, 0);
	if (ret)
		goto fail_register;

	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
		 xen_drm_driver.name, xen_drm_driver.major,
		 xen_drm_driver.minor, xen_drm_driver.patchlevel,
		 xen_drm_driver.date, drm_dev->primary->index);

	return 0;

fail_register:
	drm_dev_unregister(drm_dev);
fail_modeset:
	drm_kms_helper_poll_fini(drm_dev);
	drm_mode_config_cleanup(drm_dev);
	drm_dev_put(drm_dev);
fail_dev:
	kfree(drm_info);
	front_info->drm_info = NULL;
fail:
	return ret;
}

/*
 * Unplug the DRM device and free front-end resources (event channels,
 * shared buffers) when the backend goes away or the driver is removed.
 */
static void xen_drm_drv_fini(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_drm_info *drm_info = front_info->drm_info;
	struct drm_device *dev;

	if (!drm_info)
		return;

	dev = drm_info->drm_dev;
	if (!dev)
		return;

	/* Nothing to do if device is already unplugged */
	if (drm_dev_is_unplugged(dev))
		return;

	drm_kms_helper_poll_fini(dev);
	drm_dev_unplug(dev);
	drm_dev_put(dev);

	front_info->drm_info = NULL;

	xen_drm_front_evtchnl_free_all(front_info);
	dbuf_free_all(&front_info->dbuf_list);

	/*
	 * If we are not using backend allocated buffers, then tell the
	 * backend we are ready to (re)initialize. Otherwise, wait for
	 * drm_driver.release.
	 */
	if (!front_info->cfg.be_alloc)
		xenbus_switch_state(front_info->xb_dev,
				    XenbusStateInitialising);
}

/*
 * Backend entered InitWait: read our configuration from XenStore, create
 * the event channels and publish their parameters for the backend.
 */
static int displback_initwait(struct xen_drm_front_info *front_info)
{
	struct xen_drm_front_cfg *cfg = &front_info->cfg;
	int ret;

	cfg->front_info = front_info;
	ret = xen_drm_front_cfg_card(front_info, cfg);
	if (ret < 0)
		return ret;

	DRM_INFO("Have %d connector(s)\n", cfg->num_connectors);
	/* Create event channels for all connectors and publish */
	ret = xen_drm_front_evtchnl_create_all(front_info);
	if (ret < 0)
		return ret;

	return xen_drm_front_evtchnl_publish_all(front_info);
}

/* Backend connected: mark the channels live and bring up the DRM device. */
static int displback_connect(struct xen_drm_front_info *front_info)
{
	xen_drm_front_evtchnl_set_state(front_info, EVTCHNL_STATE_CONNECTED);
	return xen_drm_drv_init(front_info);
}

/* Backend going away: park it in Reconfiguring and tear down our side. */
static void displback_disconnect(struct xen_drm_front_info *front_info)
{
	if (!front_info->drm_info)
		return;

	/* Tell the backend to wait until we release the DRM driver. */
	xenbus_switch_state(front_info->xb_dev, XenbusStateReconfiguring);

	xen_drm_drv_fini(front_info);
}

/*
 * xenbus .otherend_changed callback: drive the front-end through the
 * xenbus state machine in response to backend state transitions,
 * including recovery after unexpected backend closure.
 */
static void displback_changed(struct xenbus_device *xb_dev,
			      enum xenbus_state backend_state)
{
	struct xen_drm_front_info *front_info = dev_get_drvdata(&xb_dev->dev);
	int ret;

	DRM_DEBUG("Backend state is %s, front is %s\n",
		  xenbus_strstate(backend_state),
		  xenbus_strstate(xb_dev->state));

	switch (backend_state) {
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateInitialised:
		break;

	case XenbusStateInitialising:
		if (xb_dev->state == XenbusStateReconfiguring)
			break;

		/* recovering after backend unexpected closure */
		displback_disconnect(front_info);
		break;

	case XenbusStateInitWait:
		if (xb_dev->state == XenbusStateReconfiguring)
			break;

		/* recovering after backend unexpected closure */
		displback_disconnect(front_info);
		if (xb_dev->state != XenbusStateInitialising)
			break;

		ret = displback_initwait(front_info);
		if (ret < 0)
			xenbus_dev_fatal(xb_dev, ret, "initializing frontend");
		else
			xenbus_switch_state(xb_dev, XenbusStateInitialised);
		break;

	case XenbusStateConnected:
		if (xb_dev->state != XenbusStateInitialised)
			break;

		ret = displback_connect(front_info);
		if (ret < 0) {
			displback_disconnect(front_info);
			xenbus_dev_fatal(xb_dev, ret, "connecting backend");
		} else {
			xenbus_switch_state(xb_dev, XenbusStateConnected);
		}
		break;

	case XenbusStateClosing:
		/*
		 * in this state backend starts freeing resources,
		 * so let it go into closed state, so we can also
		 * remove ours
		 */
		break;

	case XenbusStateUnknown:
	case XenbusStateClosed:
		if (xb_dev->state == XenbusStateClosed)
			break;

		displback_disconnect(front_info);
		break;
	}
}

/*
 * xenbus probe: set up the DMA mask, allocate per-device state and move the
 * device to XenbusStateInitialising to start the handshake.
 */
static int xen_drv_probe(struct xenbus_device *xb_dev,
			 const struct xenbus_device_id *id)
{
	struct xen_drm_front_info *front_info;
	struct device *dev = &xb_dev->dev;
	int ret;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret < 0) {
		DRM_ERROR("Cannot setup DMA mask, ret %d", ret);
		return ret;
	}

	front_info = devm_kzalloc(&xb_dev->dev,
				  sizeof(*front_info), GFP_KERNEL);
	if (!front_info)
		return -ENOMEM;

	front_info->xb_dev = xb_dev;
	spin_lock_init(&front_info->io_lock);
	INIT_LIST_HEAD(&front_info->dbuf_list);
	dev_set_drvdata(&xb_dev->dev, front_info);

	return xenbus_switch_state(xb_dev, XenbusStateInitialising);
}

/*
 * xenbus remove: signal Closing, then poll the backend state directly
 * (no .otherend_changed events arrive after detach — see comment below)
 * before tearing down.
 */
static void xen_drv_remove(struct xenbus_device *dev)
{
	struct xen_drm_front_info *front_info = dev_get_drvdata(&dev->dev);
	int to = 100;

	xenbus_switch_state(dev, XenbusStateClosing);

	/*
	 * On driver removal it is disconnected from XenBus,
	 * so no backend state change events come via .otherend_changed
	 * callback. This prevents us from exiting gracefully, e.g.
	 * signaling the backend to free event channels, waiting for its
	 * state to change to XenbusStateClosed and cleaning at our end.
	 * Normally when front driver removed backend will finally go into
	 * XenbusStateInitWait state.
	 *
	 * Workaround: read backend's state manually and wait with time-out.
	 */
	while ((xenbus_read_unsigned(front_info->xb_dev->otherend, "state",
				     XenbusStateUnknown) != XenbusStateInitWait) &&
				     --to)
		msleep(10);

	if (!to) {
		unsigned int state;

		state = xenbus_read_unsigned(front_info->xb_dev->otherend,
					     "state", XenbusStateUnknown);
		DRM_ERROR("Backend state is %s while removing driver\n",
			  xenbus_strstate(state));
	}

	xen_drm_drv_fini(front_info);
	xenbus_frontend_closed(dev);
}

static const struct xenbus_device_id xen_driver_ids[] = {
	{ XENDISPL_DRIVER_NAME },
	{ "" }
};

static struct xenbus_driver xen_driver = {
	.ids = xen_driver_ids,
	.probe = xen_drv_probe,
	.remove = xen_drv_remove,
	.otherend_changed = displback_changed,
	.not_essential = true,
};

/*
 * Module init: reject configurations this driver cannot handle, then
 * register the xenbus front-end driver.
 */
static int __init xen_drv_init(void)
{
	/* At the moment we only support case with XEN_PAGE_SIZE == PAGE_SIZE */
	if (XEN_PAGE_SIZE != PAGE_SIZE) {
		DRM_ERROR(XENDISPL_DRIVER_NAME
			  ": different kernel and Xen page sizes are not supported: XEN_PAGE_SIZE (%lu) != PAGE_SIZE (%lu)\n",
			  XEN_PAGE_SIZE, PAGE_SIZE);
		return -ENODEV;
	}

	if (!xen_domain())
		return -ENODEV;

	if (!xen_has_pv_devices())
		return -ENODEV;

	DRM_INFO("Registering XEN PV " XENDISPL_DRIVER_NAME "\n");
	return xenbus_register_frontend(&xen_driver);
}

/* Module exit: unregister the xenbus driver. */
static void __exit xen_drv_fini(void)
{
	DRM_INFO("Unregistering XEN PV " XENDISPL_DRIVER_NAME "\n");
	xenbus_unregister_driver(&xen_driver);
}

module_init(xen_drv_init);
module_exit(xen_drv_fini);

MODULE_DESCRIPTION("Xen para-virtualized display device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:" XENDISPL_DRIVER_NAME);
linux-master
drivers/gpu/drm/xen/xen_drm_front.c
// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <[email protected]>
 */

#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>

#include <video/videomode.h>

#include "xen_drm_front.h"
#include "xen_drm_front_conn.h"
#include "xen_drm_front_kms.h"

/* Map a connector back to its owning para-virtual display pipeline. */
static struct xen_drm_front_drm_pipeline *
to_xen_drm_pipeline(struct drm_connector *connector)
{
	return container_of(connector, struct xen_drm_front_drm_pipeline, conn);
}

/* Pixel formats the para-virtual plane advertises to user-space. */
static const u32 plane_formats[] = {
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_XRGB4444,
	DRM_FORMAT_ARGB4444,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_YUYV,
};

/*
 * Return the static plane format table; *format_count receives the number
 * of entries.
 */
const u32 *xen_drm_front_conn_get_formats(int *format_count)
{
	*format_count = ARRAY_SIZE(plane_formats);
	return plane_formats;
}

/*
 * .detect_ctx callback: report connected unless the pipeline was marked
 * disconnected (e.g. after a failed backend request) or the device is
 * unplugged.
 */
static int connector_detect(struct drm_connector *connector,
			    struct drm_modeset_acquire_ctx *ctx,
			    bool force)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			to_xen_drm_pipeline(connector);

	if (drm_dev_is_unplugged(connector->dev))
		pipeline->conn_connected = false;

	return pipeline->conn_connected ? connector_status_connected :
			connector_status_disconnected;
}

#define XEN_DRM_CRTC_VREFRESH_HZ	60

/*
 * .get_modes callback: synthesize a single preferred mode from the
 * pipeline's configured width/height at XEN_DRM_CRTC_VREFRESH_HZ.
 * Returns the number of modes added (1), or 0 on allocation failure.
 */
static int connector_get_modes(struct drm_connector *connector)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			to_xen_drm_pipeline(connector);
	struct drm_display_mode *mode;
	struct videomode videomode;
	int width, height;

	mode = drm_mode_create(connector->dev);
	if (!mode)
		return 0;

	memset(&videomode, 0, sizeof(videomode));
	videomode.hactive = pipeline->width;
	videomode.vactive = pipeline->height;
	/* All porch/sync fields are zero, so totals equal the active size. */
	width = videomode.hactive + videomode.hfront_porch +
			videomode.hback_porch + videomode.hsync_len;
	height = videomode.vactive + videomode.vfront_porch +
			videomode.vback_porch + videomode.vsync_len;
	videomode.pixelclock = width * height * XEN_DRM_CRTC_VREFRESH_HZ;
	mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;

	drm_display_mode_from_videomode(&videomode, mode);
	drm_mode_probed_add(connector, mode);
	return 1;
}

static const struct drm_connector_helper_funcs connector_helper_funcs = {
	.get_modes = connector_get_modes,
	.detect_ctx = connector_detect,
};

static const struct drm_connector_funcs connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

/*
 * Initialize the virtual connector for a pipeline and register it with the
 * DRM core. Returns 0 on success or a negative errno from
 * drm_connector_init().
 */
int xen_drm_front_conn_init(struct xen_drm_front_drm_info *drm_info,
			    struct drm_connector *connector)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			to_xen_drm_pipeline(connector);

	drm_connector_helper_add(connector, &connector_helper_funcs);

	pipeline->conn_connected = true;

	connector->polled = DRM_CONNECTOR_POLL_CONNECT |
			DRM_CONNECTOR_POLL_DISCONNECT;

	return drm_connector_init(drm_info->drm_dev, connector,
				  &connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
}
linux-master
drivers/gpu/drm/xen/xen_drm_front_conn.c
// SPDX-License-Identifier: GPL-2.0 OR MIT /* * Xen para-virtual DRM device * * Copyright (C) 2016-2018 EPAM Systems Inc. * * Author: Oleksandr Andrushchenko <[email protected]> */ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> #include "xen_drm_front.h" #include "xen_drm_front_conn.h" #include "xen_drm_front_kms.h" /* * Timeout in ms to wait for frame done event from the backend: * must be a bit more than IO time-out */ #define FRAME_DONE_TO_MS (XEN_DRM_FRONT_WAIT_BACK_MS + 100) static struct xen_drm_front_drm_pipeline * to_xen_drm_pipeline(struct drm_simple_display_pipe *pipe) { return container_of(pipe, struct xen_drm_front_drm_pipeline, pipe); } static void fb_destroy(struct drm_framebuffer *fb) { struct xen_drm_front_drm_info *drm_info = fb->dev->dev_private; int idx; if (drm_dev_enter(fb->dev, &idx)) { xen_drm_front_fb_detach(drm_info->front_info, xen_drm_front_fb_to_cookie(fb)); drm_dev_exit(idx); } drm_gem_fb_destroy(fb); } static const struct drm_framebuffer_funcs fb_funcs = { .destroy = fb_destroy, }; static struct drm_framebuffer * fb_create(struct drm_device *dev, struct drm_file *filp, const struct drm_mode_fb_cmd2 *mode_cmd) { struct xen_drm_front_drm_info *drm_info = dev->dev_private; struct drm_framebuffer *fb; struct drm_gem_object *gem_obj; int ret; fb = drm_gem_fb_create_with_funcs(dev, filp, mode_cmd, &fb_funcs); if (IS_ERR(fb)) return fb; gem_obj = fb->obj[0]; ret = xen_drm_front_fb_attach(drm_info->front_info, xen_drm_front_dbuf_to_cookie(gem_obj), xen_drm_front_fb_to_cookie(fb), fb->width, fb->height, fb->format->format); if (ret < 0) { DRM_ERROR("Back failed to attach FB %p: %d\n", fb, ret); goto fail; } return fb; fail: drm_gem_fb_destroy(fb); return ERR_PTR(ret); } static 
const struct drm_mode_config_funcs mode_config_funcs = { .fb_create = fb_create, .atomic_check = drm_atomic_helper_check, .atomic_commit = drm_atomic_helper_commit, }; static void send_pending_event(struct xen_drm_front_drm_pipeline *pipeline) { struct drm_crtc *crtc = &pipeline->pipe.crtc; struct drm_device *dev = crtc->dev; unsigned long flags; spin_lock_irqsave(&dev->event_lock, flags); if (pipeline->pending_event) drm_crtc_send_vblank_event(crtc, pipeline->pending_event); pipeline->pending_event = NULL; spin_unlock_irqrestore(&dev->event_lock, flags); } static void display_enable(struct drm_simple_display_pipe *pipe, struct drm_crtc_state *crtc_state, struct drm_plane_state *plane_state) { struct xen_drm_front_drm_pipeline *pipeline = to_xen_drm_pipeline(pipe); struct drm_crtc *crtc = &pipe->crtc; struct drm_framebuffer *fb = plane_state->fb; int ret, idx; if (!drm_dev_enter(pipe->crtc.dev, &idx)) return; ret = xen_drm_front_mode_set(pipeline, crtc->x, crtc->y, fb->width, fb->height, fb->format->cpp[0] * 8, xen_drm_front_fb_to_cookie(fb)); if (ret) { DRM_ERROR("Failed to enable display: %d\n", ret); pipeline->conn_connected = false; } drm_dev_exit(idx); } static void display_disable(struct drm_simple_display_pipe *pipe) { struct xen_drm_front_drm_pipeline *pipeline = to_xen_drm_pipeline(pipe); int ret = 0, idx; if (drm_dev_enter(pipe->crtc.dev, &idx)) { ret = xen_drm_front_mode_set(pipeline, 0, 0, 0, 0, 0, xen_drm_front_fb_to_cookie(NULL)); drm_dev_exit(idx); } if (ret) DRM_ERROR("Failed to disable display: %d\n", ret); /* Make sure we can restart with enabled connector next time */ pipeline->conn_connected = true; /* release stalled event if any */ send_pending_event(pipeline); } void xen_drm_front_kms_on_frame_done(struct xen_drm_front_drm_pipeline *pipeline, u64 fb_cookie) { /* * This runs in interrupt context, e.g. 
under * drm_info->front_info->io_lock, so we cannot call _sync version * to cancel the work */ cancel_delayed_work(&pipeline->pflip_to_worker); send_pending_event(pipeline); } static void pflip_to_worker(struct work_struct *work) { struct delayed_work *delayed_work = to_delayed_work(work); struct xen_drm_front_drm_pipeline *pipeline = container_of(delayed_work, struct xen_drm_front_drm_pipeline, pflip_to_worker); DRM_ERROR("Frame done timed-out, releasing"); send_pending_event(pipeline); } static bool display_send_page_flip(struct drm_simple_display_pipe *pipe, struct drm_plane_state *old_plane_state) { struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(old_plane_state->state, &pipe->plane); /* * If old_plane_state->fb is NULL and plane_state->fb is not, * then this is an atomic commit which will enable display. * If old_plane_state->fb is not NULL and plane_state->fb is, * then this is an atomic commit which will disable display. * Ignore these and do not send page flip as this framebuffer will be * sent to the backend as a part of display_set_config call. */ if (old_plane_state->fb && plane_state->fb) { struct xen_drm_front_drm_pipeline *pipeline = to_xen_drm_pipeline(pipe); struct xen_drm_front_drm_info *drm_info = pipeline->drm_info; int ret; schedule_delayed_work(&pipeline->pflip_to_worker, msecs_to_jiffies(FRAME_DONE_TO_MS)); ret = xen_drm_front_page_flip(drm_info->front_info, pipeline->index, xen_drm_front_fb_to_cookie(plane_state->fb)); if (ret) { DRM_ERROR("Failed to send page flip request to backend: %d\n", ret); pipeline->conn_connected = false; /* * Report the flip not handled, so pending event is * sent, unblocking user-space. */ return false; } /* * Signal that page flip was handled, pending event will be sent * on frame done event from the backend. 
*/ return true; } return false; } static int display_check(struct drm_simple_display_pipe *pipe, struct drm_plane_state *plane_state, struct drm_crtc_state *crtc_state) { /* * Xen doesn't initialize vblanking via drm_vblank_init(), so * DRM helpers assume that it doesn't handle vblanking and start * sending out fake VBLANK events automatically. * * As xen contains it's own logic for sending out VBLANK events * in send_pending_event(), disable no_vblank (i.e., the xen * driver has vblanking support). */ crtc_state->no_vblank = false; return 0; } static void display_update(struct drm_simple_display_pipe *pipe, struct drm_plane_state *old_plane_state) { struct xen_drm_front_drm_pipeline *pipeline = to_xen_drm_pipeline(pipe); struct drm_crtc *crtc = &pipe->crtc; struct drm_pending_vblank_event *event; int idx; event = crtc->state->event; if (event) { struct drm_device *dev = crtc->dev; unsigned long flags; WARN_ON(pipeline->pending_event); spin_lock_irqsave(&dev->event_lock, flags); crtc->state->event = NULL; pipeline->pending_event = event; spin_unlock_irqrestore(&dev->event_lock, flags); } if (!drm_dev_enter(pipe->crtc.dev, &idx)) { send_pending_event(pipeline); return; } /* * Send page flip request to the backend *after* we have event cached * above, so on page flip done event from the backend we can * deliver it and there is no race condition between this code and * event from the backend. * If this is not a page flip, e.g. no flip done event from the backend * is expected, then send now. 
*/ if (!display_send_page_flip(pipe, old_plane_state)) send_pending_event(pipeline); drm_dev_exit(idx); } static enum drm_mode_status display_mode_valid(struct drm_simple_display_pipe *pipe, const struct drm_display_mode *mode) { struct xen_drm_front_drm_pipeline *pipeline = container_of(pipe, struct xen_drm_front_drm_pipeline, pipe); if (mode->hdisplay != pipeline->width) return MODE_ERROR; if (mode->vdisplay != pipeline->height) return MODE_ERROR; return MODE_OK; } static const struct drm_simple_display_pipe_funcs display_funcs = { .mode_valid = display_mode_valid, .enable = display_enable, .disable = display_disable, .check = display_check, .update = display_update, }; static int display_pipe_init(struct xen_drm_front_drm_info *drm_info, int index, struct xen_drm_front_cfg_connector *cfg, struct xen_drm_front_drm_pipeline *pipeline) { struct drm_device *dev = drm_info->drm_dev; const u32 *formats; int format_count; int ret; pipeline->drm_info = drm_info; pipeline->index = index; pipeline->height = cfg->height; pipeline->width = cfg->width; INIT_DELAYED_WORK(&pipeline->pflip_to_worker, pflip_to_worker); ret = xen_drm_front_conn_init(drm_info, &pipeline->conn); if (ret) return ret; formats = xen_drm_front_conn_get_formats(&format_count); return drm_simple_display_pipe_init(dev, &pipeline->pipe, &display_funcs, formats, format_count, NULL, &pipeline->conn); } int xen_drm_front_kms_init(struct xen_drm_front_drm_info *drm_info) { struct drm_device *dev = drm_info->drm_dev; int i, ret; drm_mode_config_init(dev); dev->mode_config.min_width = 0; dev->mode_config.min_height = 0; dev->mode_config.max_width = 4095; dev->mode_config.max_height = 2047; dev->mode_config.funcs = &mode_config_funcs; for (i = 0; i < drm_info->front_info->cfg.num_connectors; i++) { struct xen_drm_front_cfg_connector *cfg = &drm_info->front_info->cfg.connectors[i]; struct xen_drm_front_drm_pipeline *pipeline = &drm_info->pipeline[i]; ret = display_pipe_init(drm_info, i, cfg, pipeline); if (ret) { 
drm_mode_config_cleanup(dev); return ret; } } drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); return 0; } void xen_drm_front_kms_fini(struct xen_drm_front_drm_info *drm_info) { int i; for (i = 0; i < drm_info->front_info->cfg.num_connectors; i++) { struct xen_drm_front_drm_pipeline *pipeline = &drm_info->pipeline[i]; cancel_delayed_work_sync(&pipeline->pflip_to_worker); send_pending_event(pipeline); } }
linux-master
drivers/gpu/drm/xen/xen_drm_front_kms.c
// SPDX-License-Identifier: GPL-2.0 OR MIT /* * Xen para-virtual DRM device * * Copyright (C) 2016-2018 EPAM Systems Inc. * * Author: Oleksandr Andrushchenko <[email protected]> */ #include <linux/device.h> #include <drm/drm_print.h> #include <xen/interface/io/displif.h> #include <xen/xenbus.h> #include "xen_drm_front.h" #include "xen_drm_front_cfg.h" static int cfg_connector(struct xen_drm_front_info *front_info, struct xen_drm_front_cfg_connector *connector, const char *path, int index) { char *connector_path; connector_path = devm_kasprintf(&front_info->xb_dev->dev, GFP_KERNEL, "%s/%d", path, index); if (!connector_path) return -ENOMEM; if (xenbus_scanf(XBT_NIL, connector_path, XENDISPL_FIELD_RESOLUTION, "%d" XENDISPL_RESOLUTION_SEPARATOR "%d", &connector->width, &connector->height) < 0) { /* either no entry configured or wrong resolution set */ connector->width = 0; connector->height = 0; return -EINVAL; } connector->xenstore_path = connector_path; DRM_INFO("Connector %s: resolution %dx%d\n", connector_path, connector->width, connector->height); return 0; } int xen_drm_front_cfg_card(struct xen_drm_front_info *front_info, struct xen_drm_front_cfg *cfg) { struct xenbus_device *xb_dev = front_info->xb_dev; int ret, i; if (xenbus_read_unsigned(front_info->xb_dev->nodename, XENDISPL_FIELD_BE_ALLOC, 0)) { DRM_INFO("Backend can provide display buffers\n"); cfg->be_alloc = true; } cfg->num_connectors = 0; for (i = 0; i < ARRAY_SIZE(cfg->connectors); i++) { ret = cfg_connector(front_info, &cfg->connectors[i], xb_dev->nodename, i); if (ret < 0) break; cfg->num_connectors++; } if (!cfg->num_connectors) { DRM_ERROR("No connector(s) configured at %s\n", xb_dev->nodename); return -ENODEV; } return 0; }
linux-master
drivers/gpu/drm/xen/xen_drm_front_cfg.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014 Traphandler
 * Copyright (C) 2014 Free Electrons
 * Copyright (C) 2014 Atmel
 *
 * Author: Jean-Jacques Hiblot <[email protected]>
 * Author: Boris BREZILLON <[email protected]>
 */

#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/mfd/atmel-hlcdc.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "atmel_hlcdc_dc.h"

#define ATMEL_HLCDC_LAYER_IRQS_OFFSET		8

/*
 * Per-SoC layer descriptions: register offsets and config slot layout
 * for each hardware layer of the HLCDC IP revision found on that SoC.
 * NOTE(review): the numeric .layout values index hardware CFG registers;
 * they are datasheet-derived and must not be altered.
 */
static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9n12_layers[] = {
	{
		.name = "base",
		.formats = &atmel_hlcdc_plane_rgb_formats,
		.regs_offset = 0x40,
		.id = 0,
		.type = ATMEL_HLCDC_BASE_LAYER,
		.cfgs_offset = 0x2c,
		.layout = {
			.xstride = { 2 },
			.default_color = 3,
			.general_config = 4,
		},
		.clut_offset = 0x400,
	},
};

/* at91sam9n12: single base layer, small timing fields. */
static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_at91sam9n12 = {
	.min_width = 0,
	.min_height = 0,
	.max_width = 1280,
	.max_height = 860,
	.max_spw = 0x3f,
	.max_vpw = 0x3f,
	.max_hpw = 0xff,
	.conflicting_output_formats = true,
	.nlayers = ARRAY_SIZE(atmel_hlcdc_at91sam9n12_layers),
	.layers = atmel_hlcdc_at91sam9n12_layers,
};

static const struct atmel_hlcdc_layer_desc atmel_hlcdc_at91sam9x5_layers[] = {
	{
		.name = "base",
		.formats = &atmel_hlcdc_plane_rgb_formats,
		.regs_offset = 0x40,
		.id = 0,
		.type = ATMEL_HLCDC_BASE_LAYER,
		.cfgs_offset = 0x2c,
		.layout = {
			.xstride = { 2 },
			.default_color = 3,
			.general_config = 4,
			.disc_pos = 5,
			.disc_size = 6,
		},
		.clut_offset = 0x400,
	},
	{
		.name = "overlay1",
		.formats = &atmel_hlcdc_plane_rgb_formats,
		.regs_offset = 0x100,
		.id = 1,
		.type = ATMEL_HLCDC_OVERLAY_LAYER,
		.cfgs_offset = 0x2c,
		.layout = {
			.pos = 2,
			.size = 3,
			.xstride = { 4 },
			.pstride = { 5 },
			.default_color = 6,
			.chroma_key = 7,
			.chroma_key_mask = 8,
			.general_config = 9,
		},
		.clut_offset = 0x800,
	},
	{
		/* scaler-capable overlay, also handles YUV formats */
		.name = "high-end-overlay",
		.formats = &atmel_hlcdc_plane_rgb_and_yuv_formats,
		.regs_offset = 0x280,
		.id = 2,
		.type = ATMEL_HLCDC_OVERLAY_LAYER,
		.cfgs_offset = 0x4c,
		.layout = {
			.pos = 2,
			.size = 3,
			.memsize = 4,
			.xstride = { 5, 7 },
			.pstride = { 6, 8 },
			.default_color = 9,
			.chroma_key = 10,
			.chroma_key_mask = 11,
			.general_config = 12,
			.scaler_config = 13,
			.csc = 14,
		},
		.clut_offset = 0x1000,
	},
	{
		.name = "cursor",
		.formats = &atmel_hlcdc_plane_rgb_formats,
		.regs_offset = 0x340,
		.id = 3,
		.type = ATMEL_HLCDC_CURSOR_LAYER,
		.max_width = 128,
		.max_height = 128,
		.cfgs_offset = 0x2c,
		.layout = {
			.pos = 2,
			.size = 3,
			.xstride = { 4 },
			.default_color = 6,
			.chroma_key = 7,
			.chroma_key_mask = 8,
			.general_config = 9,
		},
		.clut_offset = 0x1400,
	},
};

static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_at91sam9x5 = {
	.min_width = 0,
	.min_height = 0,
	.max_width = 800,
	.max_height = 600,
	.max_spw = 0x3f,
	.max_vpw = 0x3f,
	.max_hpw = 0xff,
	.conflicting_output_formats = true,
	.nlayers = ARRAY_SIZE(atmel_hlcdc_at91sam9x5_layers),
	.layers = atmel_hlcdc_at91sam9x5_layers,
};

static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d3_layers[] = {
	{
		.name = "base",
		.formats = &atmel_hlcdc_plane_rgb_formats,
		.regs_offset = 0x40,
		.id = 0,
		.type = ATMEL_HLCDC_BASE_LAYER,
		.cfgs_offset = 0x2c,
		.layout = {
			.xstride = { 2 },
			.default_color = 3,
			.general_config = 4,
			.disc_pos = 5,
			.disc_size = 6,
		},
		.clut_offset = 0x600,
	},
	{
		.name = "overlay1",
		.formats = &atmel_hlcdc_plane_rgb_formats,
		.regs_offset = 0x140,
		.id = 1,
		.type = ATMEL_HLCDC_OVERLAY_LAYER,
		.cfgs_offset = 0x2c,
		.layout = {
			.pos = 2,
			.size = 3,
			.xstride = { 4 },
			.pstride = { 5 },
			.default_color = 6,
			.chroma_key = 7,
			.chroma_key_mask = 8,
			.general_config = 9,
		},
		.clut_offset = 0xa00,
	},
	{
		.name = "overlay2",
		.formats = &atmel_hlcdc_plane_rgb_formats,
		.regs_offset = 0x240,
		.id = 2,
		.type = ATMEL_HLCDC_OVERLAY_LAYER,
		.cfgs_offset = 0x2c,
		.layout = {
			.pos = 2,
			.size = 3,
			.xstride = { 4 },
			.pstride = { 5 },
			.default_color = 6,
			.chroma_key = 7,
			.chroma_key_mask = 8,
			.general_config = 9,
		},
		.clut_offset = 0xe00,
	},
	{
		/* scaler-capable overlay with phase coefficients and CSC */
		.name = "high-end-overlay",
		.formats = &atmel_hlcdc_plane_rgb_and_yuv_formats,
		.regs_offset = 0x340,
		.id = 3,
		.type = ATMEL_HLCDC_OVERLAY_LAYER,
		.cfgs_offset = 0x4c,
		.layout = {
			.pos = 2,
			.size = 3,
			.memsize = 4,
			.xstride = { 5, 7 },
			.pstride = { 6, 8 },
			.default_color = 9,
			.chroma_key = 10,
			.chroma_key_mask = 11,
			.general_config = 12,
			.scaler_config = 13,
			.phicoeffs = {
				.x = 17,
				.y = 33,
			},
			.csc = 14,
		},
		.clut_offset = 0x1200,
	},
	{
		.name = "cursor",
		.formats = &atmel_hlcdc_plane_rgb_formats,
		.regs_offset = 0x440,
		.id = 4,
		.type = ATMEL_HLCDC_CURSOR_LAYER,
		.max_width = 128,
		.max_height = 128,
		.cfgs_offset = 0x2c,
		.layout = {
			.pos = 2,
			.size = 3,
			.xstride = { 4 },
			.pstride = { 5 },
			.default_color = 6,
			.chroma_key = 7,
			.chroma_key_mask = 8,
			.general_config = 9,
			.scaler_config = 13,
		},
		.clut_offset = 0x1600,
	},
};

static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_sama5d3 = {
	.min_width = 0,
	.min_height = 0,
	.max_width = 2048,
	.max_height = 2048,
	.max_spw = 0x3f,
	.max_vpw = 0x3f,
	.max_hpw = 0x1ff,
	.conflicting_output_formats = true,
	.nlayers = ARRAY_SIZE(atmel_hlcdc_sama5d3_layers),
	.layers = atmel_hlcdc_sama5d3_layers,
};

static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d4_layers[] = {
	{
		.name = "base",
		.formats = &atmel_hlcdc_plane_rgb_formats,
		.regs_offset = 0x40,
		.id = 0,
		.type = ATMEL_HLCDC_BASE_LAYER,
		.cfgs_offset = 0x2c,
		.layout = {
			.xstride = { 2 },
			.default_color = 3,
			.general_config = 4,
			.disc_pos = 5,
			.disc_size = 6,
		},
		.clut_offset = 0x600,
	},
	{
		.name = "overlay1",
		.formats = &atmel_hlcdc_plane_rgb_formats,
		.regs_offset = 0x140,
		.id = 1,
		.type = ATMEL_HLCDC_OVERLAY_LAYER,
		.cfgs_offset = 0x2c,
		.layout = {
			.pos = 2,
			.size = 3,
			.xstride = { 4 },
			.pstride = { 5 },
			.default_color = 6,
			.chroma_key = 7,
			.chroma_key_mask = 8,
			.general_config = 9,
		},
		.clut_offset = 0xa00,
	},
	{
		.name = "overlay2",
		.formats = &atmel_hlcdc_plane_rgb_formats,
		.regs_offset = 0x240,
		.id = 2,
		.type = ATMEL_HLCDC_OVERLAY_LAYER,
		.cfgs_offset = 0x2c,
		.layout = {
			.pos = 2,
			.size = 3,
			.xstride = { 4 },
			.pstride = { 5 },
			.default_color = 6,
			.chroma_key = 7,
			.chroma_key_mask = 8,
			.general_config = 9,
		},
		.clut_offset = 0xe00,
	},
	{
		/* no cursor layer on sama5d4, unlike sama5d3 */
		.name = "high-end-overlay",
		.formats = &atmel_hlcdc_plane_rgb_and_yuv_formats,
		.regs_offset = 0x340,
		.id = 3,
		.type = ATMEL_HLCDC_OVERLAY_LAYER,
		.cfgs_offset = 0x4c,
		.layout = {
			.pos = 2,
			.size = 3,
			.memsize = 4,
			.xstride = { 5, 7 },
			.pstride = { 6, 8 },
			.default_color = 9,
			.chroma_key = 10,
			.chroma_key_mask = 11,
			.general_config = 12,
			.scaler_config = 13,
			.phicoeffs = {
				.x = 17,
				.y = 33,
			},
			.csc = 14,
		},
		.clut_offset = 0x1200,
	},
};

static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_sama5d4 = {
	.min_width = 0,
	.min_height = 0,
	.max_width = 2048,
	.max_height = 2048,
	.max_spw = 0xff,
	.max_vpw = 0xff,
	.max_hpw = 0x3ff,
	.nlayers = ARRAY_SIZE(atmel_hlcdc_sama5d4_layers),
	.layers = atmel_hlcdc_sama5d4_layers,
};

static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sam9x60_layers[] = {
	{
		.name = "base",
		.formats = &atmel_hlcdc_plane_rgb_formats,
		.regs_offset = 0x60,
		.id = 0,
		.type = ATMEL_HLCDC_BASE_LAYER,
		.cfgs_offset = 0x2c,
		.layout = {
			.xstride = { 2 },
			.default_color = 3,
			.general_config = 4,
			.disc_pos = 5,
			.disc_size = 6,
		},
		.clut_offset = 0x600,
	},
	{
		.name = "overlay1",
		.formats = &atmel_hlcdc_plane_rgb_formats,
		.regs_offset = 0x160,
		.id = 1,
		.type = ATMEL_HLCDC_OVERLAY_LAYER,
		.cfgs_offset = 0x2c,
		.layout = {
			.pos = 2,
			.size = 3,
			.xstride = { 4 },
			.pstride = { 5 },
			.default_color = 6,
			.chroma_key = 7,
			.chroma_key_mask = 8,
			.general_config = 9,
		},
		.clut_offset = 0xa00,
	},
	{
		.name = "overlay2",
		.formats = &atmel_hlcdc_plane_rgb_formats,
		.regs_offset = 0x260,
		.id = 2,
		.type = ATMEL_HLCDC_OVERLAY_LAYER,
		.cfgs_offset = 0x2c,
		.layout = {
			.pos = 2,
			.size = 3,
			.xstride = { 4 },
			.pstride = { 5 },
			.default_color = 6,
			.chroma_key = 7,
			.chroma_key_mask = 8,
			.general_config = 9,
		},
		.clut_offset = 0xe00,
	},
	{
		.name = "high-end-overlay",
		.formats = &atmel_hlcdc_plane_rgb_and_yuv_formats,
		.regs_offset = 0x360,
		.id = 3,
		.type = ATMEL_HLCDC_OVERLAY_LAYER,
		.cfgs_offset = 0x4c,
		.layout = {
			.pos = 2,
			.size = 3,
			.memsize = 4,
			.xstride = { 5, 7 },
			.pstride = { 6, 8 },
			.default_color = 9,
			.chroma_key = 10,
			.chroma_key_mask = 11,
			.general_config = 12,
			.scaler_config = 13,
			.phicoeffs = {
				.x = 17,
				.y = 33,
			},
			.csc = 14,
		},
		.clut_offset = 0x1200,
	},
};

/* sam9x60: same layer set as sama5d4 but a fixed pixel clock source. */
static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_sam9x60 = {
	.min_width = 0,
	.min_height = 0,
	.max_width = 2048,
	.max_height = 2048,
	.max_spw = 0xff,
	.max_vpw = 0xff,
	.max_hpw = 0x3ff,
	.fixed_clksrc = true,
	.nlayers = ARRAY_SIZE(atmel_hlcdc_sam9x60_layers),
	.layers = atmel_hlcdc_sam9x60_layers,
};

/* sama5d2 reuses the sama5d4 description (same HLCDC revision). */
static const struct of_device_id atmel_hlcdc_of_match[] = {
	{
		.compatible = "atmel,at91sam9n12-hlcdc",
		.data = &atmel_hlcdc_dc_at91sam9n12,
	},
	{
		.compatible = "atmel,at91sam9x5-hlcdc",
		.data = &atmel_hlcdc_dc_at91sam9x5,
	},
	{
		.compatible = "atmel,sama5d2-hlcdc",
		.data = &atmel_hlcdc_dc_sama5d4,
	},
	{
		.compatible = "atmel,sama5d3-hlcdc",
		.data = &atmel_hlcdc_dc_sama5d3,
	},
	{
		.compatible = "atmel,sama5d4-hlcdc",
		.data = &atmel_hlcdc_dc_sama5d4,
	},
	{
		.compatible = "microchip,sam9x60-hlcdc",
		.data = &atmel_hlcdc_dc_sam9x60,
	},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, atmel_hlcdc_of_match);

/*
 * Validate a display mode against the per-SoC timing-field limits
 * (sync pulse width, front/back porches). Fields are programmed as
 * value - 1, hence the "+ 1" in the upper-bound checks.
 */
enum drm_mode_status atmel_hlcdc_dc_mode_valid(struct atmel_hlcdc_dc *dc,
					       const struct drm_display_mode *mode)
{
	int vfront_porch = mode->vsync_start - mode->vdisplay;
	int vback_porch = mode->vtotal - mode->vsync_end;
	int vsync_len = mode->vsync_end - mode->vsync_start;
	int hfront_porch = mode->hsync_start - mode->hdisplay;
	int hback_porch = mode->htotal - mode->hsync_end;
	int hsync_len = mode->hsync_end - mode->hsync_start;

	if (hsync_len > dc->desc->max_spw + 1 || hsync_len < 1)
		return MODE_HSYNC;

	if (vsync_len > dc->desc->max_spw + 1 || vsync_len < 1)
		return MODE_VSYNC;

	if (hfront_porch > dc->desc->max_hpw + 1 || hfront_porch < 1 ||
	    hback_porch > dc->desc->max_hpw + 1 || hback_porch < 1 ||
	    mode->hdisplay < 1)
		return MODE_H_ILLEGAL;

	/* NOTE(review): vback_porch may be 0 while vfront_porch must be >= 1 */
	if (vfront_porch > dc->desc->max_vpw + 1 || vfront_porch < 1 ||
	    vback_porch > dc->desc->max_vpw || vback_porch < 0 ||
	    mode->vdisplay < 1)
		return MODE_V_ILLEGAL;

	return MODE_OK;
}

/* Dispatch a layer interrupt to the plane code for plane-type layers. */
static void atmel_hlcdc_layer_irq(struct atmel_hlcdc_layer *layer)
{
	if (!layer)
		return;

	if (layer->desc->type == ATMEL_HLCDC_BASE_LAYER ||
	    layer->desc->type == ATMEL_HLCDC_OVERLAY_LAYER ||
	    layer->desc->type == ATMEL_HLCDC_CURSOR_LAYER)
		atmel_hlcdc_plane_irq(atmel_hlcdc_layer_to_plane(layer));
}

/*
 * Top-level interrupt handler: mask the raw status with the enabled
 * interrupts, then forward start-of-frame to the CRTC and per-layer
 * bits to the corresponding layers.
 */
static irqreturn_t atmel_hlcdc_dc_irq_handler(int irq, void *data)
{
	struct drm_device *dev = data;
	struct atmel_hlcdc_dc *dc = dev->dev_private;
	unsigned long status;
	unsigned int imr, isr;
	int i;

	regmap_read(dc->hlcdc->regmap, ATMEL_HLCDC_IMR, &imr);
	regmap_read(dc->hlcdc->regmap, ATMEL_HLCDC_ISR, &isr);
	status = imr & isr;
	if (!status)
		return IRQ_NONE;

	if (status & ATMEL_HLCDC_SOF)
		atmel_hlcdc_crtc_irq(dc->crtc);

	for (i = 0; i < ATMEL_HLCDC_MAX_LAYERS; i++) {
		if (ATMEL_HLCDC_LAYER_STATUS(i) & status)
			atmel_hlcdc_layer_irq(dc->layers[i]);
	}

	return IRQ_HANDLED;
}

static void atmel_hlcdc_dc_irq_postinstall(struct drm_device *dev)
{
	struct atmel_hlcdc_dc *dc = dev->dev_private;
	unsigned int cfg = 0;
	int i;

	/* Enable interrupts on activated layers */
	for (i = 0; i < ATMEL_HLCDC_MAX_LAYERS; i++) {
		if (dc->layers[i])
			cfg |= ATMEL_HLCDC_LAYER_STATUS(i);
	}

	regmap_write(dc->hlcdc->regmap, ATMEL_HLCDC_IER, cfg);
}

/* Disable all interrupts; the ISR read clears latched status bits. */
static void atmel_hlcdc_dc_irq_disable(struct drm_device *dev)
{
	struct atmel_hlcdc_dc *dc = dev->dev_private;
	unsigned int isr;

	regmap_write(dc->hlcdc->regmap, ATMEL_HLCDC_IDR, 0xffffffff);
	regmap_read(dc->hlcdc->regmap, ATMEL_HLCDC_ISR, &isr);
}

static int atmel_hlcdc_dc_irq_install(struct drm_device *dev, unsigned int irq)
{
	int ret;

	/* quiesce the controller before wiring up the handler */
	atmel_hlcdc_dc_irq_disable(dev);

	ret = devm_request_irq(dev->dev, irq, atmel_hlcdc_dc_irq_handler, 0,
			       dev->driver->name, dev);
	if (ret)
		return ret;

	atmel_hlcdc_dc_irq_postinstall(dev);

	return 0;
}

static void atmel_hlcdc_dc_irq_uninstall(struct drm_device *dev)
{
	atmel_hlcdc_dc_irq_disable(dev);
}

static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = drm_gem_fb_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

/*
 * Create the KMS objects (outputs, planes, CRTC) and fill in the
 * mode_config limits from the SoC description.
 */
static int atmel_hlcdc_dc_modeset_init(struct drm_device *dev)
{
	struct atmel_hlcdc_dc *dc = dev->dev_private;
	int ret;

	drm_mode_config_init(dev);

	ret = atmel_hlcdc_create_outputs(dev);
	if (ret) {
		dev_err(dev->dev, "failed to create HLCDC outputs: %d\n", ret);
		return ret;
	}

	ret = atmel_hlcdc_create_planes(dev);
	if (ret) {
		dev_err(dev->dev, "failed to create planes: %d\n", ret);
		return ret;
	}

	ret = atmel_hlcdc_crtc_create(dev);
	if (ret) {
		dev_err(dev->dev, "failed to create crtc\n");
		return ret;
	}

	dev->mode_config.min_width = dc->desc->min_width;
	dev->mode_config.min_height = dc->desc->min_height;
	dev->mode_config.max_width = dc->desc->max_width;
	dev->mode_config.max_height = dc->desc->max_height;
	dev->mode_config.funcs = &mode_config_funcs;
	dev->mode_config.async_page_flip = true;

	return 0;
}

/*
 * Bring up the device: match the SoC description, enable the peripheral
 * clock, initialize vblank/modeset and install the IRQ handler.
 */
static int atmel_hlcdc_dc_load(struct drm_device *dev)
{
	struct platform_device *pdev = to_platform_device(dev->dev);
	const struct of_device_id *match;
	struct atmel_hlcdc_dc *dc;
	int ret;

	/* the HLCDC MFD parent node carries the compatible string */
	match = of_match_node(atmel_hlcdc_of_match, dev->dev->parent->of_node);
	if (!match) {
		dev_err(&pdev->dev, "invalid compatible string\n");
		return -ENODEV;
	}

	if (!match->data) {
		dev_err(&pdev->dev, "invalid hlcdc description\n");
		return -EINVAL;
	}

	dc = devm_kzalloc(dev->dev, sizeof(*dc), GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	dc->desc = match->data;
	dc->hlcdc = dev_get_drvdata(dev->dev->parent);
	dev->dev_private = dc;

	ret = clk_prepare_enable(dc->hlcdc->periph_clk);
	if (ret) {
		dev_err(dev->dev, "failed to enable periph_clk\n");
		return ret;
	}

	pm_runtime_enable(dev->dev);
	ret = drm_vblank_init(dev, 1);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		goto err_periph_clk_disable;
	}

	ret = atmel_hlcdc_dc_modeset_init(dev);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize mode setting\n");
		goto err_periph_clk_disable;
	}

	drm_mode_config_reset(dev);

	/* device must be powered while touching interrupt registers */
	pm_runtime_get_sync(dev->dev);
	ret = atmel_hlcdc_dc_irq_install(dev, dc->hlcdc->irq);
	pm_runtime_put_sync(dev->dev);
	if (ret < 0) {
		dev_err(dev->dev, "failed to install IRQ handler\n");
		goto err_periph_clk_disable;
	}

	platform_set_drvdata(pdev, dev);

	drm_kms_helper_poll_init(dev);

	return 0;

err_periph_clk_disable:
	pm_runtime_disable(dev->dev);
	clk_disable_unprepare(dc->hlcdc->periph_clk);

	return ret;
}

/* Tear down in reverse order of atmel_hlcdc_dc_load(). */
static void atmel_hlcdc_dc_unload(struct drm_device *dev)
{
	struct atmel_hlcdc_dc *dc = dev->dev_private;

	drm_kms_helper_poll_fini(dev);
	drm_atomic_helper_shutdown(dev);
	drm_mode_config_cleanup(dev);

	pm_runtime_get_sync(dev->dev);
	atmel_hlcdc_dc_irq_uninstall(dev);
	pm_runtime_put_sync(dev->dev);

	dev->dev_private = NULL;

	pm_runtime_disable(dev->dev);
	clk_disable_unprepare(dc->hlcdc->periph_clk);
}

DEFINE_DRM_GEM_DMA_FOPS(fops);

static const struct drm_driver atmel_hlcdc_dc_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	DRM_GEM_DMA_DRIVER_OPS,
	.fops = &fops,
	.name = "atmel-hlcdc",
	.desc = "Atmel HLCD Controller DRM",
	.date = "20141504",
	.major = 1,
	.minor = 0,
};

static int atmel_hlcdc_dc_drm_probe(struct platform_device *pdev)
{
	struct drm_device *ddev;
	int ret;

	ddev = drm_dev_alloc(&atmel_hlcdc_dc_driver, &pdev->dev);
	if (IS_ERR(ddev))
		return PTR_ERR(ddev);

	ret = atmel_hlcdc_dc_load(ddev);
	if (ret)
		goto err_put;

	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto err_unload;

	drm_fbdev_dma_setup(ddev, 24);

	return 0;

err_unload:
	atmel_hlcdc_dc_unload(ddev);

err_put:
	drm_dev_put(ddev);

	return ret;
}

static void atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
{
	struct drm_device *ddev = platform_get_drvdata(pdev);

	drm_dev_unregister(ddev);
	atmel_hlcdc_dc_unload(ddev);
	drm_dev_put(ddev);
}

/*
 * System suspend: save and mask the interrupt state, suspend the
 * atomic state and gate the peripheral clock.
 */
static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct atmel_hlcdc_dc *dc = drm_dev->dev_private;
	struct regmap *regmap = dc->hlcdc->regmap;
	struct drm_atomic_state *state;

	state = drm_atomic_helper_suspend(drm_dev);
	if (IS_ERR(state))
		return PTR_ERR(state);

	dc->suspend.state = state;

	regmap_read(regmap, ATMEL_HLCDC_IMR, &dc->suspend.imr);
	regmap_write(regmap, ATMEL_HLCDC_IDR, dc->suspend.imr);
	clk_disable_unprepare(dc->hlcdc->periph_clk);

	return 0;
}

static int atmel_hlcdc_dc_drm_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct atmel_hlcdc_dc *dc = drm_dev->dev_private;

	clk_prepare_enable(dc->hlcdc->periph_clk);
	/* restore the interrupt mask saved at suspend time */
	regmap_write(dc->hlcdc->regmap, ATMEL_HLCDC_IER, dc->suspend.imr);

	return drm_atomic_helper_resume(drm_dev, dc->suspend.state);
}

static DEFINE_SIMPLE_DEV_PM_OPS(atmel_hlcdc_dc_drm_pm_ops,
				atmel_hlcdc_dc_drm_suspend,
				atmel_hlcdc_dc_drm_resume);

static const struct of_device_id atmel_hlcdc_dc_of_match[] = {
	{ .compatible = "atmel,hlcdc-display-controller" },
	{ },
};

static struct platform_driver atmel_hlcdc_dc_platform_driver = {
	.probe	= atmel_hlcdc_dc_drm_probe,
	.remove_new = atmel_hlcdc_dc_drm_remove,
	.driver	= {
		.name	= "atmel-hlcdc-display-controller",
		.pm	= pm_sleep_ptr(&atmel_hlcdc_dc_drm_pm_ops),
		.of_match_table = atmel_hlcdc_dc_of_match,
	},
};
drm_module_platform_driver(atmel_hlcdc_dc_platform_driver);

MODULE_AUTHOR("Jean-Jacques Hiblot <[email protected]>");
MODULE_AUTHOR("Boris Brezillon <[email protected]>");
MODULE_DESCRIPTION("Atmel HLCDC Display Controller DRM Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:atmel-hlcdc-dc");
linux-master
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014 Traphandler
 * Copyright (C) 2014 Free Electrons
 *
 * Author: Jean-Jacques Hiblot <[email protected]>
 * Author: Boris BREZILLON <[email protected]>
 */

#include <linux/clk.h>
#include <linux/media-bus-format.h>
#include <linux/mfd/atmel-hlcdc.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

#include <video/videomode.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "atmel_hlcdc_dc.h"

/**
 * struct atmel_hlcdc_crtc_state - Atmel HLCDC CRTC state structure
 *
 * @base: base CRTC state
 * @output_mode: RGBXXX output mode
 */
struct atmel_hlcdc_crtc_state {
	struct drm_crtc_state base;
	unsigned int output_mode;
};

static inline struct atmel_hlcdc_crtc_state *
drm_crtc_state_to_atmel_hlcdc_crtc_state(struct drm_crtc_state *state)
{
	return container_of(state, struct atmel_hlcdc_crtc_state, base);
}

/**
 * struct atmel_hlcdc_crtc - Atmel HLCDC CRTC structure
 *
 * @base: base DRM CRTC structure
 * @dc: pointer to the atmel_hlcdc structure provided by the MFD device
 * @event: pointer to the current page flip event
 * @id: CRTC id (returned by drm_crtc_index)
 */
struct atmel_hlcdc_crtc {
	struct drm_crtc base;
	struct atmel_hlcdc_dc *dc;
	struct drm_pending_vblank_event *event;
	int id;
};

static inline struct atmel_hlcdc_crtc *
drm_crtc_to_atmel_hlcdc_crtc(struct drm_crtc *crtc)
{
	return container_of(crtc, struct atmel_hlcdc_crtc, base);
}

/*
 * Program the display timings (CFG(1)..CFG(4)), pick the pixel clock
 * source/divider (CFG(0)) and the signal polarities / output mode (CFG(5))
 * from the adjusted mode in the CRTC state.
 */
static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
{
	struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
	struct regmap *regmap = crtc->dc->hlcdc->regmap;
	struct drm_display_mode *adj = &c->state->adjusted_mode;
	struct drm_encoder *encoder = NULL, *en_iter;
	struct drm_connector *connector = NULL;
	struct atmel_hlcdc_crtc_state *state;
	struct drm_device *ddev = c->dev;
	struct drm_connector_list_iter iter;
	unsigned long mode_rate;
	struct videomode vm;
	unsigned long prate;
	unsigned int mask = ATMEL_HLCDC_CLKDIV_MASK | ATMEL_HLCDC_CLKPOL;
	unsigned int cfg = 0;
	int div, ret;

	/* get encoder from crtc */
	drm_for_each_encoder(en_iter, ddev) {
		if (en_iter->crtc == c) {
			encoder = en_iter;
			break;
		}
	}

	if (encoder) {
		/* Get the connector from encoder */
		drm_connector_list_iter_begin(ddev, &iter);
		drm_for_each_connector_iter(connector, &iter)
			if (connector->encoder == encoder)
				break;
		drm_connector_list_iter_end(&iter);
	}

	/* The regmap writes below need the peripheral clock running. */
	ret = clk_prepare_enable(crtc->dc->hlcdc->sys_clk);
	if (ret)
		return;

	vm.vfront_porch = adj->crtc_vsync_start - adj->crtc_vdisplay;
	vm.vback_porch = adj->crtc_vtotal - adj->crtc_vsync_end;
	vm.vsync_len = adj->crtc_vsync_end - adj->crtc_vsync_start;
	vm.hfront_porch = adj->crtc_hsync_start - adj->crtc_hdisplay;
	vm.hback_porch = adj->crtc_htotal - adj->crtc_hsync_end;
	vm.hsync_len = adj->crtc_hsync_end - adj->crtc_hsync_start;

	regmap_write(regmap, ATMEL_HLCDC_CFG(1),
		     (vm.hsync_len - 1) | ((vm.vsync_len - 1) << 16));

	regmap_write(regmap, ATMEL_HLCDC_CFG(2),
		     (vm.vfront_porch - 1) | (vm.vback_porch << 16));

	regmap_write(regmap, ATMEL_HLCDC_CFG(3),
		     (vm.hfront_porch - 1) | ((vm.hback_porch - 1) << 16));

	regmap_write(regmap, ATMEL_HLCDC_CFG(4),
		     (adj->crtc_hdisplay - 1) |
		     ((adj->crtc_vdisplay - 1) << 16));

	prate = clk_get_rate(crtc->dc->hlcdc->sys_clk);
	mode_rate = adj->crtc_clock * 1000;
	if (!crtc->dc->desc->fixed_clksrc) {
		/*
		 * NOTE(review): presumably the 2x system clock source is
		 * selected here to get a finer divider granularity — confirm
		 * against the HLCDC datasheet.
		 */
		prate *= 2;
		cfg |= ATMEL_HLCDC_CLKSEL;
		mask |= ATMEL_HLCDC_CLKSEL;
	}

	div = DIV_ROUND_UP(prate, mode_rate);
	if (div < 2) {
		div = 2;
	} else if (ATMEL_HLCDC_CLKDIV(div) & ~ATMEL_HLCDC_CLKDIV_MASK) {
		/* The divider ended up too big, try a lower base rate. */
		cfg &= ~ATMEL_HLCDC_CLKSEL;
		prate /= 2;
		div = DIV_ROUND_UP(prate, mode_rate);
		if (ATMEL_HLCDC_CLKDIV(div) & ~ATMEL_HLCDC_CLKDIV_MASK)
			div = ATMEL_HLCDC_CLKDIV_MASK;
	} else {
		int div_low = prate / mode_rate;

		if (div_low >= 2 &&
		    (10 * (prate / div_low - mode_rate) <
		     (mode_rate - prate / div)))
			/*
			 * At least 10 times better when using a higher
			 * frequency than requested, instead of a lower.
			 * So, go with that.
			 */
			div = div_low;
	}

	cfg |= ATMEL_HLCDC_CLKDIV(div);

	if (connector &&
	    connector->display_info.bus_flags &
	    DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
		cfg |= ATMEL_HLCDC_CLKPOL;

	regmap_update_bits(regmap, ATMEL_HLCDC_CFG(0), mask, cfg);

	state = drm_crtc_state_to_atmel_hlcdc_crtc_state(c->state);
	cfg = state->output_mode << 8;

	if (adj->flags & DRM_MODE_FLAG_NVSYNC)
		cfg |= ATMEL_HLCDC_VSPOL;

	if (adj->flags & DRM_MODE_FLAG_NHSYNC)
		cfg |= ATMEL_HLCDC_HSPOL;

	regmap_update_bits(regmap, ATMEL_HLCDC_CFG(5),
			   ATMEL_HLCDC_HSPOL | ATMEL_HLCDC_VSPOL |
			   ATMEL_HLCDC_VSPDLYS | ATMEL_HLCDC_VSPDLYE |
			   ATMEL_HLCDC_DISPPOL | ATMEL_HLCDC_DISPDLY |
			   ATMEL_HLCDC_VSPSU | ATMEL_HLCDC_VSPHO |
			   ATMEL_HLCDC_GUARDTIME_MASK | ATMEL_HLCDC_MODE_MASK,
			   cfg);

	clk_disable_unprepare(crtc->dc->hlcdc->sys_clk);
}

static enum drm_mode_status
atmel_hlcdc_crtc_mode_valid(struct drm_crtc *c,
			    const struct drm_display_mode *mode)
{
	struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);

	return atmel_hlcdc_dc_mode_valid(crtc->dc, mode);
}

/*
 * Shut the pipeline down in order (display, sync, pixel clock), polling the
 * status register after each step because the hardware disables these
 * asynchronously.
 */
static void atmel_hlcdc_crtc_atomic_disable(struct drm_crtc *c,
					    struct drm_atomic_state *state)
{
	struct drm_device *dev = c->dev;
	struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
	struct regmap *regmap = crtc->dc->hlcdc->regmap;
	unsigned int status;

	drm_crtc_vblank_off(c);

	pm_runtime_get_sync(dev->dev);

	regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_DISP);
	while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
	       (status & ATMEL_HLCDC_DISP))
		cpu_relax();

	regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_SYNC);
	while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
	       (status & ATMEL_HLCDC_SYNC))
		cpu_relax();

	regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_PIXEL_CLK);
	while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
	       (status & ATMEL_HLCDC_PIXEL_CLK))
		cpu_relax();

	clk_disable_unprepare(crtc->dc->hlcdc->sys_clk);
	pinctrl_pm_select_sleep_state(dev->dev);

	pm_runtime_allow(dev->dev);

	pm_runtime_put_sync(dev->dev);
}

/*
 * Bring the pipeline up in the reverse order of atomic_disable
 * (pixel clock, sync, display), again polling until each step takes
 * effect.
 */
static void atmel_hlcdc_crtc_atomic_enable(struct drm_crtc *c,
					   struct drm_atomic_state *state)
{
	struct drm_device *dev = c->dev;
	struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
	struct regmap *regmap = crtc->dc->hlcdc->regmap;
	unsigned int status;

	pm_runtime_get_sync(dev->dev);

	pm_runtime_forbid(dev->dev);

	pinctrl_pm_select_default_state(dev->dev);
	clk_prepare_enable(crtc->dc->hlcdc->sys_clk);

	regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_PIXEL_CLK);
	while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
	       !(status & ATMEL_HLCDC_PIXEL_CLK))
		cpu_relax();

	regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_SYNC);
	while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
	       !(status & ATMEL_HLCDC_SYNC))
		cpu_relax();

	regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_DISP);
	while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
	       !(status & ATMEL_HLCDC_DISP))
		cpu_relax();

	pm_runtime_put_sync(dev->dev);
}

#define ATMEL_HLCDC_RGB444_OUTPUT	BIT(0)
#define ATMEL_HLCDC_RGB565_OUTPUT	BIT(1)
#define ATMEL_HLCDC_RGB666_OUTPUT	BIT(2)
#define ATMEL_HLCDC_RGB888_OUTPUT	BIT(3)
#define ATMEL_HLCDC_OUTPUT_MODE_MASK	GENMASK(3, 0)

/*
 * Return the bitmask of RGB output modes supported by this connector:
 * either the single mode forced by the encoder's bus format, or the union
 * of the modes listed in the connector's display_info. Returns -EINVAL for
 * an unsupported encoder bus format.
 */
static int atmel_hlcdc_connector_output_mode(struct drm_connector_state *state)
{
	struct drm_connector *connector = state->connector;
	struct drm_display_info *info = &connector->display_info;
	struct drm_encoder *encoder;
	unsigned int supported_fmts = 0;
	int j;

	encoder = state->best_encoder;
	if (!encoder)
		encoder = connector->encoder;

	switch (atmel_hlcdc_encoder_get_bus_fmt(encoder)) {
	case 0:
		break;
	case MEDIA_BUS_FMT_RGB444_1X12:
		return ATMEL_HLCDC_RGB444_OUTPUT;
	case MEDIA_BUS_FMT_RGB565_1X16:
		return ATMEL_HLCDC_RGB565_OUTPUT;
	case MEDIA_BUS_FMT_RGB666_1X18:
		return ATMEL_HLCDC_RGB666_OUTPUT;
	case MEDIA_BUS_FMT_RGB888_1X24:
		return ATMEL_HLCDC_RGB888_OUTPUT;
	default:
		return -EINVAL;
	}

	for (j = 0; j < info->num_bus_formats; j++) {
		switch (info->bus_formats[j]) {
		case MEDIA_BUS_FMT_RGB444_1X12:
			supported_fmts |= ATMEL_HLCDC_RGB444_OUTPUT;
			break;
		case MEDIA_BUS_FMT_RGB565_1X16:
			supported_fmts |= ATMEL_HLCDC_RGB565_OUTPUT;
			break;
		case MEDIA_BUS_FMT_RGB666_1X18:
			supported_fmts |= ATMEL_HLCDC_RGB666_OUTPUT;
			break;
		case MEDIA_BUS_FMT_RGB888_1X24:
			supported_fmts |= ATMEL_HLCDC_RGB888_OUTPUT;
			break;
		default:
			break;
		}
	}

	return supported_fmts;
}

/*
 * Pick the widest output mode acceptable to all connectors attached to this
 * CRTC and store it in the private CRTC state (consumed by mode_set_nofb).
 */
static int atmel_hlcdc_crtc_select_output_mode(struct drm_crtc_state *state)
{
	unsigned int output_fmts = ATMEL_HLCDC_OUTPUT_MODE_MASK;
	struct atmel_hlcdc_crtc_state *hstate;
	struct drm_connector_state *cstate;
	struct drm_connector *connector;
	struct atmel_hlcdc_crtc *crtc;
	int i;

	crtc = drm_crtc_to_atmel_hlcdc_crtc(state->crtc);

	for_each_new_connector_in_state(state->state, connector, cstate, i) {
		unsigned int supported_fmts = 0;

		if (!cstate->crtc)
			continue;

		supported_fmts = atmel_hlcdc_connector_output_mode(cstate);

		if (crtc->dc->desc->conflicting_output_formats)
			output_fmts &= supported_fmts;
		else
			output_fmts |= supported_fmts;
	}

	if (!output_fmts)
		return -EINVAL;

	hstate = drm_crtc_state_to_atmel_hlcdc_crtc_state(state);
	hstate->output_mode = fls(output_fmts) - 1;

	return 0;
}

static int atmel_hlcdc_crtc_atomic_check(struct drm_crtc *c,
					 struct drm_atomic_state *state)
{
	struct drm_crtc_state *s = drm_atomic_get_new_crtc_state(state, c);
	int ret;

	ret = atmel_hlcdc_crtc_select_output_mode(s);
	if (ret)
		return ret;

	ret = atmel_hlcdc_plane_prepare_disc_area(s);
	if (ret)
		return ret;

	return atmel_hlcdc_plane_prepare_ahb_routing(s);
}

static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c,
					  struct drm_atomic_state *state)
{
	drm_crtc_vblank_on(c);
}

/*
 * Queue the pending page-flip event (if any) so that the vblank IRQ handler
 * can deliver it; the event_lock protects crtc->event against the IRQ path.
 */
static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *c,
					  struct drm_atomic_state *state)
{
	struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
	unsigned long flags;

	spin_lock_irqsave(&c->dev->event_lock, flags);

	if (c->state->event) {
		c->state->event->pipe = drm_crtc_index(c);

		WARN_ON(drm_crtc_vblank_get(c) != 0);

		crtc->event = c->state->event;
		c->state->event = NULL;
	}

	spin_unlock_irqrestore(&c->dev->event_lock, flags);
}

static const struct drm_crtc_helper_funcs lcdc_crtc_helper_funcs = {
	.mode_valid = atmel_hlcdc_crtc_mode_valid,
	.mode_set_nofb = atmel_hlcdc_crtc_mode_set_nofb,
	.atomic_check = atmel_hlcdc_crtc_atomic_check,
	.atomic_begin = atmel_hlcdc_crtc_atomic_begin,
	.atomic_flush = atmel_hlcdc_crtc_atomic_flush,
	.atomic_enable = atmel_hlcdc_crtc_atomic_enable,
	.atomic_disable = atmel_hlcdc_crtc_atomic_disable,
};

static void atmel_hlcdc_crtc_destroy(struct drm_crtc *c)
{
	struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);

	drm_crtc_cleanup(c);
	kfree(crtc);
}

/* Deliver a queued page-flip event and drop the vblank reference taken in
 * atomic_flush. Called from the vblank IRQ path.
 */
static void atmel_hlcdc_crtc_finish_page_flip(struct atmel_hlcdc_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (crtc->event) {
		drm_crtc_send_vblank_event(&crtc->base, crtc->event);
		drm_crtc_vblank_put(&crtc->base);
		crtc->event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

void atmel_hlcdc_crtc_irq(struct drm_crtc *c)
{
	drm_crtc_handle_vblank(c);
	atmel_hlcdc_crtc_finish_page_flip(drm_crtc_to_atmel_hlcdc_crtc(c));
}

static void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc)
{
	struct atmel_hlcdc_crtc_state *state;

	if (crtc->state) {
		__drm_atomic_helper_crtc_destroy_state(crtc->state);
		state = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state);
		kfree(state);
		crtc->state = NULL;
	}

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state)
		__drm_atomic_helper_crtc_reset(crtc, &state->base);
}

static struct drm_crtc_state *
atmel_hlcdc_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct atmel_hlcdc_crtc_state *state, *cur;

	if (WARN_ON(!crtc->state))
		return NULL;

	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	cur = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state);
	state->output_mode = cur->output_mode;

	return &state->base;
}

static void atmel_hlcdc_crtc_destroy_state(struct drm_crtc *crtc,
					   struct drm_crtc_state *s)
{
	struct atmel_hlcdc_crtc_state *state;

	state = drm_crtc_state_to_atmel_hlcdc_crtc_state(s);
	__drm_atomic_helper_crtc_destroy_state(s);
	kfree(state);
}

static int atmel_hlcdc_crtc_enable_vblank(struct drm_crtc *c)
{
	struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
	struct regmap *regmap = crtc->dc->hlcdc->regmap;

	/* Enable SOF (Start Of Frame) interrupt for vblank counting */
	regmap_write(regmap, ATMEL_HLCDC_IER, ATMEL_HLCDC_SOF);

	return 0;
}

static void atmel_hlcdc_crtc_disable_vblank(struct drm_crtc *c)
{
	struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
	struct regmap *regmap = crtc->dc->hlcdc->regmap;

	regmap_write(regmap, ATMEL_HLCDC_IDR, ATMEL_HLCDC_SOF);
}

static const struct drm_crtc_funcs atmel_hlcdc_crtc_funcs = {
	.page_flip = drm_atomic_helper_page_flip,
	.set_config = drm_atomic_helper_set_config,
	.destroy = atmel_hlcdc_crtc_destroy,
	.reset = atmel_hlcdc_crtc_reset,
	.atomic_duplicate_state = atmel_hlcdc_crtc_duplicate_state,
	.atomic_destroy_state = atmel_hlcdc_crtc_destroy_state,
	.enable_vblank = atmel_hlcdc_crtc_enable_vblank,
	.disable_vblank = atmel_hlcdc_crtc_disable_vblank,
};

/*
 * Create the single CRTC, wire it to the primary/cursor planes found in
 * dc->layers, and restrict overlay planes to it.
 *
 * NOTE(review): &primary->base / &cursor->base are passed to
 * drm_crtc_init_with_planes() without a NULL check on primary/cursor —
 * presumably every supported SoC variant declares a base layer; confirm
 * against the layer descriptor tables.
 */
int atmel_hlcdc_crtc_create(struct drm_device *dev)
{
	struct atmel_hlcdc_plane *primary = NULL, *cursor = NULL;
	struct atmel_hlcdc_dc *dc = dev->dev_private;
	struct atmel_hlcdc_crtc *crtc;
	int ret;
	int i;

	crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
	if (!crtc)
		return -ENOMEM;

	crtc->dc = dc;

	for (i = 0; i < ATMEL_HLCDC_MAX_LAYERS; i++) {
		if (!dc->layers[i])
			continue;

		switch (dc->layers[i]->desc->type) {
		case ATMEL_HLCDC_BASE_LAYER:
			primary = atmel_hlcdc_layer_to_plane(dc->layers[i]);
			break;

		case ATMEL_HLCDC_CURSOR_LAYER:
			cursor = atmel_hlcdc_layer_to_plane(dc->layers[i]);
			break;

		default:
			break;
		}
	}

	ret = drm_crtc_init_with_planes(dev, &crtc->base, &primary->base,
					&cursor->base, &atmel_hlcdc_crtc_funcs,
					NULL);
	if (ret < 0)
		goto fail;

	crtc->id = drm_crtc_index(&crtc->base);

	for (i = 0; i < ATMEL_HLCDC_MAX_LAYERS; i++) {
		struct atmel_hlcdc_plane *overlay;

		if (dc->layers[i] &&
		    dc->layers[i]->desc->type == ATMEL_HLCDC_OVERLAY_LAYER) {
			overlay = atmel_hlcdc_layer_to_plane(dc->layers[i]);
			overlay->base.possible_crtcs = 1 << crtc->id;
		}
	}

	drm_crtc_helper_add(&crtc->base, &lcdc_crtc_helper_funcs);
	drm_mode_crtc_set_gamma_size(&crtc->base, ATMEL_HLCDC_CLUT_SIZE);
	drm_crtc_enable_color_mgmt(&crtc->base, 0, false,
				   ATMEL_HLCDC_CLUT_SIZE);

	dc->crtc = &crtc->base;

	return 0;

fail:
	atmel_hlcdc_crtc_destroy(&crtc->base);
	return ret;
}
linux-master
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014 Free Electrons
 * Copyright (C) 2014 Atmel
 *
 * Author: Boris BREZILLON <[email protected]>
 */

#include <linux/dmapool.h>
#include <linux/mfd/atmel-hlcdc.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>

#include "atmel_hlcdc_dc.h"

/**
 * struct atmel_hlcdc_plane_state - Atmel HLCDC Plane state structure.
 *
 * @base: DRM plane state
 * @crtc_x: x position of the plane relative to the CRTC
 * @crtc_y: y position of the plane relative to the CRTC
 * @crtc_w: visible width of the plane
 * @crtc_h: visible height of the plane
 * @src_x: x buffer position
 * @src_y: y buffer position
 * @src_w: buffer width
 * @src_h: buffer height
 * @disc_x: x discard position
 * @disc_y: y discard position
 * @disc_w: discard width
 * @disc_h: discard height
 * @ahb_id: AHB identification number
 * @bpp: bytes per pixel deduced from pixel_format
 * @offsets: offsets to apply to the GEM buffers
 * @xstride: value to add to the pixel pointer between each line
 * @pstride: value to add to the pixel pointer between each pixel
 * @nplanes: number of planes (deduced from pixel_format)
 * @dscrs: DMA descriptors
 */
struct atmel_hlcdc_plane_state {
	struct drm_plane_state base;
	int crtc_x;
	int crtc_y;
	unsigned int crtc_w;
	unsigned int crtc_h;
	uint32_t src_x;
	uint32_t src_y;
	uint32_t src_w;
	uint32_t src_h;

	int disc_x;
	int disc_y;
	int disc_w;
	int disc_h;

	int ahb_id;

	/* These fields are private and should not be touched */
	int bpp[ATMEL_HLCDC_LAYER_MAX_PLANES];
	unsigned int offsets[ATMEL_HLCDC_LAYER_MAX_PLANES];
	int xstride[ATMEL_HLCDC_LAYER_MAX_PLANES];
	int pstride[ATMEL_HLCDC_LAYER_MAX_PLANES];
	int nplanes;

	/* DMA descriptors. */
	struct atmel_hlcdc_dma_channel_dscr *dscrs[ATMEL_HLCDC_LAYER_MAX_PLANES];
};

static inline struct atmel_hlcdc_plane_state *
drm_plane_state_to_atmel_hlcdc_plane_state(struct drm_plane_state *s)
{
	return container_of(s, struct atmel_hlcdc_plane_state, base);
}

#define SUBPIXEL_MASK			0xffff

static uint32_t rgb_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_XRGB4444,
	DRM_FORMAT_ARGB4444,
	DRM_FORMAT_RGBA4444,
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
};

struct atmel_hlcdc_formats atmel_hlcdc_plane_rgb_formats = {
	.formats = rgb_formats,
	.nformats = ARRAY_SIZE(rgb_formats),
};

static uint32_t rgb_and_yuv_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_XRGB4444,
	DRM_FORMAT_ARGB4444,
	DRM_FORMAT_RGBA4444,
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_AYUV,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_VYUY,
	DRM_FORMAT_NV21,
	DRM_FORMAT_NV61,
	DRM_FORMAT_YUV422,
	DRM_FORMAT_YUV420,
};

struct atmel_hlcdc_formats atmel_hlcdc_plane_rgb_and_yuv_formats = {
	.formats = rgb_and_yuv_formats,
	.nformats = ARRAY_SIZE(rgb_and_yuv_formats),
};

/* Map a DRM fourcc to the matching HLCDC layer mode value. */
static int atmel_hlcdc_format_to_plane_mode(u32 format, u32 *mode)
{
	switch (format) {
	case DRM_FORMAT_C8:
		*mode = ATMEL_HLCDC_C8_MODE;
		break;
	case DRM_FORMAT_XRGB4444:
		*mode = ATMEL_HLCDC_XRGB4444_MODE;
		break;
	case DRM_FORMAT_ARGB4444:
		*mode = ATMEL_HLCDC_ARGB4444_MODE;
		break;
	case DRM_FORMAT_RGBA4444:
		*mode = ATMEL_HLCDC_RGBA4444_MODE;
		break;
	case DRM_FORMAT_RGB565:
		*mode = ATMEL_HLCDC_RGB565_MODE;
		break;
	case DRM_FORMAT_RGB888:
		*mode = ATMEL_HLCDC_RGB888_MODE;
		break;
	case DRM_FORMAT_ARGB1555:
		*mode = ATMEL_HLCDC_ARGB1555_MODE;
		break;
	case DRM_FORMAT_XRGB8888:
		*mode = ATMEL_HLCDC_XRGB8888_MODE;
		break;
	case DRM_FORMAT_ARGB8888:
		*mode = ATMEL_HLCDC_ARGB8888_MODE;
		break;
	case DRM_FORMAT_RGBA8888:
		*mode = ATMEL_HLCDC_RGBA8888_MODE;
		break;
	case DRM_FORMAT_AYUV:
		*mode = ATMEL_HLCDC_AYUV_MODE;
		break;
	case DRM_FORMAT_YUYV:
		*mode = ATMEL_HLCDC_YUYV_MODE;
		break;
	case DRM_FORMAT_UYVY:
		*mode = ATMEL_HLCDC_UYVY_MODE;
		break;
	case DRM_FORMAT_YVYU:
		*mode = ATMEL_HLCDC_YVYU_MODE;
		break;
	case DRM_FORMAT_VYUY:
		*mode = ATMEL_HLCDC_VYUY_MODE;
		break;
	case DRM_FORMAT_NV21:
		*mode = ATMEL_HLCDC_NV21_MODE;
		break;
	case DRM_FORMAT_NV61:
		*mode = ATMEL_HLCDC_NV61_MODE;
		break;
	case DRM_FORMAT_YUV420:
		*mode = ATMEL_HLCDC_YUV420_MODE;
		break;
	case DRM_FORMAT_YUV422:
		*mode = ATMEL_HLCDC_YUV422_MODE;
		break;
	default:
		return -ENOTSUPP;
	}

	return 0;
}

/*
 * Phi-scaler coefficient tables for the HEO layer.
 * NOTE(review): values presumably come from the HLCDC datasheet filter
 * tables — confirm before editing.
 */
static u32 heo_downscaling_xcoef[] = {
	0x11343311,
	0x000000f7,
	0x1635300c,
	0x000000f9,
	0x1b362c08,
	0x000000fb,
	0x1f372804,
	0x000000fe,
	0x24382400,
	0x00000000,
	0x28371ffe,
	0x00000004,
	0x2c361bfb,
	0x00000008,
	0x303516f9,
	0x0000000c,
};

static u32 heo_downscaling_ycoef[] = {
	0x00123737,
	0x00173732,
	0x001b382d,
	0x001f3928,
	0x00243824,
	0x0028391f,
	0x002d381b,
	0x00323717,
};

static u32 heo_upscaling_xcoef[] = {
	0xf74949f7,
	0x00000000,
	0xf55f33fb,
	0x000000fe,
	0xf5701efe,
	0x000000ff,
	0xf87c0dff,
	0x00000000,
	0x00800000,
	0x00000000,
	0x0d7cf800,
	0x000000ff,
	0x1e70f5ff,
	0x000000fe,
	0x335ff5fe,
	0x000000fb,
};

static u32 heo_upscaling_ycoef[] = {
	0x00004040,
	0x00075920,
	0x00056f0c,
	0x00027b03,
	0x00008000,
	0x00037b02,
	0x000c6f05,
	0x00205907,
};

#define ATMEL_HLCDC_XPHIDEF	4
#define ATMEL_HLCDC_YPHIDEF	4

static u32 atmel_hlcdc_plane_phiscaler_get_factor(u32 srcsize,
						  u32 dstsize,
						  u32 phidef)
{
	u32 factor, max_memsize;

	factor = (256 * ((8 * (srcsize - 1)) - phidef)) / (dstsize - 1);
	max_memsize = ((factor * (dstsize - 1)) + (256 * phidef)) / 2048;

	/* Keep the scaler from reading past the last source line/pixel. */
	if (max_memsize > srcsize - 1)
		factor--;

	return factor;
}

static void
atmel_hlcdc_plane_scaler_set_phicoeff(struct atmel_hlcdc_plane *plane,
				      const u32 *coeff_tab, int size,
				      unsigned int cfg_offs)
{
	int i;

	for (i = 0; i < size; i++)
		atmel_hlcdc_layer_write_cfg(&plane->layer, cfg_offs + i,
					    coeff_tab[i]);
}

/*
 * Program the layer scaler: disabled for 1:1, phi-scaler with coefficient
 * tables when the layer supports it, plain factor-based scaling otherwise.
 */
static
void atmel_hlcdc_plane_setup_scaler(struct atmel_hlcdc_plane *plane,
				    struct atmel_hlcdc_plane_state *state)
{
	const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc;
	u32 xfactor, yfactor;

	if (!desc->layout.scaler_config)
		return;

	if (state->crtc_w == state->src_w && state->crtc_h == state->src_h) {
		atmel_hlcdc_layer_write_cfg(&plane->layer,
					    desc->layout.scaler_config, 0);
		return;
	}

	if (desc->layout.phicoeffs.x) {
		xfactor = atmel_hlcdc_plane_phiscaler_get_factor(state->src_w,
							state->crtc_w,
							ATMEL_HLCDC_XPHIDEF);

		yfactor = atmel_hlcdc_plane_phiscaler_get_factor(state->src_h,
							state->crtc_h,
							ATMEL_HLCDC_YPHIDEF);

		atmel_hlcdc_plane_scaler_set_phicoeff(plane,
				state->crtc_w < state->src_w ?
				heo_downscaling_xcoef :
				heo_upscaling_xcoef,
				ARRAY_SIZE(heo_upscaling_xcoef),
				desc->layout.phicoeffs.x);

		atmel_hlcdc_plane_scaler_set_phicoeff(plane,
				state->crtc_h < state->src_h ?
				heo_downscaling_ycoef :
				heo_upscaling_ycoef,
				ARRAY_SIZE(heo_upscaling_ycoef),
				desc->layout.phicoeffs.y);
	} else {
		xfactor = (1024 * state->src_w) / state->crtc_w;
		yfactor = (1024 * state->src_h) / state->crtc_h;
	}

	atmel_hlcdc_layer_write_cfg(&plane->layer, desc->layout.scaler_config,
				    ATMEL_HLCDC_LAYER_SCALER_ENABLE |
				    ATMEL_HLCDC_LAYER_SCALER_FACTORS(xfactor,
								     yfactor));
}

static void
atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
				      struct atmel_hlcdc_plane_state *state)
{
	const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc;

	if (desc->layout.size)
		atmel_hlcdc_layer_write_cfg(&plane->layer, desc->layout.size,
					ATMEL_HLCDC_LAYER_SIZE(state->crtc_w,
							       state->crtc_h));

	if (desc->layout.memsize)
		atmel_hlcdc_layer_write_cfg(&plane->layer,
					desc->layout.memsize,
					ATMEL_HLCDC_LAYER_SIZE(state->src_w,
							       state->src_h));

	if (desc->layout.pos)
		atmel_hlcdc_layer_write_cfg(&plane->layer, desc->layout.pos,
					ATMEL_HLCDC_LAYER_POS(state->crtc_x,
							      state->crtc_y));

	atmel_hlcdc_plane_setup_scaler(plane, state);
}

static void
atmel_hlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane,
					struct atmel_hlcdc_plane_state *state)
{
	unsigned int cfg = ATMEL_HLCDC_LAYER_DMA_BLEN_INCR16 | state->ahb_id;
	const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc;
	const struct drm_format_info *format = state->base.fb->format;

	/*
	 * Rotation optimization is not working on RGB888 (rotation is still
	 * working but without any optimization).
	 */
	if (format->format == DRM_FORMAT_RGB888)
		cfg |= ATMEL_HLCDC_LAYER_DMA_ROTDIS;

	atmel_hlcdc_layer_write_cfg(&plane->layer, ATMEL_HLCDC_LAYER_DMA_CFG,
				    cfg);

	cfg = ATMEL_HLCDC_LAYER_DMA | ATMEL_HLCDC_LAYER_REP;

	if (plane->base.type != DRM_PLANE_TYPE_PRIMARY) {
		/* Non-primary planes blend over the layers below them. */
		cfg |= ATMEL_HLCDC_LAYER_OVR | ATMEL_HLCDC_LAYER_ITER2BL |
		       ATMEL_HLCDC_LAYER_ITER;

		if (format->has_alpha)
			cfg |= ATMEL_HLCDC_LAYER_LAEN;
		else
			cfg |= ATMEL_HLCDC_LAYER_GAEN |
			       ATMEL_HLCDC_LAYER_GA(state->base.alpha);
	}

	if (state->disc_h && state->disc_w)
		cfg |= ATMEL_HLCDC_LAYER_DISCEN;

	atmel_hlcdc_layer_write_cfg(&plane->layer, desc->layout.general_config,
				    cfg);
}

static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
					struct atmel_hlcdc_plane_state *state)
{
	u32 cfg;
	int ret;

	ret = atmel_hlcdc_format_to_plane_mode(state->base.fb->format->format,
					       &cfg);
	if (ret)
		return;

	if ((state->base.fb->format->format == DRM_FORMAT_YUV422 ||
	     state->base.fb->format->format == DRM_FORMAT_NV61) &&
	    drm_rotation_90_or_270(state->base.rotation))
		cfg |= ATMEL_HLCDC_YUV422ROT;

	atmel_hlcdc_layer_write_cfg(&plane->layer,
				    ATMEL_HLCDC_LAYER_FORMAT_CFG, cfg);
}

/*
 * Load the hardware CLUT from the CRTC gamma LUT, packing each 16-bit
 * drm_color_lut channel down to 8 bits per component.
 */
static void atmel_hlcdc_plane_update_clut(struct atmel_hlcdc_plane *plane,
					  struct atmel_hlcdc_plane_state *state)
{
	struct drm_crtc *crtc = state->base.crtc;
	struct drm_color_lut *lut;
	int idx;

	if (!crtc || !crtc->state)
		return;

	if (!crtc->state->color_mgmt_changed || !crtc->state->gamma_lut)
		return;

	lut = (struct drm_color_lut *)crtc->state->gamma_lut->data;

	for (idx = 0; idx < ATMEL_HLCDC_CLUT_SIZE; idx++, lut++) {
		u32 val = ((lut->red << 8) & 0xff0000) |
			(lut->green & 0xff00) |
			(lut->blue >> 8);

		atmel_hlcdc_layer_write_clut(&plane->layer, idx, val);
	}
}

/*
 * Point the per-plane DMA descriptors at the new GEM buffers. When the
 * channel is not yet enabled the ADDR/CTRL/NEXT registers are programmed
 * directly; otherwise only the descriptor head is updated and the hardware
 * picks it up at the next frame.
 */
static void atmel_hlcdc_plane_update_buffers(struct atmel_hlcdc_plane *plane,
					struct atmel_hlcdc_plane_state *state)
{
	const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc;
	struct drm_framebuffer *fb = state->base.fb;
	u32 sr;
	int i;

	sr = atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_HLCDC_LAYER_CHSR);

	for (i = 0; i < state->nplanes; i++) {
		struct drm_gem_dma_object *gem = drm_fb_dma_get_gem_obj(fb, i);

		state->dscrs[i]->addr = gem->dma_addr + state->offsets[i];

		atmel_hlcdc_layer_write_reg(&plane->layer,
					    ATMEL_HLCDC_LAYER_PLANE_HEAD(i),
					    state->dscrs[i]->self);

		if (!(sr & ATMEL_HLCDC_LAYER_EN)) {
			atmel_hlcdc_layer_write_reg(&plane->layer,
					ATMEL_HLCDC_LAYER_PLANE_ADDR(i),
					state->dscrs[i]->addr);
			atmel_hlcdc_layer_write_reg(&plane->layer,
					ATMEL_HLCDC_LAYER_PLANE_CTRL(i),
					state->dscrs[i]->ctrl);
			atmel_hlcdc_layer_write_reg(&plane->layer,
					ATMEL_HLCDC_LAYER_PLANE_NEXT(i),
					state->dscrs[i]->self);
		}

		if (desc->layout.xstride[i])
			atmel_hlcdc_layer_write_cfg(&plane->layer,
						    desc->layout.xstride[i],
						    state->xstride[i]);

		if (desc->layout.pstride[i])
			atmel_hlcdc_layer_write_cfg(&plane->layer,
						    desc->layout.pstride[i],
						    state->pstride[i]);
	}
}

/*
 * Balance the per-plane memory traffic across the two AHB masters by
 * assigning each plane to whichever master currently carries less load.
 */
int atmel_hlcdc_plane_prepare_ahb_routing(struct drm_crtc_state *c_state)
{
	unsigned int ahb_load[2] = { };
	struct drm_plane *plane;

	drm_atomic_crtc_state_for_each_plane(plane, c_state) {
		struct atmel_hlcdc_plane_state *plane_state;
		struct drm_plane_state *plane_s;
		unsigned int pixels, load = 0;
		int i;

		plane_s = drm_atomic_get_plane_state(c_state->state, plane);
		if (IS_ERR(plane_s))
			return PTR_ERR(plane_s);

		plane_state =
			drm_plane_state_to_atmel_hlcdc_plane_state(plane_s);

		pixels = (plane_state->src_w * plane_state->src_h) -
			 (plane_state->disc_w * plane_state->disc_h);

		for (i = 0; i < plane_state->nplanes; i++)
			load += pixels * plane_state->bpp[i];

		if (ahb_load[0] <= ahb_load[1])
			plane_state->ahb_id = 0;
		else
			plane_state->ahb_id = 1;

		ahb_load[plane_state->ahb_id] += load;
	}

	return 0;
}

/*
 * Compute the largest region of the primary plane fully covered by an
 * opaque overlay, so the hardware can skip fetching those pixels
 * (discard area).
 */
int
atmel_hlcdc_plane_prepare_disc_area(struct drm_crtc_state *c_state)
{
	int disc_x = 0, disc_y = 0, disc_w = 0, disc_h = 0;
	const struct atmel_hlcdc_layer_cfg_layout *layout;
	struct atmel_hlcdc_plane_state *primary_state;
	struct drm_plane_state *primary_s;
	struct atmel_hlcdc_plane *primary;
	struct drm_plane *ovl;

	primary = drm_plane_to_atmel_hlcdc_plane(c_state->crtc->primary);
	layout = &primary->layer.desc->layout;
	if (!layout->disc_pos || !layout->disc_size)
		return 0;

	primary_s = drm_atomic_get_plane_state(c_state->state,
					       &primary->base);
	if (IS_ERR(primary_s))
		return PTR_ERR(primary_s);

	primary_state = drm_plane_state_to_atmel_hlcdc_plane_state(primary_s);

	drm_atomic_crtc_state_for_each_plane(ovl, c_state) {
		struct atmel_hlcdc_plane_state *ovl_state;
		struct drm_plane_state *ovl_s;

		if (ovl == c_state->crtc->primary)
			continue;

		ovl_s = drm_atomic_get_plane_state(c_state->state, ovl);
		if (IS_ERR(ovl_s))
			return PTR_ERR(ovl_s);

		ovl_state = drm_plane_state_to_atmel_hlcdc_plane_state(ovl_s);

		if (!ovl_s->visible ||
		    !ovl_s->fb ||
		    ovl_s->fb->format->has_alpha ||
		    ovl_s->alpha != DRM_BLEND_ALPHA_OPAQUE)
			continue;

		/* TODO: implement a smarter hidden area detection */
		if (ovl_state->crtc_h * ovl_state->crtc_w < disc_h * disc_w)
			continue;

		disc_x = ovl_state->crtc_x;
		disc_y = ovl_state->crtc_y;
		disc_h = ovl_state->crtc_h;
		disc_w = ovl_state->crtc_w;
	}

	primary_state->disc_x = disc_x;
	primary_state->disc_y = disc_y;
	primary_state->disc_w = disc_w;
	primary_state->disc_h = disc_h;

	return 0;
}

static void
atmel_hlcdc_plane_update_disc_area(struct atmel_hlcdc_plane *plane,
				   struct atmel_hlcdc_plane_state *state)
{
	const struct atmel_hlcdc_layer_cfg_layout *layout;

	layout = &plane->layer.desc->layout;
	if (!layout->disc_pos || !layout->disc_size)
		return;

	atmel_hlcdc_layer_write_cfg(&plane->layer, layout->disc_pos,
				ATMEL_HLCDC_LAYER_DISC_POS(state->disc_x,
							   state->disc_y));

	atmel_hlcdc_layer_write_cfg(&plane->layer, layout->disc_size,
				ATMEL_HLCDC_LAYER_DISC_SIZE(state->disc_w,
							    state->disc_h));
}

/*
 * Validate the plane state and precompute the per-plane byte offsets and
 * x/p strides for the requested rotation, so atomic_update only has to
 * write registers.
 */
static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
					  struct drm_atomic_state *state)
{
	struct drm_plane_state *s = drm_atomic_get_new_plane_state(state, p);
	struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
	struct atmel_hlcdc_plane_state *hstate =
			drm_plane_state_to_atmel_hlcdc_plane_state(s);
	const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc;
	struct drm_framebuffer *fb = hstate->base.fb;
	const struct drm_display_mode *mode;
	struct drm_crtc_state *crtc_state;
	int ret;
	int i;

	if (!hstate->base.crtc || WARN_ON(!fb))
		return 0;

	crtc_state = drm_atomic_get_existing_crtc_state(state, s->crtc);
	mode = &crtc_state->adjusted_mode;

	ret = drm_atomic_helper_check_plane_state(s, crtc_state,
						  (1 << 16) / 2048,
						  INT_MAX, true, true);
	if (ret || !s->visible)
		return ret;

	hstate->src_x = s->src.x1;
	hstate->src_y = s->src.y1;
	hstate->src_w = drm_rect_width(&s->src);
	hstate->src_h = drm_rect_height(&s->src);
	hstate->crtc_x = s->dst.x1;
	hstate->crtc_y = s->dst.y1;
	hstate->crtc_w = drm_rect_width(&s->dst);
	hstate->crtc_h = drm_rect_height(&s->dst);

	/* Source coordinates are 16.16 fixed point; reject sub-pixel values. */
	if ((hstate->src_x | hstate->src_y | hstate->src_w | hstate->src_h) &
	    SUBPIXEL_MASK)
		return -EINVAL;

	hstate->src_x >>= 16;
	hstate->src_y >>= 16;
	hstate->src_w >>= 16;
	hstate->src_h >>= 16;

	hstate->nplanes = fb->format->num_planes;
	if (hstate->nplanes > ATMEL_HLCDC_LAYER_MAX_PLANES)
		return -EINVAL;

	for (i = 0; i < hstate->nplanes; i++) {
		unsigned int offset = 0;
		int xdiv = i ? fb->format->hsub : 1;
		int ydiv = i ? fb->format->vsub : 1;

		hstate->bpp[i] = fb->format->cpp[i];
		if (!hstate->bpp[i])
			return -EINVAL;

		switch (hstate->base.rotation & DRM_MODE_ROTATE_MASK) {
		case DRM_MODE_ROTATE_90:
			offset = (hstate->src_y / ydiv) *
				 fb->pitches[i];
			offset += ((hstate->src_x + hstate->src_w - 1) /
				   xdiv) * hstate->bpp[i];
			hstate->xstride[i] = -(((hstate->src_h - 1) / ydiv) *
					    fb->pitches[i]) -
					  (2 * hstate->bpp[i]);
			hstate->pstride[i] = fb->pitches[i] - hstate->bpp[i];
			break;
		case DRM_MODE_ROTATE_180:
			offset = ((hstate->src_y + hstate->src_h - 1) /
				  ydiv) * fb->pitches[i];
			offset += ((hstate->src_x + hstate->src_w - 1) /
				   xdiv) * hstate->bpp[i];
			hstate->xstride[i] = ((((hstate->src_w - 1) / xdiv) - 1) *
					   hstate->bpp[i]) - fb->pitches[i];
			hstate->pstride[i] = -2 * hstate->bpp[i];
			break;
		case DRM_MODE_ROTATE_270:
			offset = ((hstate->src_y + hstate->src_h - 1) /
				  ydiv) * fb->pitches[i];
			offset += (hstate->src_x / xdiv) * hstate->bpp[i];
			hstate->xstride[i] = ((hstate->src_h - 1) / ydiv) *
					  fb->pitches[i];
			hstate->pstride[i] = -fb->pitches[i] - hstate->bpp[i];
			break;
		case DRM_MODE_ROTATE_0:
		default:
			offset = (hstate->src_y / ydiv) * fb->pitches[i];
			offset += (hstate->src_x / xdiv) * hstate->bpp[i];
			hstate->xstride[i] = fb->pitches[i] -
					  ((hstate->src_w / xdiv) *
					   hstate->bpp[i]);
			hstate->pstride[i] = 0;
			break;
		}

		hstate->offsets[i] = offset + fb->offsets[i];
	}

	/*
	 * Swap width and size in case of 90 or 270 degrees rotation
	 */
	if (drm_rotation_90_or_270(hstate->base.rotation)) {
		swap(hstate->src_w, hstate->src_h);
	}

	if (!desc->layout.size &&
	    (mode->hdisplay != hstate->crtc_w ||
	     mode->vdisplay != hstate->crtc_h))
		return -EINVAL;

	if ((hstate->crtc_h != hstate->src_h || hstate->crtc_w != hstate->src_w) &&
	    (!desc->layout.memsize ||
	     hstate->base.fb->format->has_alpha))
		return -EINVAL;

	return 0;
}

static void atmel_hlcdc_plane_atomic_disable(struct drm_plane *p,
					     struct drm_atomic_state *state)
{
	struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);

	/* Disable interrupts */
	atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_HLCDC_LAYER_IDR,
				    0xffffffff);

	/* Disable the layer */
	atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_HLCDC_LAYER_CHDR,
				    ATMEL_HLCDC_LAYER_RST |
				    ATMEL_HLCDC_LAYER_A2Q |
				    ATMEL_HLCDC_LAYER_UPDATE);

	/* Clear all pending interrupts */
	atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_HLCDC_LAYER_ISR);
}

static void atmel_hlcdc_plane_atomic_update(struct drm_plane *p,
					    struct drm_atomic_state *state)
{
	struct drm_plane_state *new_s = drm_atomic_get_new_plane_state(state,
								       p);
	struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
	struct atmel_hlcdc_plane_state *hstate =
			drm_plane_state_to_atmel_hlcdc_plane_state(new_s);
	u32 sr;

	if (!new_s->crtc || !new_s->fb)
		return;

	if (!hstate->base.visible) {
		atmel_hlcdc_plane_atomic_disable(p, state);
		return;
	}

	atmel_hlcdc_plane_update_pos_and_size(plane, hstate);
	atmel_hlcdc_plane_update_general_settings(plane, hstate);
	atmel_hlcdc_plane_update_format(plane, hstate);
	atmel_hlcdc_plane_update_clut(plane, hstate);
	atmel_hlcdc_plane_update_buffers(plane, hstate);
	atmel_hlcdc_plane_update_disc_area(plane, hstate);

	/* Enable the overrun interrupts. */
	atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_HLCDC_LAYER_IER,
				    ATMEL_HLCDC_LAYER_OVR_IRQ(0) |
				    ATMEL_HLCDC_LAYER_OVR_IRQ(1) |
				    ATMEL_HLCDC_LAYER_OVR_IRQ(2));

	/* Apply the new config at the next SOF event. */
	sr = atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_HLCDC_LAYER_CHSR);
	atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_HLCDC_LAYER_CHER,
				    ATMEL_HLCDC_LAYER_UPDATE |
				    (sr & ATMEL_HLCDC_LAYER_EN ?
				     ATMEL_HLCDC_LAYER_A2Q :
				     ATMEL_HLCDC_LAYER_EN));
}

static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane)
{
	const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc;

	if (desc->type == ATMEL_HLCDC_OVERLAY_LAYER ||
	    desc->type == ATMEL_HLCDC_CURSOR_LAYER) {
		int ret;

		ret = drm_plane_create_alpha_property(&plane->base);
		if (ret)
			return ret;
	}

	if (desc->layout.xstride[0] && desc->layout.pstride[0]) {
		int ret;

		ret = drm_plane_create_rotation_property(&plane->base,
							 DRM_MODE_ROTATE_0,
							 DRM_MODE_ROTATE_0 |
							 DRM_MODE_ROTATE_90 |
							 DRM_MODE_ROTATE_180 |
							 DRM_MODE_ROTATE_270);
		if (ret)
			return ret;
	}

	if (desc->layout.csc) {
		/*
		 * TODO: decare a "yuv-to-rgb-conv-factors" property to let
		 * userspace modify these factors (using a BLOB property ?).
		 */
		atmel_hlcdc_layer_write_cfg(&plane->layer,
					    desc->layout.csc,
					    0x4c900091);
		atmel_hlcdc_layer_write_cfg(&plane->layer,
					    desc->layout.csc + 1,
					    0x7a5f5090);
		atmel_hlcdc_layer_write_cfg(&plane->layer,
					    desc->layout.csc + 2,
					    0x40040890);
	}

	return 0;
}

void atmel_hlcdc_plane_irq(struct atmel_hlcdc_plane *plane)
{
	const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc;
	u32 isr;

	isr = atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_HLCDC_LAYER_ISR);

	/*
	 * There's not much we can do in case of overrun except informing
	 * the user. However, we are in interrupt context here, hence the
	 * use of dev_dbg().
	 */
	if (isr &
	    (ATMEL_HLCDC_LAYER_OVR_IRQ(0) | ATMEL_HLCDC_LAYER_OVR_IRQ(1) |
	     ATMEL_HLCDC_LAYER_OVR_IRQ(2)))
		dev_dbg(plane->base.dev->dev, "overrun on plane %s\n",
			desc->name);
}

static const struct drm_plane_helper_funcs atmel_hlcdc_layer_plane_helper_funcs = {
	.atomic_check = atmel_hlcdc_plane_atomic_check,
	.atomic_update = atmel_hlcdc_plane_atomic_update,
	.atomic_disable = atmel_hlcdc_plane_atomic_disable,
};

/*
 * Allocate one DMA channel descriptor per potential plane from the shared
 * dma_pool; each descriptor initially points to itself (single-entry ring).
 * On failure, free what was allocated and return -ENOMEM.
 */
static int atmel_hlcdc_plane_alloc_dscrs(struct drm_plane *p,
					 struct atmel_hlcdc_plane_state *state)
{
	struct atmel_hlcdc_dc *dc = p->dev->dev_private;
	int i;

	for (i = 0; i < ARRAY_SIZE(state->dscrs); i++) {
		struct atmel_hlcdc_dma_channel_dscr *dscr;
		dma_addr_t dscr_dma;

		dscr = dma_pool_alloc(dc->dscrpool, GFP_KERNEL, &dscr_dma);
		if (!dscr)
			goto err;

		dscr->addr = 0;
		dscr->next = dscr_dma;
		dscr->self = dscr_dma;
		dscr->ctrl = ATMEL_HLCDC_LAYER_DFETCH;

		state->dscrs[i] = dscr;
	}

	return 0;

err:
	for (i--; i >= 0; i--) {
		dma_pool_free(dc->dscrpool, state->dscrs[i],
			      state->dscrs[i]->self);
	}

	return -ENOMEM;
}

static void atmel_hlcdc_plane_reset(struct drm_plane *p)
{
	struct atmel_hlcdc_plane_state *state;

	if (p->state) {
		state = drm_plane_state_to_atmel_hlcdc_plane_state(p->state);

		if (state->base.fb)
			drm_framebuffer_put(state->base.fb);

		kfree(state);
		p->state = NULL;
	}

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state) {
		if (atmel_hlcdc_plane_alloc_dscrs(p, state)) {
			kfree(state);
			dev_err(p->dev->dev,
				"Failed to allocate initial plane state\n");
			return;
		}
		__drm_atomic_helper_plane_reset(p, &state->base);
	}
}

static struct drm_plane_state *
atmel_hlcdc_plane_atomic_duplicate_state(struct drm_plane *p)
{
	struct atmel_hlcdc_plane_state *state =
			drm_plane_state_to_atmel_hlcdc_plane_state(p->state);
	struct atmel_hlcdc_plane_state *copy;

	copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
	if (!copy)
		return NULL;

	/* The copy must own fresh descriptors, not share the original's. */
	if (atmel_hlcdc_plane_alloc_dscrs(p, copy)) {
		kfree(copy);
		return NULL;
	}

	if (copy->base.fb)
		drm_framebuffer_get(copy->base.fb);

	return &copy->base;
}

static void atmel_hlcdc_plane_atomic_destroy_state(struct drm_plane *p,
						   struct drm_plane_state *s)
{
	struct atmel_hlcdc_plane_state *state =
			drm_plane_state_to_atmel_hlcdc_plane_state(s);
	struct atmel_hlcdc_dc *dc = p->dev->dev_private;
	int i;

	for (i = 0; i < ARRAY_SIZE(state->dscrs); i++) {
		dma_pool_free(dc->dscrpool, state->dscrs[i],
			      state->dscrs[i]->self);
	}

	if (s->fb)
		drm_framebuffer_put(s->fb);

	kfree(state);
}

static const struct drm_plane_funcs layer_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_plane_cleanup,
	.reset = atmel_hlcdc_plane_reset,
	.atomic_duplicate_state = atmel_hlcdc_plane_atomic_duplicate_state,
	.atomic_destroy_state = atmel_hlcdc_plane_atomic_destroy_state,
};

static int atmel_hlcdc_plane_create(struct drm_device *dev,
				    const struct atmel_hlcdc_layer_desc *desc)
{
	struct atmel_hlcdc_dc *dc = dev->dev_private;
	struct atmel_hlcdc_plane *plane;
	enum drm_plane_type type;
	int ret;

	plane = devm_kzalloc(dev->dev, sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return -ENOMEM;

	atmel_hlcdc_layer_init(&plane->layer, desc, dc->hlcdc->regmap);

	if (desc->type == ATMEL_HLCDC_BASE_LAYER)
		type = DRM_PLANE_TYPE_PRIMARY;
	else if (desc->type == ATMEL_HLCDC_CURSOR_LAYER)
		type = DRM_PLANE_TYPE_CURSOR;
	else
		type = DRM_PLANE_TYPE_OVERLAY;

	ret = drm_universal_plane_init(dev, &plane->base, 0,
				       &layer_plane_funcs,
				       desc->formats->formats,
				       desc->formats->nformats,
				       NULL, type, NULL);
	if (ret)
		return ret;

	drm_plane_helper_add(&plane->base,
			     &atmel_hlcdc_layer_plane_helper_funcs);

	/* Set default property values*/
	ret = atmel_hlcdc_plane_init_properties(plane);
	if (ret)
		return ret;

	dc->layers[desc->id] = &plane->layer;

	return 0;
}

int atmel_hlcdc_create_planes(struct drm_device *dev)
{
	struct atmel_hlcdc_dc *dc = dev->dev_private;
	const struct atmel_hlcdc_layer_desc *descs = dc->desc->layers;
	int nlayers = dc->desc->nlayers;
	int i, ret;

	dc->dscrpool = dmam_pool_create("atmel-hlcdc-dscr", dev->dev,
				sizeof(struct atmel_hlcdc_dma_channel_dscr),
				sizeof(u64), 0);
	if (!dc->dscrpool)
		return -ENOMEM;

	for (i = 0; i < nlayers; i++) {
		if (descs[i].type != ATMEL_HLCDC_BASE_LAYER &&
		    descs[i].type != ATMEL_HLCDC_OVERLAY_LAYER &&
		    descs[i].type != ATMEL_HLCDC_CURSOR_LAYER)
			continue;

		ret = atmel_hlcdc_plane_create(dev, &descs[i]);
		if (ret)
			return ret;
	}

	return 0;
}
linux-master
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014 Traphandler
 * Copyright (C) 2014 Free Electrons
 * Copyright (C) 2014 Atmel
 *
 * Author: Jean-Jacques Hiblot <[email protected]>
 * Author: Boris BREZILLON <[email protected]>
 */

#include <linux/media-bus-format.h>
#include <linux/of.h>
#include <linux/of_graph.h>

#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>
#include <drm/drm_of.h>
#include <drm/drm_simple_kms_helper.h>

#include "atmel_hlcdc_dc.h"

/* One RGB output: a DRM encoder plus the media-bus format of its endpoint. */
struct atmel_hlcdc_rgb_output {
	struct drm_encoder encoder;
	int bus_fmt;
};

static struct atmel_hlcdc_rgb_output *
atmel_hlcdc_encoder_to_rgb_output(struct drm_encoder *encoder)
{
	return container_of(encoder, struct atmel_hlcdc_rgb_output, encoder);
}

/* Return the MEDIA_BUS_FMT_* value recorded for this encoder's endpoint. */
int atmel_hlcdc_encoder_get_bus_fmt(struct drm_encoder *encoder)
{
	struct atmel_hlcdc_rgb_output *output;

	output = atmel_hlcdc_encoder_to_rgb_output(encoder);

	return output->bus_fmt;
}

/*
 * Translate the optional "bus-width" DT endpoint property into a media-bus
 * format.  A missing property is not an error (returns 0, meaning
 * "unspecified"); an unsupported width returns -EINVAL.
 */
static int atmel_hlcdc_of_bus_fmt(const struct device_node *ep)
{
	u32 bus_width;
	int ret;

	ret = of_property_read_u32(ep, "bus-width", &bus_width);
	if (ret == -EINVAL)
		return 0;
	if (ret)
		return ret;

	switch (bus_width) {
	case 12:
		return MEDIA_BUS_FMT_RGB444_1X12;
	case 16:
		return MEDIA_BUS_FMT_RGB565_1X16;
	case 18:
		return MEDIA_BUS_FMT_RGB666_1X18;
	case 24:
		return MEDIA_BUS_FMT_RGB888_1X24;
	default:
		return -EINVAL;
	}
}

/*
 * Look up OF-graph endpoint @endpoint on port 0, find the panel or bridge
 * connected to it, and wire it up behind a new simple encoder.  A panel is
 * wrapped in a panel-bridge so that the rest of the code only deals with
 * bridges.  Returns -ENODEV when the endpoint does not exist, 0 on success
 * or a negative errno, cleaning up the encoder on failure.
 */
static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
{
	struct atmel_hlcdc_rgb_output *output;
	struct device_node *ep;
	struct drm_panel *panel;
	struct drm_bridge *bridge;
	int ret;

	ep = of_graph_get_endpoint_by_regs(dev->dev->of_node, 0, endpoint);
	if (!ep)
		return -ENODEV;

	ret = drm_of_find_panel_or_bridge(dev->dev->of_node, 0, endpoint,
					  &panel, &bridge);
	if (ret) {
		of_node_put(ep);
		return ret;
	}

	output = devm_kzalloc(dev->dev, sizeof(*output), GFP_KERNEL);
	if (!output) {
		of_node_put(ep);
		return -ENOMEM;
	}

	output->bus_fmt = atmel_hlcdc_of_bus_fmt(ep);
	of_node_put(ep);
	if (output->bus_fmt < 0) {
		dev_err(dev->dev, "endpoint %d: invalid bus width\n", endpoint);
		return -EINVAL;
	}

	ret = drm_simple_encoder_init(dev, &output->encoder,
				      DRM_MODE_ENCODER_NONE);
	if (ret)
		return ret;

	/* Single CRTC on this hardware. */
	output->encoder.possible_crtcs = 0x1;

	if (panel) {
		bridge = drm_panel_bridge_add_typed(panel,
						    DRM_MODE_CONNECTOR_Unknown);
		if (IS_ERR(bridge))
			return PTR_ERR(bridge);
	}

	if (bridge) {
		ret = drm_bridge_attach(&output->encoder, bridge, NULL, 0);
		if (!ret)
			return 0;

		if (panel)
			drm_panel_bridge_remove(bridge);
	}

	drm_encoder_cleanup(&output->encoder);

	return ret;
}

/*
 * Probe the device's OF-graph endpoints and attach an output for each one
 * found.  Succeeds as long as at least one endpoint attached, even if the
 * scan eventually ran off the end of the endpoint list (-ENODEV).
 */
int atmel_hlcdc_create_outputs(struct drm_device *dev)
{
	int endpoint, ret = 0;
	int attached = 0;

	/*
	 * Always scan the first few endpoints even if we get -ENODEV,
	 * but keep going after that as long as we keep getting hits.
	 */
	for (endpoint = 0; !ret || endpoint < 4; endpoint++) {
		ret = atmel_hlcdc_attach_endpoint(dev, endpoint);
		if (ret == -ENODEV)
			continue;
		if (ret)
			break;
		attached++;
	}

	/* At least one device was successfully attached.*/
	if (ret == -ENODEV && attached)
		return 0;

	return ret;
}
linux-master
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2016 Marek Vasut <[email protected]>
 *
 * This code is based on drivers/video/fbdev/mxsfb.c :
 * Copyright (C) 2010 Juergen Beisert, Pengutronix
 * Copyright (C) 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved.
 * Copyright (C) 2008 Embedded Alley Solutions, Inc All Rights Reserved.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "mxsfb_drv.h"
#include "mxsfb_regs.h"

/* Hardware generations of the LCDIF block supported by this driver. */
enum mxsfb_devtype {
	MXSFB_V3,
	MXSFB_V4,
	/*
	 * Starting at i.MX6 the hardware version register is gone, use the
	 * i.MX family number as the version.
	 */
	MXSFB_V6,
};

/* Per-generation register offsets and feature flags. */
static const struct mxsfb_devdata mxsfb_devdata[] = {
	[MXSFB_V3] = {
		.transfer_count	= LCDC_V3_TRANSFER_COUNT,
		.cur_buf	= LCDC_V3_CUR_BUF,
		.next_buf	= LCDC_V3_NEXT_BUF,
		.hs_wdth_mask	= 0xff,
		.hs_wdth_shift	= 24,
		.has_overlay	= false,
		.has_ctrl2	= false,
		.has_crc32	= false,
	},
	[MXSFB_V4] = {
		.transfer_count	= LCDC_V4_TRANSFER_COUNT,
		.cur_buf	= LCDC_V4_CUR_BUF,
		.next_buf	= LCDC_V4_NEXT_BUF,
		.hs_wdth_mask	= 0x3fff,
		.hs_wdth_shift	= 18,
		.has_overlay	= false,
		.has_ctrl2	= true,
		.has_crc32	= true,
	},
	[MXSFB_V6] = {
		.transfer_count	= LCDC_V4_TRANSFER_COUNT,
		.cur_buf	= LCDC_V4_CUR_BUF,
		.next_buf	= LCDC_V4_NEXT_BUF,
		.hs_wdth_mask	= 0x3fff,
		.hs_wdth_shift	= 18,
		.has_overlay	= true,
		.has_ctrl2	= true,
		.has_crc32	= true,
	},
};

/* The AXI clock is optional (clk_axi may be NULL); clk API handles that. */
void mxsfb_enable_axi_clk(struct mxsfb_drm_private *mxsfb)
{
	clk_prepare_enable(mxsfb->clk_axi);
}

void mxsfb_disable_axi_clk(struct mxsfb_drm_private *mxsfb)
{
	clk_disable_unprepare(mxsfb->clk_axi);
}

/*
 * .fb_create hook: the controller cannot scan out with a pitch different
 * from width * bytes-per-pixel, so reject such framebuffers up front.
 */
static struct drm_framebuffer *
mxsfb_fb_create(struct drm_device *dev, struct drm_file *file_priv,
		const struct drm_mode_fb_cmd2 *mode_cmd)
{
	const struct drm_format_info *info;

	info = drm_get_format_info(dev, mode_cmd);
	if (!info)
		return ERR_PTR(-EINVAL);

	if (mode_cmd->width * info->cpp[0] != mode_cmd->pitches[0]) {
		dev_dbg(dev->dev, "Invalid pitch: fb width must match pitch\n");
		return ERR_PTR(-EINVAL);
	}

	return drm_gem_fb_create(dev, file_priv, mode_cmd);
}

static const struct drm_mode_config_funcs mxsfb_mode_config_funcs = {
	.fb_create		= mxsfb_fb_create,
	.atomic_check		= drm_atomic_helper_check,
	.atomic_commit		= drm_atomic_helper_commit,
};

static const struct drm_mode_config_helper_funcs mxsfb_mode_config_helpers = {
	.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};

/*
 * Find the panel or bridge on the first OF-graph endpoint and attach it to
 * the driver's encoder.  A panel is wrapped in a devm panel-bridge.
 */
static int mxsfb_attach_bridge(struct mxsfb_drm_private *mxsfb)
{
	struct drm_device *drm = mxsfb->drm;
	struct drm_connector_list_iter iter;
	struct drm_panel *panel;
	struct drm_bridge *bridge;
	int ret;

	ret = drm_of_find_panel_or_bridge(drm->dev->of_node, 0, 0,
					  &panel, &bridge);
	if (ret)
		return ret;

	if (panel) {
		bridge = devm_drm_panel_bridge_add_typed(drm->dev, panel,
							 DRM_MODE_CONNECTOR_DPI);
		if (IS_ERR(bridge))
			return PTR_ERR(bridge);
	}

	if (!bridge)
		return -ENODEV;

	ret = drm_bridge_attach(&mxsfb->encoder, bridge, NULL, 0);
	if (ret)
		return dev_err_probe(drm->dev, ret, "Failed to attach bridge\n");

	mxsfb->bridge = bridge;

	/*
	 * Get hold of the connector. This is a bit of a hack, until the bridge
	 * API gives us bus flags and formats.
	 */
	drm_connector_list_iter_begin(drm, &iter);
	mxsfb->connector = drm_connector_list_iter_next(&iter);
	drm_connector_list_iter_end(&iter);

	return 0;
}

/*
 * Interrupt handler: forward frame-done events as vblank, and record a CRC
 * entry when CRC capture is active.  The IRQ is acknowledged by writing it
 * back into the CTRL1 clear register.
 */
static irqreturn_t mxsfb_irq_handler(int irq, void *data)
{
	struct drm_device *drm = data;
	struct mxsfb_drm_private *mxsfb = drm->dev_private;
	u32 reg, crc;
	u64 vbc;

	reg = readl(mxsfb->base + LCDC_CTRL1);

	if (reg & CTRL1_CUR_FRAME_DONE_IRQ) {
		drm_crtc_handle_vblank(&mxsfb->crtc);
		if (mxsfb->crc_active) {
			crc = readl(mxsfb->base + LCDC_V4_CRC_STAT);
			vbc = drm_crtc_accurate_vblank_count(&mxsfb->crtc);
			drm_crtc_add_crc_entry(&mxsfb->crtc, true, vbc, &crc);
		}
	}

	writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR);

	return IRQ_HANDLED;
}

/* Mask and acknowledge the frame-done IRQ; needs the AXI clock running. */
static void mxsfb_irq_disable(struct drm_device *drm)
{
	struct mxsfb_drm_private *mxsfb = drm->dev_private;

	mxsfb_enable_axi_clk(mxsfb);

	/* Disable and clear VBLANK IRQ */
	writel(CTRL1_CUR_FRAME_DONE_IRQ_EN, mxsfb->base + LCDC_CTRL1 + REG_CLR);
	writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR);

	mxsfb_disable_axi_clk(mxsfb);
}

/* Quiesce the hardware IRQ source before installing the handler. */
static int mxsfb_irq_install(struct drm_device *dev, int irq)
{
	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	mxsfb_irq_disable(dev);

	return request_irq(irq, mxsfb_irq_handler, 0, dev->driver->name, dev);
}

static void mxsfb_irq_uninstall(struct drm_device *dev)
{
	struct mxsfb_drm_private *mxsfb = dev->dev_private;

	mxsfb_irq_disable(dev);
	free_irq(mxsfb->irq, dev);
}

/*
 * Bring up the device: map registers, grab clocks, initialize the KMS
 * pipeline, vblank, bridge, IRQ and fbdev plumbing.  On any failure after
 * pm_runtime_enable() the error path disables runtime PM again.
 */
static int mxsfb_load(struct drm_device *drm,
		      const struct mxsfb_devdata *devdata)
{
	struct platform_device *pdev = to_platform_device(drm->dev);
	struct mxsfb_drm_private *mxsfb;
	struct resource *res;
	int ret;

	mxsfb = devm_kzalloc(&pdev->dev, sizeof(*mxsfb), GFP_KERNEL);
	if (!mxsfb)
		return -ENOMEM;

	mxsfb->drm = drm;
	drm->dev_private = mxsfb;
	mxsfb->devdata = devdata;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mxsfb->base = devm_ioremap_resource(drm->dev, res);
	if (IS_ERR(mxsfb->base))
		return PTR_ERR(mxsfb->base);

	mxsfb->clk = devm_clk_get(drm->dev, NULL);
	if (IS_ERR(mxsfb->clk))
		return PTR_ERR(mxsfb->clk);

	mxsfb->clk_axi = devm_clk_get_optional(drm->dev, "axi");
	if (IS_ERR(mxsfb->clk_axi))
		return PTR_ERR(mxsfb->clk_axi);

	/* disp_axi is treated as optional: absence is not fatal. */
	mxsfb->clk_disp_axi = devm_clk_get(drm->dev, "disp_axi");
	if (IS_ERR(mxsfb->clk_disp_axi))
		mxsfb->clk_disp_axi = NULL;

	ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	pm_runtime_enable(drm->dev);

	/* Modeset init */
	drm_mode_config_init(drm);

	ret = mxsfb_kms_init(mxsfb);
	if (ret < 0) {
		dev_err(drm->dev, "Failed to initialize KMS pipeline\n");
		goto err_vblank;
	}

	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(drm->dev, "Failed to initialise vblank\n");
		goto err_vblank;
	}

	/* Start with vertical blanking interrupt reporting disabled. */
	drm_crtc_vblank_off(&mxsfb->crtc);

	ret = mxsfb_attach_bridge(mxsfb);
	if (ret) {
		dev_err_probe(drm->dev, ret, "Cannot connect bridge\n");
		goto err_vblank;
	}

	drm->mode_config.min_width	= MXSFB_MIN_XRES;
	drm->mode_config.min_height	= MXSFB_MIN_YRES;
	drm->mode_config.max_width	= MXSFB_MAX_XRES;
	drm->mode_config.max_height	= MXSFB_MAX_YRES;
	drm->mode_config.funcs		= &mxsfb_mode_config_funcs;
	drm->mode_config.helper_private	= &mxsfb_mode_config_helpers;

	drm_mode_config_reset(drm);

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto err_vblank;
	mxsfb->irq = ret;

	/* Power the block up just long enough to quiesce and claim the IRQ. */
	pm_runtime_get_sync(drm->dev);
	ret = mxsfb_irq_install(drm, mxsfb->irq);
	pm_runtime_put_sync(drm->dev);
	if (ret < 0) {
		dev_err(drm->dev, "Failed to install IRQ handler\n");
		goto err_vblank;
	}

	drm_kms_helper_poll_init(drm);

	platform_set_drvdata(pdev, drm);

	drm_helper_hpd_irq_event(drm);

	return 0;

err_vblank:
	pm_runtime_disable(drm->dev);

	return ret;
}

/* Tear down everything mxsfb_load() set up, in reverse order. */
static void mxsfb_unload(struct drm_device *drm)
{
	drm_kms_helper_poll_fini(drm);
	drm_mode_config_cleanup(drm);

	pm_runtime_get_sync(drm->dev);
	mxsfb_irq_uninstall(drm);
	pm_runtime_put_sync(drm->dev);

	drm->dev_private = NULL;

	pm_runtime_disable(drm->dev);
}

DEFINE_DRM_GEM_DMA_FOPS(fops);

static const struct drm_driver mxsfb_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	DRM_GEM_DMA_DRIVER_OPS,
	.fops	= &fops,
	.name	= "mxsfb-drm",
	.desc	= "MXSFB Controller DRM",
	.date	= "20160824",
	.major	= 1,
	.minor	= 0,
};

static const struct of_device_id mxsfb_dt_ids[] = {
	{ .compatible = "fsl,imx23-lcdif", .data = &mxsfb_devdata[MXSFB_V3], },
	{ .compatible = "fsl,imx28-lcdif", .data = &mxsfb_devdata[MXSFB_V4], },
	{ .compatible = "fsl,imx6sx-lcdif", .data = &mxsfb_devdata[MXSFB_V6], },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxsfb_dt_ids);

static int mxsfb_probe(struct platform_device *pdev)
{
	struct drm_device *drm;
	const struct of_device_id *of_id =
			of_match_device(mxsfb_dt_ids, &pdev->dev);
	int ret;

	if (!pdev->dev.of_node)
		return -ENODEV;

	drm = drm_dev_alloc(&mxsfb_driver, &pdev->dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	ret = mxsfb_load(drm, of_id->data);
	if (ret)
		goto err_free;

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto err_unload;

	drm_fbdev_dma_setup(drm, 32);

	return 0;

err_unload:
	mxsfb_unload(drm);
err_free:
	drm_dev_put(drm);

	return ret;
}

static void mxsfb_remove(struct platform_device *pdev)
{
	struct drm_device *drm = platform_get_drvdata(pdev);

	drm_dev_unregister(drm);
	drm_atomic_helper_shutdown(drm);
	mxsfb_unload(drm);
	drm_dev_put(drm);
}

static void mxsfb_shutdown(struct platform_device *pdev)
{
	struct drm_device *drm = platform_get_drvdata(pdev);

	drm_atomic_helper_shutdown(drm);
}

#ifdef CONFIG_PM_SLEEP
static int mxsfb_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_suspend(drm);
}

static int mxsfb_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_resume(drm);
}
#endif

static const struct dev_pm_ops mxsfb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mxsfb_suspend, mxsfb_resume)
};

static struct platform_driver mxsfb_platform_driver = {
	.probe		= mxsfb_probe,
	.remove_new	= mxsfb_remove,
	.shutdown	= mxsfb_shutdown,
	.driver	= {
		.name		= "mxsfb",
		.of_match_table	= mxsfb_dt_ids,
		.pm		= &mxsfb_pm_ops,
	},
};

drm_module_platform_driver(mxsfb_platform_driver);

MODULE_AUTHOR("Marek Vasut <[email protected]>");
MODULE_DESCRIPTION("Freescale MXS DRM/KMS driver");
MODULE_LICENSE("GPL");
linux-master
drivers/gpu/drm/mxsfb/mxsfb_drv.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2022 Marek Vasut <[email protected]>
 *
 * This code is based on drivers/gpu/drm/mxsfb/mxsfb*
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "lcdif_drv.h"
#include "lcdif_regs.h"

static const struct drm_mode_config_funcs lcdif_mode_config_funcs = {
	.fb_create		= drm_gem_fb_create,
	.atomic_check		= drm_atomic_helper_check,
	.atomic_commit		= drm_atomic_helper_commit,
};

static const struct drm_mode_config_helper_funcs lcdif_mode_config_helpers = {
	.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};

static const struct drm_encoder_funcs lcdif_encoder_funcs = {
	.destroy = drm_encoder_cleanup,
};

/*
 * Walk every available OF-graph endpoint and create an encoder + bridge
 * attachment per endpoint.  Endpoints whose remote device is unavailable
 * are skipped.  Returns 0 or the first error, with the endpoint reference
 * dropped on every error path (for_each_endpoint_of_node() holds it).
 */
static int lcdif_attach_bridge(struct lcdif_drm_private *lcdif)
{
	struct device *dev = lcdif->drm->dev;
	struct device_node *ep;
	struct drm_bridge *bridge;
	int ret;

	for_each_endpoint_of_node(dev->of_node, ep) {
		struct device_node *remote;
		struct of_endpoint of_ep;
		struct drm_encoder *encoder;

		remote = of_graph_get_remote_port_parent(ep);
		if (!of_device_is_available(remote)) {
			of_node_put(remote);
			continue;
		}
		of_node_put(remote);

		ret = of_graph_parse_endpoint(ep, &of_ep);
		if (ret < 0) {
			dev_err(dev, "Failed to parse endpoint %pOF\n", ep);
			of_node_put(ep);
			return ret;
		}

		bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, of_ep.id);
		if (IS_ERR(bridge)) {
			of_node_put(ep);
			return dev_err_probe(dev, PTR_ERR(bridge),
					     "Failed to get bridge for endpoint%u\n",
					     of_ep.id);
		}

		encoder = devm_kzalloc(dev, sizeof(*encoder), GFP_KERNEL);
		if (!encoder) {
			dev_err(dev, "Failed to allocate encoder for endpoint%u\n",
				of_ep.id);
			of_node_put(ep);
			return -ENOMEM;
		}

		encoder->possible_crtcs = drm_crtc_mask(&lcdif->crtc);
		ret = drm_encoder_init(lcdif->drm, encoder, &lcdif_encoder_funcs,
				       DRM_MODE_ENCODER_NONE, NULL);
		if (ret) {
			dev_err(dev, "Failed to initialize encoder for endpoint%u: %d\n",
				of_ep.id, ret);
			of_node_put(ep);
			return ret;
		}

		ret = drm_bridge_attach(encoder, bridge, NULL, 0);
		if (ret) {
			of_node_put(ep);
			return dev_err_probe(dev, ret,
					     "Failed to attach bridge for endpoint%u\n",
					     of_ep.id);
		}
	}

	return 0;
}

/*
 * Interrupt handler: report vblank on the VS_BLANK event, but only when no
 * shadow-register load is pending (the frame is actually complete).  All
 * pending status bits are acknowledged by writing them back.
 */
static irqreturn_t lcdif_irq_handler(int irq, void *data)
{
	struct drm_device *drm = data;
	struct lcdif_drm_private *lcdif = drm->dev_private;
	u32 reg, stat;

	stat = readl(lcdif->base + LCDC_V8_INT_STATUS_D0);
	if (!stat)
		return IRQ_NONE;

	if (stat & INT_STATUS_D0_VS_BLANK) {
		reg = readl(lcdif->base + LCDC_V8_CTRLDESCL0_5);
		if (!(reg & CTRLDESCL0_5_SHADOW_LOAD_EN))
			drm_crtc_handle_vblank(&lcdif->crtc);
	}

	writel(stat, lcdif->base + LCDC_V8_INT_STATUS_D0);

	return IRQ_HANDLED;
}

/*
 * Bring up the device: map registers, grab the pix/axi/disp_axi clocks,
 * set the 36-bit DMA mask, initialize the KMS pipeline, vblank, bridges
 * and the IRQ, then enable runtime PM.
 */
static int lcdif_load(struct drm_device *drm)
{
	struct platform_device *pdev = to_platform_device(drm->dev);
	struct lcdif_drm_private *lcdif;
	struct resource *res;
	int ret;

	lcdif = devm_kzalloc(&pdev->dev, sizeof(*lcdif), GFP_KERNEL);
	if (!lcdif)
		return -ENOMEM;

	lcdif->drm = drm;
	drm->dev_private = lcdif;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	lcdif->base = devm_ioremap_resource(drm->dev, res);
	if (IS_ERR(lcdif->base))
		return PTR_ERR(lcdif->base);

	lcdif->clk = devm_clk_get(drm->dev, "pix");
	if (IS_ERR(lcdif->clk))
		return PTR_ERR(lcdif->clk);

	lcdif->clk_axi = devm_clk_get(drm->dev, "axi");
	if (IS_ERR(lcdif->clk_axi))
		return PTR_ERR(lcdif->clk_axi);

	lcdif->clk_disp_axi = devm_clk_get(drm->dev, "disp_axi");
	if (IS_ERR(lcdif->clk_disp_axi))
		return PTR_ERR(lcdif->clk_disp_axi);

	platform_set_drvdata(pdev, drm);

	ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(36));
	if (ret)
		return ret;

	/* Modeset init */
	drm_mode_config_init(drm);

	ret = lcdif_kms_init(lcdif);
	if (ret < 0) {
		dev_err(drm->dev, "Failed to initialize KMS pipeline\n");
		return ret;
	}

	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(drm->dev, "Failed to initialise vblank\n");
		return ret;
	}

	/* Start with vertical blanking interrupt reporting disabled. */
	drm_crtc_vblank_off(&lcdif->crtc);

	ret = lcdif_attach_bridge(lcdif);
	if (ret)
		return dev_err_probe(drm->dev, ret, "Cannot connect bridge\n");

	drm->mode_config.min_width	= LCDIF_MIN_XRES;
	drm->mode_config.min_height	= LCDIF_MIN_YRES;
	drm->mode_config.max_width	= LCDIF_MAX_XRES;
	drm->mode_config.max_height	= LCDIF_MAX_YRES;
	drm->mode_config.funcs		= &lcdif_mode_config_funcs;
	drm->mode_config.helper_private	= &lcdif_mode_config_helpers;

	drm_mode_config_reset(drm);

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		return ret;
	lcdif->irq = ret;

	ret = devm_request_irq(drm->dev, lcdif->irq, lcdif_irq_handler, 0,
			       drm->driver->name, drm);
	if (ret < 0) {
		dev_err(drm->dev, "Failed to install IRQ handler\n");
		return ret;
	}

	drm_kms_helper_poll_init(drm);

	drm_helper_hpd_irq_event(drm);

	pm_runtime_enable(drm->dev);

	return 0;
}

/* Tear down the KMS state set up by lcdif_load(). */
static void lcdif_unload(struct drm_device *drm)
{
	struct lcdif_drm_private *lcdif = drm->dev_private;

	pm_runtime_get_sync(drm->dev);

	drm_crtc_vblank_off(&lcdif->crtc);

	drm_kms_helper_poll_fini(drm);
	drm_mode_config_cleanup(drm);

	pm_runtime_put_sync(drm->dev);
	pm_runtime_disable(drm->dev);

	drm->dev_private = NULL;
}

DEFINE_DRM_GEM_DMA_FOPS(fops);

static const struct drm_driver lcdif_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	DRM_GEM_DMA_DRIVER_OPS,
	.fops	= &fops,
	.name	= "imx-lcdif",
	.desc	= "i.MX LCDIF Controller DRM",
	.date	= "20220417",
	.major	= 1,
	.minor	= 0,
};

static const struct of_device_id lcdif_dt_ids[] = {
	{ .compatible = "fsl,imx8mp-lcdif" },
	{ .compatible = "fsl,imx93-lcdif" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, lcdif_dt_ids);

static int lcdif_probe(struct platform_device *pdev)
{
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&lcdif_driver, &pdev->dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	ret = lcdif_load(drm);
	if (ret)
		goto err_free;

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto err_unload;

	drm_fbdev_dma_setup(drm, 32);

	return 0;

err_unload:
	lcdif_unload(drm);
err_free:
	drm_dev_put(drm);

	return ret;
}

static void lcdif_remove(struct platform_device *pdev)
{
	struct drm_device *drm = platform_get_drvdata(pdev);

	drm_dev_unregister(drm);
	drm_atomic_helper_shutdown(drm);
	lcdif_unload(drm);
	drm_dev_put(drm);
}

static void lcdif_shutdown(struct platform_device *pdev)
{
	struct drm_device *drm = platform_get_drvdata(pdev);

	drm_atomic_helper_shutdown(drm);
}

/* Runtime suspend: gate clocks from leaf to root of the clock domains. */
static int __maybe_unused lcdif_rpm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct lcdif_drm_private *lcdif = drm->dev_private;

	/* These clock supply the DISPLAY CLOCK Domain */
	clk_disable_unprepare(lcdif->clk);

	/* These clock supply the System Bus, AXI, Write Path, LFIFO */
	clk_disable_unprepare(lcdif->clk_disp_axi);

	/* These clock supply the Control Bus, APB, APBH Ctrl Registers */
	clk_disable_unprepare(lcdif->clk_axi);

	return 0;
}

/* Runtime resume: ungate clocks in the opposite order of suspend. */
static int __maybe_unused lcdif_rpm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct lcdif_drm_private *lcdif = drm->dev_private;

	/* These clock supply the Control Bus, APB, APBH Ctrl Registers */
	clk_prepare_enable(lcdif->clk_axi);

	/* These clock supply the System Bus, AXI, Write Path, LFIFO */
	clk_prepare_enable(lcdif->clk_disp_axi);

	/* These clock supply the DISPLAY CLOCK Domain */
	clk_prepare_enable(lcdif->clk);

	return 0;
}

static int __maybe_unused lcdif_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	int ret;

	ret = drm_mode_config_helper_suspend(drm);
	if (ret)
		return ret;

	return lcdif_rpm_suspend(dev);
}

static int __maybe_unused lcdif_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	lcdif_rpm_resume(dev);

	return drm_mode_config_helper_resume(drm);
}

static const struct dev_pm_ops lcdif_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(lcdif_suspend, lcdif_resume)
	SET_RUNTIME_PM_OPS(lcdif_rpm_suspend, lcdif_rpm_resume, NULL)
};

static struct platform_driver lcdif_platform_driver = {
	.probe		= lcdif_probe,
	.remove_new	= lcdif_remove,
	.shutdown	= lcdif_shutdown,
	.driver	= {
		.name		= "imx-lcdif",
		.of_match_table	= lcdif_dt_ids,
		.pm		= &lcdif_pm_ops,
	},
};

drm_module_platform_driver(lcdif_platform_driver);

MODULE_AUTHOR("Marek Vasut <[email protected]>");
MODULE_DESCRIPTION("Freescale LCDIF DRM/KMS driver");
MODULE_LICENSE("GPL");
linux-master
drivers/gpu/drm/mxsfb/lcdif_drv.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2022 Marek Vasut <[email protected]> * * This code is based on drivers/gpu/drm/mxsfb/mxsfb* */ #include <linux/bitfield.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/media-bus-format.h> #include <linux/pm_runtime.h> #include <linux/spinlock.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_color_mgmt.h> #include <drm/drm_connector.h> #include <drm/drm_crtc.h> #include <drm/drm_encoder.h> #include <drm/drm_fb_dma_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_plane.h> #include <drm/drm_vblank.h> #include "lcdif_drv.h" #include "lcdif_regs.h" struct lcdif_crtc_state { struct drm_crtc_state base; /* always be the first member */ u32 bus_format; u32 bus_flags; }; static inline struct lcdif_crtc_state * to_lcdif_crtc_state(struct drm_crtc_state *s) { return container_of(s, struct lcdif_crtc_state, base); } /* ----------------------------------------------------------------------------- * CRTC */ /* * For conversion from YCbCr to RGB, the CSC operates as follows: * * |R| |A1 A2 A3| |Y + D1| * |G| = |B1 B2 B3| * |Cb + D2| * |B| |C1 C2 C3| |Cr + D3| * * The A, B and C coefficients are expressed as Q2.8 fixed point values, and * the D coefficients as Q0.8. Despite the reference manual stating the * opposite, the D1, D2 and D3 offset values are added to Y, Cb and Cr, not * subtracted. They must thus be programmed with negative values. 
*/ static const u32 lcdif_yuv2rgb_coeffs[3][2][6] = { [DRM_COLOR_YCBCR_BT601] = { [DRM_COLOR_YCBCR_LIMITED_RANGE] = { /* * BT.601 limited range: * * |R| |1.1644 0.0000 1.5960| |Y - 16 | * |G| = |1.1644 -0.3917 -0.8129| * |Cb - 128| * |B| |1.1644 2.0172 0.0000| |Cr - 128| */ CSC0_COEF0_A1(0x12a) | CSC0_COEF0_A2(0x000), CSC0_COEF1_A3(0x199) | CSC0_COEF1_B1(0x12a), CSC0_COEF2_B2(0x79c) | CSC0_COEF2_B3(0x730), CSC0_COEF3_C1(0x12a) | CSC0_COEF3_C2(0x204), CSC0_COEF4_C3(0x000) | CSC0_COEF4_D1(0x1f0), CSC0_COEF5_D2(0x180) | CSC0_COEF5_D3(0x180), }, [DRM_COLOR_YCBCR_FULL_RANGE] = { /* * BT.601 full range: * * |R| |1.0000 0.0000 1.4020| |Y - 0 | * |G| = |1.0000 -0.3441 -0.7141| * |Cb - 128| * |B| |1.0000 1.7720 0.0000| |Cr - 128| */ CSC0_COEF0_A1(0x100) | CSC0_COEF0_A2(0x000), CSC0_COEF1_A3(0x167) | CSC0_COEF1_B1(0x100), CSC0_COEF2_B2(0x7a8) | CSC0_COEF2_B3(0x749), CSC0_COEF3_C1(0x100) | CSC0_COEF3_C2(0x1c6), CSC0_COEF4_C3(0x000) | CSC0_COEF4_D1(0x000), CSC0_COEF5_D2(0x180) | CSC0_COEF5_D3(0x180), }, }, [DRM_COLOR_YCBCR_BT709] = { [DRM_COLOR_YCBCR_LIMITED_RANGE] = { /* * Rec.709 limited range: * * |R| |1.1644 0.0000 1.7927| |Y - 16 | * |G| = |1.1644 -0.2132 -0.5329| * |Cb - 128| * |B| |1.1644 2.1124 0.0000| |Cr - 128| */ CSC0_COEF0_A1(0x12a) | CSC0_COEF0_A2(0x000), CSC0_COEF1_A3(0x1cb) | CSC0_COEF1_B1(0x12a), CSC0_COEF2_B2(0x7c9) | CSC0_COEF2_B3(0x778), CSC0_COEF3_C1(0x12a) | CSC0_COEF3_C2(0x21d), CSC0_COEF4_C3(0x000) | CSC0_COEF4_D1(0x1f0), CSC0_COEF5_D2(0x180) | CSC0_COEF5_D3(0x180), }, [DRM_COLOR_YCBCR_FULL_RANGE] = { /* * Rec.709 full range: * * |R| |1.0000 0.0000 1.5748| |Y - 0 | * |G| = |1.0000 -0.1873 -0.4681| * |Cb - 128| * |B| |1.0000 1.8556 0.0000| |Cr - 128| */ CSC0_COEF0_A1(0x100) | CSC0_COEF0_A2(0x000), CSC0_COEF1_A3(0x193) | CSC0_COEF1_B1(0x100), CSC0_COEF2_B2(0x7d0) | CSC0_COEF2_B3(0x788), CSC0_COEF3_C1(0x100) | CSC0_COEF3_C2(0x1db), CSC0_COEF4_C3(0x000) | CSC0_COEF4_D1(0x000), CSC0_COEF5_D2(0x180) | CSC0_COEF5_D3(0x180), }, }, [DRM_COLOR_YCBCR_BT2020] = { 
[DRM_COLOR_YCBCR_LIMITED_RANGE] = { /* * BT.2020 limited range: * * |R| |1.1644 0.0000 1.6787| |Y - 16 | * |G| = |1.1644 -0.1874 -0.6505| * |Cb - 128| * |B| |1.1644 2.1418 0.0000| |Cr - 128| */ CSC0_COEF0_A1(0x12a) | CSC0_COEF0_A2(0x000), CSC0_COEF1_A3(0x1ae) | CSC0_COEF1_B1(0x12a), CSC0_COEF2_B2(0x7d0) | CSC0_COEF2_B3(0x759), CSC0_COEF3_C1(0x12a) | CSC0_COEF3_C2(0x224), CSC0_COEF4_C3(0x000) | CSC0_COEF4_D1(0x1f0), CSC0_COEF5_D2(0x180) | CSC0_COEF5_D3(0x180), }, [DRM_COLOR_YCBCR_FULL_RANGE] = { /* * BT.2020 full range: * * |R| |1.0000 0.0000 1.4746| |Y - 0 | * |G| = |1.0000 -0.1646 -0.5714| * |Cb - 128| * |B| |1.0000 1.8814 0.0000| |Cr - 128| */ CSC0_COEF0_A1(0x100) | CSC0_COEF0_A2(0x000), CSC0_COEF1_A3(0x179) | CSC0_COEF1_B1(0x100), CSC0_COEF2_B2(0x7d6) | CSC0_COEF2_B3(0x76e), CSC0_COEF3_C1(0x100) | CSC0_COEF3_C2(0x1e2), CSC0_COEF4_C3(0x000) | CSC0_COEF4_D1(0x000), CSC0_COEF5_D2(0x180) | CSC0_COEF5_D3(0x180), }, }, }; static void lcdif_set_formats(struct lcdif_drm_private *lcdif, struct drm_plane_state *plane_state, const u32 bus_format) { struct drm_device *drm = lcdif->drm; const u32 format = plane_state->fb->format->format; bool in_yuv = false; bool out_yuv = false; switch (bus_format) { case MEDIA_BUS_FMT_RGB565_1X16: writel(DISP_PARA_LINE_PATTERN_RGB565, lcdif->base + LCDC_V8_DISP_PARA); break; case MEDIA_BUS_FMT_RGB888_1X24: writel(DISP_PARA_LINE_PATTERN_RGB888, lcdif->base + LCDC_V8_DISP_PARA); break; case MEDIA_BUS_FMT_UYVY8_1X16: writel(DISP_PARA_LINE_PATTERN_UYVY_H, lcdif->base + LCDC_V8_DISP_PARA); out_yuv = true; break; default: dev_err(drm->dev, "Unknown media bus format 0x%x\n", bus_format); break; } switch (format) { /* RGB Formats */ case DRM_FORMAT_RGB565: writel(CTRLDESCL0_5_BPP_16_RGB565, lcdif->base + LCDC_V8_CTRLDESCL0_5); break; case DRM_FORMAT_RGB888: writel(CTRLDESCL0_5_BPP_24_RGB888, lcdif->base + LCDC_V8_CTRLDESCL0_5); break; case DRM_FORMAT_XRGB1555: writel(CTRLDESCL0_5_BPP_16_ARGB1555, lcdif->base + LCDC_V8_CTRLDESCL0_5); break; case 
DRM_FORMAT_XRGB4444: writel(CTRLDESCL0_5_BPP_16_ARGB4444, lcdif->base + LCDC_V8_CTRLDESCL0_5); break; case DRM_FORMAT_XBGR8888: writel(CTRLDESCL0_5_BPP_32_ABGR8888, lcdif->base + LCDC_V8_CTRLDESCL0_5); break; case DRM_FORMAT_XRGB8888: writel(CTRLDESCL0_5_BPP_32_ARGB8888, lcdif->base + LCDC_V8_CTRLDESCL0_5); break; /* YUV Formats */ case DRM_FORMAT_YUYV: writel(CTRLDESCL0_5_BPP_YCbCr422 | CTRLDESCL0_5_YUV_FORMAT_VY2UY1, lcdif->base + LCDC_V8_CTRLDESCL0_5); in_yuv = true; break; case DRM_FORMAT_YVYU: writel(CTRLDESCL0_5_BPP_YCbCr422 | CTRLDESCL0_5_YUV_FORMAT_UY2VY1, lcdif->base + LCDC_V8_CTRLDESCL0_5); in_yuv = true; break; case DRM_FORMAT_UYVY: writel(CTRLDESCL0_5_BPP_YCbCr422 | CTRLDESCL0_5_YUV_FORMAT_Y2VY1U, lcdif->base + LCDC_V8_CTRLDESCL0_5); in_yuv = true; break; case DRM_FORMAT_VYUY: writel(CTRLDESCL0_5_BPP_YCbCr422 | CTRLDESCL0_5_YUV_FORMAT_Y2UY1V, lcdif->base + LCDC_V8_CTRLDESCL0_5); in_yuv = true; break; default: dev_err(drm->dev, "Unknown pixel format 0x%x\n", format); break; } /* * The CSC differentiates between "YCbCr" and "YUV", but the reference * manual doesn't detail how they differ. Experiments showed that the * luminance value is unaffected, only the calculations involving chroma * values differ. The YCbCr mode behaves as expected, with chroma values * being offset by 128. The YUV mode isn't fully understood. */ if (!in_yuv && out_yuv) { /* RGB -> YCbCr */ writel(CSC0_CTRL_CSC_MODE_RGB2YCbCr, lcdif->base + LCDC_V8_CSC0_CTRL); /* * CSC: BT.601 Limited Range RGB to YCbCr coefficients. 
* * |Y | | 0.2568 0.5041 0.0979| |R| |16 | * |Cb| = |-0.1482 -0.2910 0.4392| * |G| + |128| * |Cr| | 0.4392 0.4392 -0.3678| |B| |128| */ writel(CSC0_COEF0_A2(0x081) | CSC0_COEF0_A1(0x041), lcdif->base + LCDC_V8_CSC0_COEF0); writel(CSC0_COEF1_B1(0x7db) | CSC0_COEF1_A3(0x019), lcdif->base + LCDC_V8_CSC0_COEF1); writel(CSC0_COEF2_B3(0x070) | CSC0_COEF2_B2(0x7b6), lcdif->base + LCDC_V8_CSC0_COEF2); writel(CSC0_COEF3_C2(0x7a2) | CSC0_COEF3_C1(0x070), lcdif->base + LCDC_V8_CSC0_COEF3); writel(CSC0_COEF4_D1(0x010) | CSC0_COEF4_C3(0x7ee), lcdif->base + LCDC_V8_CSC0_COEF4); writel(CSC0_COEF5_D3(0x080) | CSC0_COEF5_D2(0x080), lcdif->base + LCDC_V8_CSC0_COEF5); } else if (in_yuv && !out_yuv) { /* YCbCr -> RGB */ const u32 *coeffs = lcdif_yuv2rgb_coeffs[plane_state->color_encoding] [plane_state->color_range]; writel(CSC0_CTRL_CSC_MODE_YCbCr2RGB, lcdif->base + LCDC_V8_CSC0_CTRL); writel(coeffs[0], lcdif->base + LCDC_V8_CSC0_COEF0); writel(coeffs[1], lcdif->base + LCDC_V8_CSC0_COEF1); writel(coeffs[2], lcdif->base + LCDC_V8_CSC0_COEF2); writel(coeffs[3], lcdif->base + LCDC_V8_CSC0_COEF3); writel(coeffs[4], lcdif->base + LCDC_V8_CSC0_COEF4); writel(coeffs[5], lcdif->base + LCDC_V8_CSC0_COEF5); } else { /* RGB -> RGB, YCbCr -> YCbCr: bypass colorspace converter. 
		 */
		writel(CSC0_CTRL_BYPASS, lcdif->base + LCDC_V8_CSC0_CTRL);
	}
}

/*
 * Program display timings, sync/data-enable/pixel-clock polarities and the
 * scanout pitch for the currently committed adjusted mode.
 */
static void lcdif_set_mode(struct lcdif_drm_private *lcdif, u32 bus_flags)
{
	struct drm_display_mode *m = &lcdif->crtc.state->adjusted_mode;
	u32 ctrl = 0;

	/* Translate DRM mode/bus flags into controller polarity bits. */
	if (m->flags & DRM_MODE_FLAG_NHSYNC)
		ctrl |= CTRL_INV_HS;
	if (m->flags & DRM_MODE_FLAG_NVSYNC)
		ctrl |= CTRL_INV_VS;
	if (bus_flags & DRM_BUS_FLAG_DE_LOW)
		ctrl |= CTRL_INV_DE;
	if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
		ctrl |= CTRL_INV_PXCK;

	writel(ctrl, lcdif->base + LCDC_V8_CTRL);

	writel(DISP_SIZE_DELTA_Y(m->vdisplay) |
	       DISP_SIZE_DELTA_X(m->hdisplay),
	       lcdif->base + LCDC_V8_DISP_SIZE);

	/* Horizontal back/front porch, in pixel clocks. */
	writel(HSYN_PARA_BP_H(m->htotal - m->hsync_end) |
	       HSYN_PARA_FP_H(m->hsync_start - m->hdisplay),
	       lcdif->base + LCDC_V8_HSYN_PARA);

	/* Vertical back/front porch, in lines. */
	writel(VSYN_PARA_BP_V(m->vtotal - m->vsync_end) |
	       VSYN_PARA_FP_V(m->vsync_start - m->vdisplay),
	       lcdif->base + LCDC_V8_VSYN_PARA);

	writel(VSYN_HSYN_WIDTH_PW_V(m->vsync_end - m->vsync_start) |
	       VSYN_HSYN_WIDTH_PW_H(m->hsync_end - m->hsync_start),
	       lcdif->base + LCDC_V8_VSYN_HSYN_WIDTH);

	writel(CTRLDESCL0_1_HEIGHT(m->vdisplay) |
	       CTRLDESCL0_1_WIDTH(m->hdisplay),
	       lcdif->base + LCDC_V8_CTRLDESCL0_1);

	/*
	 * Undocumented P_SIZE and T_SIZE register but those written in the
	 * downstream kernel those registers control the AXI burst size. As of
	 * now there are two known values:
	 *  1 - 128Byte
	 *  2 - 256Byte
	 * Downstream set it to 256B burst size to improve the memory
	 * efficiency so set it here too.
	 */
	ctrl = CTRLDESCL0_3_P_SIZE(2) | CTRLDESCL0_3_T_SIZE(2) |
	       CTRLDESCL0_3_PITCH(lcdif->crtc.primary->state->fb->pitches[0]);
	writel(ctrl, lcdif->base + LCDC_V8_CTRLDESCL0_3);
}

/*
 * Turn the display pipeline on: FIFO panic thresholds first, then the
 * display output, then the layer DMA engine.
 */
static void lcdif_enable_controller(struct lcdif_drm_private *lcdif)
{
	u32 reg;

	/* Set FIFO Panic watermarks, low 1/3, high 2/3. */
	writel(FIELD_PREP(PANIC0_THRES_LOW_MASK, 1 * PANIC0_THRES_MAX / 3) |
	       FIELD_PREP(PANIC0_THRES_HIGH_MASK, 2 * PANIC0_THRES_MAX / 3),
	       lcdif->base + LCDC_V8_PANIC0_THRES);

	/*
	 * Enable FIFO Panic, this does not generate interrupt, but
	 * boosts NoC priority based on FIFO Panic watermarks.
	 */
	writel(INT_ENABLE_D1_PLANE_PANIC_EN,
	       lcdif->base + LCDC_V8_INT_ENABLE_D1);

	reg = readl(lcdif->base + LCDC_V8_DISP_PARA);
	reg |= DISP_PARA_DISP_ON;
	writel(reg, lcdif->base + LCDC_V8_DISP_PARA);

	/* Enabling layer 0 last starts scanout. */
	reg = readl(lcdif->base + LCDC_V8_CTRLDESCL0_5);
	reg |= CTRLDESCL0_5_EN;
	writel(reg, lcdif->base + LCDC_V8_CTRLDESCL0_5);
}

/*
 * Turn the display pipeline off in reverse order, waiting for the layer
 * DMA engine to drain before removing the display output.
 */
static void lcdif_disable_controller(struct lcdif_drm_private *lcdif)
{
	u32 reg;
	int ret;

	reg = readl(lcdif->base + LCDC_V8_CTRLDESCL0_5);
	reg &= ~CTRLDESCL0_5_EN;
	writel(reg, lcdif->base + LCDC_V8_CTRLDESCL0_5);

	ret = readl_poll_timeout(lcdif->base + LCDC_V8_CTRLDESCL0_5,
				 reg, !(reg & CTRLDESCL0_5_EN),
				 0, 36000);	/* Wait ~2 frame times max */
	if (ret)
		drm_err(lcdif->drm, "Failed to disable controller!\n");

	reg = readl(lcdif->base + LCDC_V8_DISP_PARA);
	reg &= ~DISP_PARA_DISP_ON;
	writel(reg, lcdif->base + LCDC_V8_DISP_PARA);

	/* Disable FIFO Panic NoC priority booster.
	 */
	writel(0, lcdif->base + LCDC_V8_INT_ENABLE_D1);
}

/*
 * Pulse the SW_RESET bit. The readl() after each write flushes the posted
 * write so the set and clear are guaranteed to reach the hardware in order.
 */
static void lcdif_reset_block(struct lcdif_drm_private *lcdif)
{
	writel(CTRL_SW_RESET, lcdif->base + LCDC_V8_CTRL + REG_SET);
	readl(lcdif->base + LCDC_V8_CTRL);
	writel(CTRL_SW_RESET, lcdif->base + LCDC_V8_CTRL + REG_CLR);
	readl(lcdif->base + LCDC_V8_CTRL);
}

/* Full mode programming: reset the block, then formats, then timings. */
static void lcdif_crtc_mode_set_nofb(struct drm_crtc_state *crtc_state,
				     struct drm_plane_state *plane_state)
{
	struct lcdif_crtc_state *lcdif_crtc_state = to_lcdif_crtc_state(crtc_state);
	struct drm_device *drm = crtc_state->crtc->dev;
	struct lcdif_drm_private *lcdif = to_lcdif_drm_private(drm);
	struct drm_display_mode *m = &crtc_state->adjusted_mode;

	DRM_DEV_DEBUG_DRIVER(drm->dev, "Pixel clock: %dkHz (actual: %dkHz)\n",
			     m->crtc_clock,
			     (int)(clk_get_rate(lcdif->clk) / 1000));
	DRM_DEV_DEBUG_DRIVER(drm->dev, "Bridge bus_flags: 0x%08X\n",
			     lcdif_crtc_state->bus_flags);
	DRM_DEV_DEBUG_DRIVER(drm->dev, "Mode flags: 0x%08X\n", m->flags);

	/* Mandatory eLCDIF reset as per the Reference Manual */
	lcdif_reset_block(lcdif);

	lcdif_set_formats(lcdif, plane_state, lcdif_crtc_state->bus_format);

	lcdif_set_mode(lcdif, lcdif_crtc_state->bus_flags);
}

/*
 * Validate the new CRTC state: the primary plane must be enabled whenever
 * the CRTC is active, and all first bridges downstream of this CRTC must
 * agree on a single bus format and set of bus flags, which are cached in
 * the lcdif_crtc_state for use at enable time.
 */
static int lcdif_crtc_atomic_check(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	struct drm_device *drm = crtc->dev;
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct lcdif_crtc_state *lcdif_crtc_state = to_lcdif_crtc_state(crtc_state);
	bool has_primary = crtc_state->plane_mask &
			   drm_plane_mask(crtc->primary);
	struct drm_connector_state *connector_state;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct drm_bridge_state *bridge_state;
	struct drm_bridge *bridge;
	u32 bus_format, bus_flags;
	bool format_set = false, flags_set = false;
	int ret, i;

	/* The primary plane has to be enabled when the CRTC is active. */
	if (crtc_state->active && !has_primary)
		return -EINVAL;

	ret = drm_atomic_add_affected_planes(state, crtc);
	if (ret)
		return ret;

	/* Try to find consistent bus format and flags across first bridges. */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (!connector_state->crtc)
			continue;

		encoder = connector_state->best_encoder;

		bridge = drm_bridge_chain_get_first_bridge(encoder);
		if (!bridge)
			continue;

		bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
		if (!bridge_state)
			bus_format = MEDIA_BUS_FMT_FIXED;
		else
			bus_format = bridge_state->input_bus_cfg.format;

		if (bus_format == MEDIA_BUS_FMT_FIXED) {
			dev_warn(drm->dev,
				 "[ENCODER:%d:%s]'s bridge does not provide bus format, assuming MEDIA_BUS_FMT_RGB888_1X24.\n"
				 "Please fix bridge driver by handling atomic_get_input_bus_fmts.\n",
				 encoder->base.id, encoder->name);
			bus_format = MEDIA_BUS_FMT_RGB888_1X24;
		} else if (!bus_format) {
			/* If all else fails, default to RGB888_1X24 */
			bus_format = MEDIA_BUS_FMT_RGB888_1X24;
		}

		if (!format_set) {
			lcdif_crtc_state->bus_format = bus_format;
			format_set = true;
		} else if (lcdif_crtc_state->bus_format != bus_format) {
			DRM_DEV_DEBUG_DRIVER(drm->dev, "inconsistent bus format\n");
			return -EINVAL;
		}

		/* Fixed bridge timings take precedence over the bridge state. */
		if (bridge->timings)
			bus_flags = bridge->timings->input_bus_flags;
		else if (bridge_state)
			bus_flags = bridge_state->input_bus_cfg.flags;
		else
			bus_flags = 0;

		if (!flags_set) {
			lcdif_crtc_state->bus_flags = bus_flags;
			flags_set = true;
		} else if (lcdif_crtc_state->bus_flags != bus_flags) {
			DRM_DEV_DEBUG_DRIVER(drm->dev, "inconsistent bus flags\n");
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * Arm the shadow-register load so the new scanout settings take effect at
 * the next vertical blank, then hand off the pending vblank event.
 */
static void lcdif_crtc_atomic_flush(struct drm_crtc *crtc,
				    struct drm_atomic_state *state)
{
	struct lcdif_drm_private *lcdif = to_lcdif_drm_private(crtc->dev);
	struct drm_pending_vblank_event *event;
	u32 reg;

	reg = readl(lcdif->base + LCDC_V8_CTRLDESCL0_5);
	reg |= CTRLDESCL0_5_SHADOW_LOAD_EN;
	writel(reg, lcdif->base + LCDC_V8_CTRLDESCL0_5);

	event = crtc->state->event;
	crtc->state->event = NULL;

	if (!event)
		return;

	/* Deliver the event now if no vblank reference can be taken. */
	spin_lock_irq(&crtc->dev->event_lock);
	if (drm_crtc_vblank_get(crtc) == 0)
		drm_crtc_arm_vblank_event(crtc, event);
	else
		drm_crtc_send_vblank_event(crtc, event);
	spin_unlock_irq(&crtc->dev->event_lock);
}

/*
 * Power up and program the controller for the new mode, seed the scanout
 * address, then start scanout and vblank handling.
 */
static void lcdif_crtc_atomic_enable(struct drm_crtc *crtc,
				     struct drm_atomic_state *state)
{
	struct lcdif_drm_private *lcdif = to_lcdif_drm_private(crtc->dev);
	struct drm_crtc_state *new_cstate = drm_atomic_get_new_crtc_state(state, crtc);
	struct drm_plane_state *new_pstate = drm_atomic_get_new_plane_state(state,
									    crtc->primary);
	struct drm_display_mode *m = &lcdif->crtc.state->adjusted_mode;
	struct drm_device *drm = lcdif->drm;
	dma_addr_t paddr;

	clk_set_rate(lcdif->clk, m->crtc_clock * 1000);

	pm_runtime_get_sync(drm->dev);

	lcdif_crtc_mode_set_nofb(new_cstate, new_pstate);

	/* Write cur_buf as well to avoid an initial corrupt frame */
	paddr = drm_fb_dma_get_gem_addr(new_pstate->fb, new_pstate, 0);
	if (paddr) {
		writel(lower_32_bits(paddr),
		       lcdif->base + LCDC_V8_CTRLDESCL_LOW0_4);
		writel(CTRLDESCL_HIGH0_4_ADDR_HIGH(upper_32_bits(paddr)),
		       lcdif->base + LCDC_V8_CTRLDESCL_HIGH0_4);
	}
	lcdif_enable_controller(lcdif);

	drm_crtc_vblank_on(crtc);
}

/*
 * Stop scanout, flush any pending vblank event and release the runtime PM
 * reference taken at enable time.
 */
static void lcdif_crtc_atomic_disable(struct drm_crtc *crtc,
				      struct drm_atomic_state *state)
{
	struct lcdif_drm_private *lcdif = to_lcdif_drm_private(crtc->dev);
	struct drm_device *drm = lcdif->drm;
	struct drm_pending_vblank_event *event;

	drm_crtc_vblank_off(crtc);

	lcdif_disable_controller(lcdif);

	spin_lock_irq(&drm->event_lock);
	event = crtc->state->event;
	if (event) {
		crtc->state->event = NULL;
		drm_crtc_send_vblank_event(crtc, event);
	}
	spin_unlock_irq(&drm->event_lock);

	pm_runtime_put_sync(drm->dev);
}

/* Free a subclassed CRTC state (lcdif_crtc_state wraps drm_crtc_state). */
static void lcdif_crtc_atomic_destroy_state(struct drm_crtc *crtc,
					    struct drm_crtc_state *state)
{
	__drm_atomic_helper_crtc_destroy_state(state);
	kfree(to_lcdif_crtc_state(state));
}

/* Reset the CRTC to a freshly allocated, zeroed subclassed state. */
static void lcdif_crtc_reset(struct drm_crtc *crtc)
{
	struct lcdif_crtc_state *state;

	if (crtc->state)
		lcdif_crtc_atomic_destroy_state(crtc, crtc->state);

	crtc->state = NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state)
		__drm_atomic_helper_crtc_reset(crtc, &state->base);
}

/* Duplicate the subclassed state, carrying over the cached bus config. */
static struct drm_crtc_state *
lcdif_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
{
	struct lcdif_crtc_state *old = to_lcdif_crtc_state(crtc->state);
	struct lcdif_crtc_state *new;

	if (WARN_ON(!crtc->state))
		return NULL;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &new->base);

	new->bus_format = old->bus_format;
	new->bus_flags = old->bus_flags;

	return &new->base;
}

static int lcdif_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct lcdif_drm_private *lcdif = to_lcdif_drm_private(crtc->dev);

	/* Clear and enable VBLANK IRQ */
	writel(INT_STATUS_D0_VS_BLANK, lcdif->base + LCDC_V8_INT_STATUS_D0);
	writel(INT_ENABLE_D0_VS_BLANK_EN, lcdif->base + LCDC_V8_INT_ENABLE_D0);

	return 0;
}

static void lcdif_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct lcdif_drm_private *lcdif = to_lcdif_drm_private(crtc->dev);

	/* Disable and clear VBLANK IRQ */
	writel(0, lcdif->base + LCDC_V8_INT_ENABLE_D0);
	writel(INT_STATUS_D0_VS_BLANK, lcdif->base + LCDC_V8_INT_STATUS_D0);
}

static const struct drm_crtc_helper_funcs lcdif_crtc_helper_funcs = {
	.atomic_check = lcdif_crtc_atomic_check,
	.atomic_flush = lcdif_crtc_atomic_flush,
	.atomic_enable = lcdif_crtc_atomic_enable,
	.atomic_disable = lcdif_crtc_atomic_disable,
};

static const struct drm_crtc_funcs lcdif_crtc_funcs = {
	.reset = lcdif_crtc_reset,
	.destroy = drm_crtc_cleanup,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = lcdif_crtc_atomic_duplicate_state,
	.atomic_destroy_state = lcdif_crtc_atomic_destroy_state,
	.enable_vblank = lcdif_crtc_enable_vblank,
	.disable_vblank = lcdif_crtc_disable_vblank,
};

/* -----------------------------------------------------------------------------
 * Planes
 */

static int
lcdif_plane_atomic_check(struct drm_plane *plane,
			 struct drm_atomic_state *state)
{
	struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state,
									     plane);
	struct lcdif_drm_private *lcdif = to_lcdif_drm_private(plane->dev);
	struct drm_crtc_state *crtc_state;

	crtc_state = drm_atomic_get_new_crtc_state(state, &lcdif->crtc);

	/* No scaling support in the hardware; plane must match the CRTC. */
	return drm_atomic_helper_check_plane_state(plane_state, crtc_state,
						   DRM_PLANE_NO_SCALING,
						   DRM_PLANE_NO_SCALING,
						   false, true);
}

/* Latch the new framebuffer's DMA address into the layer 0 descriptor. */
static void lcdif_plane_primary_atomic_update(struct drm_plane *plane,
					      struct drm_atomic_state *state)
{
	struct lcdif_drm_private *lcdif = to_lcdif_drm_private(plane->dev);
	struct drm_plane_state *new_pstate = drm_atomic_get_new_plane_state(state,
									    plane);
	dma_addr_t paddr;

	paddr = drm_fb_dma_get_gem_addr(new_pstate->fb, new_pstate, 0);
	if (paddr) {
		writel(lower_32_bits(paddr),
		       lcdif->base + LCDC_V8_CTRLDESCL_LOW0_4);
		writel(CTRLDESCL_HIGH0_4_ADDR_HIGH(upper_32_bits(paddr)),
		       lcdif->base + LCDC_V8_CTRLDESCL_HIGH0_4);
	}
}

/* Only linear buffers can be scanned out by this hardware. */
static bool lcdif_format_mod_supported(struct drm_plane *plane,
				       uint32_t format,
				       uint64_t modifier)
{
	return modifier == DRM_FORMAT_MOD_LINEAR;
}

static const struct drm_plane_helper_funcs lcdif_plane_primary_helper_funcs = {
	.atomic_check = lcdif_plane_atomic_check,
	.atomic_update = lcdif_plane_primary_atomic_update,
};

static const struct drm_plane_funcs lcdif_plane_funcs = {
	.format_mod_supported	= lcdif_format_mod_supported,
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.destroy		= drm_plane_cleanup,
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};

static const u32 lcdif_primary_plane_formats[] = {
	/* RGB */
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB4444,
	DRM_FORMAT_XRGB8888,

	/* Packed YCbCr */
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
};

static const u64 lcdif_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* -----------------------------------------------------------------------------
 * Initialization
 */

/*
 * Register the primary plane (with YCbCr color properties) and the CRTC
 * with the DRM core. Called once at driver bind time.
 */
int lcdif_kms_init(struct lcdif_drm_private *lcdif)
{
	const u32 supported_encodings = BIT(DRM_COLOR_YCBCR_BT601) |
					BIT(DRM_COLOR_YCBCR_BT709) |
					BIT(DRM_COLOR_YCBCR_BT2020);
	const u32 supported_ranges = BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
				     BIT(DRM_COLOR_YCBCR_FULL_RANGE);
	struct drm_crtc *crtc = &lcdif->crtc;
	int ret;

	drm_plane_helper_add(&lcdif->planes.primary,
			     &lcdif_plane_primary_helper_funcs);
	ret = drm_universal_plane_init(lcdif->drm, &lcdif->planes.primary, 1,
				       &lcdif_plane_funcs,
				       lcdif_primary_plane_formats,
				       ARRAY_SIZE(lcdif_primary_plane_formats),
				       lcdif_modifiers, DRM_PLANE_TYPE_PRIMARY,
				       NULL);
	if (ret)
		return ret;

	ret = drm_plane_create_color_properties(&lcdif->planes.primary,
						supported_encodings,
						supported_ranges,
						DRM_COLOR_YCBCR_BT601,
						DRM_COLOR_YCBCR_LIMITED_RANGE);
	if (ret)
		return ret;

	drm_crtc_helper_add(crtc, &lcdif_crtc_helper_funcs);
	return drm_crtc_init_with_planes(lcdif->drm, crtc,
					 &lcdif->planes.primary, NULL,
					 &lcdif_crtc_funcs, NULL);
}
linux-master
drivers/gpu/drm/mxsfb/lcdif_kms.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2016 Marek Vasut <[email protected]>
 *
 * This code is based on drivers/video/fbdev/mxsfb.c :
 * Copyright (C) 2010 Juergen Beisert, Pengutronix
 * Copyright (C) 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved.
 * Copyright (C) 2008 Embedded Alley Solutions, Inc All Rights Reserved.
 */

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/media-bus-format.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_plane.h>
#include <drm/drm_vblank.h>

#include "mxsfb_drv.h"
#include "mxsfb_regs.h"

/* 1 second delay should be plenty of time for block reset */
#define RESET_TIMEOUT	1000000

/* -----------------------------------------------------------------------------
 * CRTC
 */

/* Pack the hsync pulse width into the controller-revision-specific field. */
static u32 set_hsync_pulse_width(struct mxsfb_drm_private *mxsfb, u32 val)
{
	return (val & mxsfb->devdata->hs_wdth_mask) <<
		mxsfb->devdata->hs_wdth_shift;
}

/*
 * Setup the MXSFB registers for decoding the pixels out of the framebuffer and
 * outputting them on the bus.
 */
static void mxsfb_set_formats(struct mxsfb_drm_private *mxsfb,
			      const u32 bus_format)
{
	struct drm_device *drm = mxsfb->drm;
	const u32 format = mxsfb->crtc.primary->state->fb->format->format;
	u32 ctrl, ctrl1;

	DRM_DEV_DEBUG_DRIVER(drm->dev, "Using bus_format: 0x%08X\n",
			     bus_format);

	ctrl = CTRL_BYPASS_COUNT | CTRL_MASTER;

	/* CTRL1 contains IRQ config and status bits, preserve those. */
	ctrl1 = readl(mxsfb->base + LCDC_CTRL1);
	ctrl1 &= CTRL1_CUR_FRAME_DONE_IRQ_EN | CTRL1_CUR_FRAME_DONE_IRQ;

	switch (format) {
	case DRM_FORMAT_RGB565:
		dev_dbg(drm->dev, "Setting up RGB565 mode\n");
		ctrl |= CTRL_WORD_LENGTH_16;
		ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0xf);
		break;
	case DRM_FORMAT_XRGB8888:
		dev_dbg(drm->dev, "Setting up XRGB8888 mode\n");
		ctrl |= CTRL_WORD_LENGTH_24;
		/* Do not use packed pixels = one pixel per word instead. */
		ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0x7);
		break;
	}

	switch (bus_format) {
	case MEDIA_BUS_FMT_RGB565_1X16:
		ctrl |= CTRL_BUS_WIDTH_16;
		break;
	case MEDIA_BUS_FMT_RGB666_1X18:
		ctrl |= CTRL_BUS_WIDTH_18;
		break;
	case MEDIA_BUS_FMT_RGB888_1X24:
		ctrl |= CTRL_BUS_WIDTH_24;
		break;
	default:
		dev_err(drm->dev, "Unknown media bus format 0x%x\n", bus_format);
		break;
	}

	writel(ctrl1, mxsfb->base + LCDC_CTRL1);
	writel(ctrl, mxsfb->base + LCDC_CTRL);
}

/* Program display timings and sync polarities for the committed mode. */
static void mxsfb_set_mode(struct mxsfb_drm_private *mxsfb, u32 bus_flags)
{
	struct drm_display_mode *m = &mxsfb->crtc.state->adjusted_mode;
	u32 vdctrl0, vsync_pulse_len, hsync_pulse_len;

	writel(TRANSFER_COUNT_SET_VCOUNT(m->crtc_vdisplay) |
	       TRANSFER_COUNT_SET_HCOUNT(m->crtc_hdisplay),
	       mxsfb->base + mxsfb->devdata->transfer_count);

	vsync_pulse_len = m->crtc_vsync_end - m->crtc_vsync_start;

	vdctrl0 = VDCTRL0_ENABLE_PRESENT |	/* Always in DOTCLOCK mode */
		  VDCTRL0_VSYNC_PERIOD_UNIT |
		  VDCTRL0_VSYNC_PULSE_WIDTH_UNIT |
		  VDCTRL0_SET_VSYNC_PULSE_WIDTH(vsync_pulse_len);
	if (m->flags & DRM_MODE_FLAG_PHSYNC)
		vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH;
	if (m->flags & DRM_MODE_FLAG_PVSYNC)
		vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH;
	/* Make sure Data Enable is high active by default */
	if (!(bus_flags & DRM_BUS_FLAG_DE_LOW))
		vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH;
	/*
	 * DRM_BUS_FLAG_PIXDATA_DRIVE_ defines are controller centric,
	 * controllers VDCTRL0_DOTCLK is display centric.
	 * Drive on positive edge       -> display samples on falling edge
	 * DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE -> VDCTRL0_DOTCLK_ACT_FALLING
	 */
	if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
		vdctrl0 |= VDCTRL0_DOTCLK_ACT_FALLING;

	writel(vdctrl0, mxsfb->base + LCDC_VDCTRL0);

	/* Frame length in lines. */
	writel(m->crtc_vtotal, mxsfb->base + LCDC_VDCTRL1);

	/* Line length in units of clocks or pixels. */
	hsync_pulse_len = m->crtc_hsync_end - m->crtc_hsync_start;
	writel(set_hsync_pulse_width(mxsfb, hsync_pulse_len) |
	       VDCTRL2_SET_HSYNC_PERIOD(m->crtc_htotal),
	       mxsfb->base + LCDC_VDCTRL2);

	writel(SET_HOR_WAIT_CNT(m->crtc_htotal - m->crtc_hsync_start) |
	       SET_VERT_WAIT_CNT(m->crtc_vtotal - m->crtc_vsync_start),
	       mxsfb->base + LCDC_VDCTRL3);

	writel(SET_DOTCLK_H_VALID_DATA_CNT(m->hdisplay),
	       mxsfb->base + LCDC_VDCTRL4);
}

/*
 * Enable clocks and start the controller: sync signals first, then the
 * underflow-recovery workaround, then the DMA engine (CTRL_RUN).
 */
static void mxsfb_enable_controller(struct mxsfb_drm_private *mxsfb)
{
	u32 reg;

	if (mxsfb->clk_disp_axi)
		clk_prepare_enable(mxsfb->clk_disp_axi);
	clk_prepare_enable(mxsfb->clk);

	/* Increase number of outstanding requests on all supported IPs */
	if (mxsfb->devdata->has_ctrl2) {
		reg = readl(mxsfb->base + LCDC_V4_CTRL2);
		reg &= ~CTRL2_SET_OUTSTANDING_REQS_MASK;
		reg |= CTRL2_SET_OUTSTANDING_REQS_16;
		writel(reg, mxsfb->base + LCDC_V4_CTRL2);
	}

	/* If it was disabled, re-enable the mode again */
	writel(CTRL_DOTCLK_MODE, mxsfb->base + LCDC_CTRL + REG_SET);

	/* Enable the SYNC signals first, then the DMA engine */
	reg = readl(mxsfb->base + LCDC_VDCTRL4);
	reg |= VDCTRL4_SYNC_SIGNALS_ON;
	writel(reg, mxsfb->base + LCDC_VDCTRL4);

	/*
	 * Enable recovery on underflow.
	 *
	 * There is some sort of corner case behavior of the controller,
	 * which could rarely be triggered at least on i.MX6SX connected
	 * to 800x480 DPI panel and i.MX8MM connected to DPI->DSI->LVDS
	 * bridged 1920x1080 panel (and likely on other setups too), where
	 * the image on the panel shifts to the right and wraps around.
	 * This happens either when the controller is enabled on boot or
	 * even later during run time. The condition does not correct
	 * itself automatically, i.e. the display image remains shifted.
	 *
	 * It seems this problem is known and is due to sporadic underflows
	 * of the LCDIF FIFO. While the LCDIF IP does have underflow/overflow
	 * IRQs, neither of the IRQs trigger and neither IRQ status bit is
	 * asserted when this condition occurs.
	 *
	 * All known revisions of the LCDIF IP have CTRL1 RECOVER_ON_UNDERFLOW
	 * bit, which is described in the reference manual since i.MX23 as
	 * "
	 *   Set this bit to enable the LCDIF block to recover in the next
	 *   field/frame if there was an underflow in the current field/frame.
	 * "
	 * Enable this bit to mitigate the sporadic underflows.
	 */
	reg = readl(mxsfb->base + LCDC_CTRL1);
	reg |= CTRL1_RECOVER_ON_UNDERFLOW;
	writel(reg, mxsfb->base + LCDC_CTRL1);

	writel(CTRL_RUN, mxsfb->base + LCDC_CTRL + REG_SET);
}

/* Stop the controller, wait for it to drain, and gate the clocks. */
static void mxsfb_disable_controller(struct mxsfb_drm_private *mxsfb)
{
	u32 reg;

	/*
	 * Even if we disable the controller here, it will still continue
	 * until its FIFOs are running out of data
	 */
	writel(CTRL_DOTCLK_MODE, mxsfb->base + LCDC_CTRL + REG_CLR);

	readl_poll_timeout(mxsfb->base + LCDC_CTRL, reg, !(reg & CTRL_RUN),
			   0, 1000);

	reg = readl(mxsfb->base + LCDC_VDCTRL4);
	reg &= ~VDCTRL4_SYNC_SIGNALS_ON;
	writel(reg, mxsfb->base + LCDC_VDCTRL4);

	clk_disable_unprepare(mxsfb->clk);
	if (mxsfb->clk_disp_axi)
		clk_disable_unprepare(mxsfb->clk_disp_axi);
}

/*
 * Clear the bit and poll it cleared.  This is usually called with
 * a reset address and mask being either SFTRST(bit 31) or CLKGATE
 * (bit 30).
 */
static int clear_poll_bit(void __iomem *addr, u32 mask)
{
	u32 reg;

	writel(mask, addr + REG_CLR);
	return readl_poll_timeout(addr, reg, !(reg & mask), 0, RESET_TIMEOUT);
}

static int mxsfb_reset_block(struct mxsfb_drm_private *mxsfb)
{
	int ret;

	/*
	 * It seems, you can't re-program the controller if it is still
	 * running. This may lead to shifted pictures (FIFO issue?), so
	 * first stop the controller and drain its FIFOs.
	 */

	/* Lift soft reset, ungate the clock, then clear the FIFOs. */
	ret = clear_poll_bit(mxsfb->base + LCDC_CTRL, CTRL_SFTRST);
	if (ret)
		return ret;

	writel(CTRL_CLKGATE, mxsfb->base + LCDC_CTRL + REG_CLR);

	ret = clear_poll_bit(mxsfb->base + LCDC_CTRL, CTRL_SFTRST);
	if (ret)
		return ret;

	ret = clear_poll_bit(mxsfb->base + LCDC_CTRL, CTRL_CLKGATE);
	if (ret)
		return ret;

	/* Clear the FIFOs */
	writel(CTRL1_FIFO_CLEAR, mxsfb->base + LCDC_CTRL1 + REG_SET);
	readl(mxsfb->base + LCDC_CTRL1);
	writel(CTRL1_FIFO_CLEAR, mxsfb->base + LCDC_CTRL1 + REG_CLR);
	readl(mxsfb->base + LCDC_CTRL1);

	if (mxsfb->devdata->has_overlay)
		writel(0, mxsfb->base + LCDC_AS_CTRL);

	return 0;
}

/*
 * Full mode programming: reset the block, select formats, set the pixel
 * clock and finally the timings. Bus flags come from the bridge timings,
 * the bridge state, or the connector, in that order of preference.
 */
static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb,
				     struct drm_bridge_state *bridge_state,
				     const u32 bus_format)
{
	struct drm_device *drm = mxsfb->crtc.dev;
	struct drm_display_mode *m = &mxsfb->crtc.state->adjusted_mode;
	u32 bus_flags = mxsfb->connector->display_info.bus_flags;
	int err;

	if (mxsfb->bridge && mxsfb->bridge->timings)
		bus_flags = mxsfb->bridge->timings->input_bus_flags;
	else if (bridge_state)
		bus_flags = bridge_state->input_bus_cfg.flags;

	DRM_DEV_DEBUG_DRIVER(drm->dev, "Pixel clock: %dkHz (actual: %dkHz)\n",
			     m->crtc_clock,
			     (int)(clk_get_rate(mxsfb->clk) / 1000));
	DRM_DEV_DEBUG_DRIVER(drm->dev, "Connector bus_flags: 0x%08X\n",
			     bus_flags);
	DRM_DEV_DEBUG_DRIVER(drm->dev, "Mode flags: 0x%08X\n", m->flags);

	/* Mandatory eLCDIF reset as per the Reference Manual */
	err = mxsfb_reset_block(mxsfb);
	if (err)
		return;

	mxsfb_set_formats(mxsfb, bus_format);

	clk_set_rate(mxsfb->clk, m->crtc_clock * 1000);

	mxsfb_set_mode(mxsfb, bus_flags);
}

/* The primary plane must be enabled whenever the CRTC is active. */
static int mxsfb_crtc_atomic_check(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	bool has_primary = crtc_state->plane_mask &
			   drm_plane_mask(crtc->primary);

	/* The primary plane has to be enabled when the CRTC is active. */
	if (crtc_state->active && !has_primary)
		return -EINVAL;

	/* TODO: Is this needed ? */
	return drm_atomic_add_affected_planes(state, crtc);
}

/* Hand off the pending vblank event for the just-committed state. */
static void mxsfb_crtc_atomic_flush(struct drm_crtc *crtc,
				    struct drm_atomic_state *state)
{
	struct drm_pending_vblank_event *event;

	event = crtc->state->event;
	crtc->state->event = NULL;

	if (!event)
		return;

	spin_lock_irq(&crtc->dev->event_lock);
	if (drm_crtc_vblank_get(crtc) == 0)
		drm_crtc_arm_vblank_event(crtc, event);
	else
		drm_crtc_send_vblank_event(crtc, event);
	spin_unlock_irq(&crtc->dev->event_lock);
}

/*
 * Power up the pipeline, resolve the bus format (bridge state first, then
 * connector, then RGB888 fallback), program the mode, seed both scanout
 * address registers and start the controller.
 */
static void mxsfb_crtc_atomic_enable(struct drm_crtc *crtc,
				     struct drm_atomic_state *state)
{
	struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(crtc->dev);
	struct drm_plane_state *new_pstate = drm_atomic_get_new_plane_state(state,
									    crtc->primary);
	struct drm_bridge_state *bridge_state = NULL;
	struct drm_device *drm = mxsfb->drm;
	u32 bus_format = 0;
	dma_addr_t dma_addr;

	pm_runtime_get_sync(drm->dev);
	mxsfb_enable_axi_clk(mxsfb);

	drm_crtc_vblank_on(crtc);

	/* If there is a bridge attached to the LCDIF, use its bus format */
	if (mxsfb->bridge) {
		bridge_state =
			drm_atomic_get_new_bridge_state(state,
							mxsfb->bridge);
		if (!bridge_state)
			bus_format = MEDIA_BUS_FMT_FIXED;
		else
			bus_format = bridge_state->input_bus_cfg.format;

		if (bus_format == MEDIA_BUS_FMT_FIXED) {
			dev_warn_once(drm->dev,
				      "Bridge does not provide bus format, assuming MEDIA_BUS_FMT_RGB888_1X24.\n"
				      "Please fix bridge driver by handling atomic_get_input_bus_fmts.\n");
			bus_format = MEDIA_BUS_FMT_RGB888_1X24;
		}
	}

	/* If there is no bridge, use bus format from connector */
	if (!bus_format && mxsfb->connector->display_info.num_bus_formats)
		bus_format = mxsfb->connector->display_info.bus_formats[0];

	/* If all else fails, default to RGB888_1X24 */
	if (!bus_format)
		bus_format = MEDIA_BUS_FMT_RGB888_1X24;

	mxsfb_crtc_mode_set_nofb(mxsfb, bridge_state, bus_format);

	/* Write cur_buf as well to avoid an initial corrupt frame */
	dma_addr = drm_fb_dma_get_gem_addr(new_pstate->fb, new_pstate, 0);
	if (dma_addr) {
		writel(dma_addr, mxsfb->base +
mxsfb->devdata->cur_buf);
		writel(dma_addr, mxsfb->base + mxsfb->devdata->next_buf);
	}

	mxsfb_enable_controller(mxsfb);
}

/*
 * Stop scanout, flush any pending vblank event and drop the clock and
 * runtime PM references taken at enable time.
 */
static void mxsfb_crtc_atomic_disable(struct drm_crtc *crtc,
				      struct drm_atomic_state *state)
{
	struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(crtc->dev);
	struct drm_device *drm = mxsfb->drm;
	struct drm_pending_vblank_event *event;

	mxsfb_disable_controller(mxsfb);

	spin_lock_irq(&drm->event_lock);
	event = crtc->state->event;
	if (event) {
		crtc->state->event = NULL;
		drm_crtc_send_vblank_event(crtc, event);
	}
	spin_unlock_irq(&drm->event_lock);

	drm_crtc_vblank_off(crtc);

	mxsfb_disable_axi_clk(mxsfb);
	pm_runtime_put_sync(drm->dev);
}

static int mxsfb_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(crtc->dev);

	/* Clear and enable VBLANK IRQ */
	writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR);
	writel(CTRL1_CUR_FRAME_DONE_IRQ_EN, mxsfb->base + LCDC_CTRL1 + REG_SET);

	return 0;
}

static void mxsfb_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(crtc->dev);

	/* Disable and clear VBLANK IRQ */
	writel(CTRL1_CUR_FRAME_DONE_IRQ_EN, mxsfb->base + LCDC_CTRL1 + REG_CLR);
	writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR);
}

/* debugfs CRC source selection: only "auto" (or NULL to disable) is valid. */
static int mxsfb_crtc_set_crc_source(struct drm_crtc *crtc, const char *source)
{
	struct mxsfb_drm_private *mxsfb;

	if (!crtc)
		return -ENODEV;

	mxsfb = to_mxsfb_drm_private(crtc->dev);

	if (source && strcmp(source, "auto") == 0)
		mxsfb->crc_active = true;
	else if (!source)
		mxsfb->crc_active = false;
	else
		return -EINVAL;

	return 0;
}

/* Validate a requested CRC source; one CRC value per frame. */
static int mxsfb_crtc_verify_crc_source(struct drm_crtc *crtc,
					const char *source, size_t *values_cnt)
{
	if (!crtc)
		return -ENODEV;

	if (source && strcmp(source, "auto") != 0) {
		DRM_DEBUG_DRIVER("Unknown CRC source %s for %s\n",
				 source, crtc->name);
		return -EINVAL;
	}

	*values_cnt = 1;
	return 0;
}

static const struct drm_crtc_helper_funcs mxsfb_crtc_helper_funcs = {
	.atomic_check = mxsfb_crtc_atomic_check,
	.atomic_flush = mxsfb_crtc_atomic_flush,
	.atomic_enable = mxsfb_crtc_atomic_enable,
	.atomic_disable = mxsfb_crtc_atomic_disable,
};

static const struct drm_crtc_funcs mxsfb_crtc_funcs = {
	.reset = drm_atomic_helper_crtc_reset,
	.destroy = drm_crtc_cleanup,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	.enable_vblank = mxsfb_crtc_enable_vblank,
	.disable_vblank = mxsfb_crtc_disable_vblank,
};

/* Same as above, plus debugfs CRC hooks for controllers with CRC32 support. */
static const struct drm_crtc_funcs mxsfb_crtc_with_crc_funcs = {
	.reset = drm_atomic_helper_crtc_reset,
	.destroy = drm_crtc_cleanup,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	.enable_vblank = mxsfb_crtc_enable_vblank,
	.disable_vblank = mxsfb_crtc_disable_vblank,
	.set_crc_source = mxsfb_crtc_set_crc_source,
	.verify_crc_source = mxsfb_crtc_verify_crc_source,
};

/* -----------------------------------------------------------------------------
 * Encoder
 */

static const struct drm_encoder_funcs mxsfb_encoder_funcs = {
	.destroy = drm_encoder_cleanup,
};

/* -----------------------------------------------------------------------------
 * Planes
 */

static int mxsfb_plane_atomic_check(struct drm_plane *plane,
				    struct drm_atomic_state *state)
{
	struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state,
									     plane);
	struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(plane->dev);
	struct drm_crtc_state *crtc_state;

	crtc_state = drm_atomic_get_new_crtc_state(state, &mxsfb->crtc);

	/* No scaling support in the hardware. */
	return drm_atomic_helper_check_plane_state(plane_state, crtc_state,
						   DRM_PLANE_NO_SCALING,
						   DRM_PLANE_NO_SCALING,
						   false, true);
}

static void mxsfb_plane_primary_atomic_update(struct drm_plane *plane,
					      struct drm_atomic_state *state)
{
	struct
mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(plane->dev);
	struct drm_plane_state *new_pstate = drm_atomic_get_new_plane_state(state,
									    plane);
	dma_addr_t dma_addr;

	/* Queue the new framebuffer; it is latched at the next frame. */
	dma_addr = drm_fb_dma_get_gem_addr(new_pstate->fb, new_pstate, 0);
	if (dma_addr)
		writel(dma_addr, mxsfb->base + mxsfb->devdata->next_buf);
}

/*
 * Program the overlay (alpha surface) layer: scanout address, pixel format
 * and alpha mode. A NULL framebuffer address disables the layer.
 */
static void mxsfb_plane_overlay_atomic_update(struct drm_plane *plane,
					      struct drm_atomic_state *state)
{
	struct drm_plane_state *old_pstate = drm_atomic_get_old_plane_state(state,
									    plane);
	struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(plane->dev);
	struct drm_plane_state *new_pstate = drm_atomic_get_new_plane_state(state,
									    plane);
	dma_addr_t dma_addr;
	u32 ctrl;

	dma_addr = drm_fb_dma_get_gem_addr(new_pstate->fb, new_pstate, 0);
	if (!dma_addr) {
		writel(0, mxsfb->base + LCDC_AS_CTRL);
		return;
	}

	/*
	 * HACK: The hardware seems to output 64 bytes of data of unknown
	 * origin, and then to proceed with the framebuffer. Until the reason
	 * is understood, live with the 16 initial invalid pixels on the first
	 * line and start 64 bytes within the framebuffer.
	 */
	dma_addr += 64;

	writel(dma_addr, mxsfb->base + LCDC_AS_NEXT_BUF);

	/*
	 * If the plane was previously disabled, write LCDC_AS_BUF as well to
	 * provide the first buffer.
	 */
	if (!old_pstate->fb)
		writel(dma_addr, mxsfb->base + LCDC_AS_BUF);

	ctrl = AS_CTRL_AS_ENABLE | AS_CTRL_ALPHA(255);

	switch (new_pstate->fb->format->format) {
	case DRM_FORMAT_XRGB4444:
		ctrl |= AS_CTRL_FORMAT_RGB444 | AS_CTRL_ALPHA_CTRL_OVERRIDE;
		break;
	case DRM_FORMAT_ARGB4444:
		ctrl |= AS_CTRL_FORMAT_ARGB4444 | AS_CTRL_ALPHA_CTRL_EMBEDDED;
		break;
	case DRM_FORMAT_XRGB1555:
		ctrl |= AS_CTRL_FORMAT_RGB555 | AS_CTRL_ALPHA_CTRL_OVERRIDE;
		break;
	case DRM_FORMAT_ARGB1555:
		ctrl |= AS_CTRL_FORMAT_ARGB1555 | AS_CTRL_ALPHA_CTRL_EMBEDDED;
		break;
	case DRM_FORMAT_RGB565:
		ctrl |= AS_CTRL_FORMAT_RGB565 | AS_CTRL_ALPHA_CTRL_OVERRIDE;
		break;
	case DRM_FORMAT_XRGB8888:
		ctrl |= AS_CTRL_FORMAT_RGB888 | AS_CTRL_ALPHA_CTRL_OVERRIDE;
		break;
	case DRM_FORMAT_ARGB8888:
		ctrl |= AS_CTRL_FORMAT_ARGB8888 | AS_CTRL_ALPHA_CTRL_EMBEDDED;
		break;
	}

	writel(ctrl, mxsfb->base + LCDC_AS_CTRL);
}

static void mxsfb_plane_overlay_atomic_disable(struct drm_plane *plane,
					       struct drm_atomic_state *state)
{
	struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(plane->dev);

	writel(0, mxsfb->base + LCDC_AS_CTRL);
}

/* Only linear buffers can be scanned out by this hardware. */
static bool mxsfb_format_mod_supported(struct drm_plane *plane,
				       uint32_t format,
				       uint64_t modifier)
{
	return modifier == DRM_FORMAT_MOD_LINEAR;
}

static const struct drm_plane_helper_funcs mxsfb_plane_primary_helper_funcs = {
	.atomic_check = mxsfb_plane_atomic_check,
	.atomic_update = mxsfb_plane_primary_atomic_update,
};

static const struct drm_plane_helper_funcs mxsfb_plane_overlay_helper_funcs = {
	.atomic_check = mxsfb_plane_atomic_check,
	.atomic_update = mxsfb_plane_overlay_atomic_update,
	.atomic_disable = mxsfb_plane_overlay_atomic_disable,
};

static const struct drm_plane_funcs mxsfb_plane_funcs = {
	.format_mod_supported	= mxsfb_format_mod_supported,
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.destroy		= drm_plane_cleanup,
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};

static const uint32_t mxsfb_primary_plane_formats[] = {
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
};

static const uint32_t mxsfb_overlay_plane_formats[] = {
	DRM_FORMAT_XRGB4444,
	DRM_FORMAT_ARGB4444,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
};

static const uint64_t mxsfb_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* -----------------------------------------------------------------------------
 * Initialization
 */

/*
 * Register the primary plane, the optional overlay plane, the CRTC (with
 * CRC hooks when the controller supports CRC32) and the encoder.
 */
int mxsfb_kms_init(struct mxsfb_drm_private *mxsfb)
{
	struct drm_encoder *encoder = &mxsfb->encoder;
	struct drm_crtc *crtc = &mxsfb->crtc;
	int ret;

	drm_plane_helper_add(&mxsfb->planes.primary,
			     &mxsfb_plane_primary_helper_funcs);
	ret = drm_universal_plane_init(mxsfb->drm, &mxsfb->planes.primary, 1,
				       &mxsfb_plane_funcs,
				       mxsfb_primary_plane_formats,
				       ARRAY_SIZE(mxsfb_primary_plane_formats),
				       mxsfb_modifiers, DRM_PLANE_TYPE_PRIMARY,
				       NULL);
	if (ret)
		return ret;

	if (mxsfb->devdata->has_overlay) {
		drm_plane_helper_add(&mxsfb->planes.overlay,
				     &mxsfb_plane_overlay_helper_funcs);
		ret = drm_universal_plane_init(mxsfb->drm,
					       &mxsfb->planes.overlay, 1,
					       &mxsfb_plane_funcs,
					       mxsfb_overlay_plane_formats,
					       ARRAY_SIZE(mxsfb_overlay_plane_formats),
					       mxsfb_modifiers, DRM_PLANE_TYPE_OVERLAY,
					       NULL);
		if (ret)
			return ret;
	}

	drm_crtc_helper_add(crtc, &mxsfb_crtc_helper_funcs);
	if (mxsfb->devdata->has_crc32) {
		ret = drm_crtc_init_with_planes(mxsfb->drm, crtc,
						&mxsfb->planes.primary, NULL,
						&mxsfb_crtc_with_crc_funcs, NULL);
	} else {
		ret = drm_crtc_init_with_planes(mxsfb->drm, crtc,
						&mxsfb->planes.primary, NULL,
						&mxsfb_crtc_funcs, NULL);
	}
	if (ret)
		return ret;

	encoder->possible_crtcs = drm_crtc_mask(crtc);
	return drm_encoder_init(mxsfb->drm, encoder, &mxsfb_encoder_funcs,
				DRM_MODE_ENCODER_NONE, NULL);
}
linux-master
drivers/gpu/drm/mxsfb/mxsfb_kms.c
// SPDX-License-Identifier: MIT /* * Copyright 2020 Noralf Trønnes */ #include <linux/backlight.h> #include <linux/workqueue.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_state_helper.h> #include <drm/drm_connector.h> #include <drm/drm_drv.h> #include <drm/drm_edid.h> #include <drm/drm_encoder.h> #include <drm/drm_file.h> #include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> #include <drm/gud.h> #include "gud_internal.h" struct gud_connector { struct drm_connector connector; struct drm_encoder encoder; struct backlight_device *backlight; struct work_struct backlight_work; /* Supported properties */ u16 *properties; unsigned int num_properties; /* Initial gadget tv state if applicable, applied on state reset */ struct drm_tv_connector_state initial_tv_state; /* * Initial gadget backlight brightness if applicable, applied on state reset. * The value -ENODEV is used to signal no backlight. */ int initial_brightness; }; static inline struct gud_connector *to_gud_connector(struct drm_connector *connector) { return container_of(connector, struct gud_connector, connector); } static void gud_conn_err(struct drm_connector *connector, const char *msg, int ret) { dev_err(connector->dev->dev, "%s: %s (ret=%d)\n", connector->name, msg, ret); } /* * Use a worker to avoid taking kms locks inside the backlight lock. * Other display drivers use backlight within their kms locks. * This avoids inconsistent locking rules, which would upset lockdep. 
*/ static void gud_connector_backlight_update_status_work(struct work_struct *work) { struct gud_connector *gconn = container_of(work, struct gud_connector, backlight_work); struct drm_connector *connector = &gconn->connector; struct drm_connector_state *connector_state; struct drm_device *drm = connector->dev; struct drm_modeset_acquire_ctx ctx; struct drm_atomic_state *state; int idx, ret; if (!drm_dev_enter(drm, &idx)) return; state = drm_atomic_state_alloc(drm); if (!state) { ret = -ENOMEM; goto exit; } drm_modeset_acquire_init(&ctx, 0); state->acquire_ctx = &ctx; retry: connector_state = drm_atomic_get_connector_state(state, connector); if (IS_ERR(connector_state)) { ret = PTR_ERR(connector_state); goto out; } /* Reuse tv.brightness to avoid having to subclass */ connector_state->tv.brightness = gconn->backlight->props.brightness; ret = drm_atomic_commit(state); out: if (ret == -EDEADLK) { drm_atomic_state_clear(state); drm_modeset_backoff(&ctx); goto retry; } drm_atomic_state_put(state); drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); exit: drm_dev_exit(idx); if (ret) dev_err(drm->dev, "Failed to update backlight, err=%d\n", ret); } static int gud_connector_backlight_update_status(struct backlight_device *bd) { struct drm_connector *connector = bl_get_data(bd); struct gud_connector *gconn = to_gud_connector(connector); /* The USB timeout is 5 seconds so use system_long_wq for worst case scenario */ queue_work(system_long_wq, &gconn->backlight_work); return 0; } static const struct backlight_ops gud_connector_backlight_ops = { .update_status = gud_connector_backlight_update_status, }; static int gud_connector_backlight_register(struct gud_connector *gconn) { struct drm_connector *connector = &gconn->connector; struct backlight_device *bd; const char *name; const struct backlight_properties props = { .type = BACKLIGHT_RAW, .scale = BACKLIGHT_SCALE_NON_LINEAR, .max_brightness = 100, .brightness = gconn->initial_brightness, }; name = 
kasprintf(GFP_KERNEL, "card%d-%s-backlight", connector->dev->primary->index, connector->name); if (!name) return -ENOMEM; bd = backlight_device_register(name, connector->kdev, connector, &gud_connector_backlight_ops, &props); kfree(name); if (IS_ERR(bd)) return PTR_ERR(bd); gconn->backlight = bd; return 0; } static int gud_connector_detect(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx, bool force) { struct gud_device *gdrm = to_gud_device(connector->dev); int idx, ret; u8 status; if (!drm_dev_enter(connector->dev, &idx)) return connector_status_disconnected; if (force) { ret = gud_usb_set(gdrm, GUD_REQ_SET_CONNECTOR_FORCE_DETECT, connector->index, NULL, 0); if (ret) { ret = connector_status_unknown; goto exit; } } ret = gud_usb_get_u8(gdrm, GUD_REQ_GET_CONNECTOR_STATUS, connector->index, &status); if (ret) { ret = connector_status_unknown; goto exit; } switch (status & GUD_CONNECTOR_STATUS_CONNECTED_MASK) { case GUD_CONNECTOR_STATUS_DISCONNECTED: ret = connector_status_disconnected; break; case GUD_CONNECTOR_STATUS_CONNECTED: ret = connector_status_connected; break; default: ret = connector_status_unknown; break; } if (status & GUD_CONNECTOR_STATUS_CHANGED) connector->epoch_counter += 1; exit: drm_dev_exit(idx); return ret; } struct gud_connector_get_edid_ctx { void *buf; size_t len; bool edid_override; }; static int gud_connector_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len) { struct gud_connector_get_edid_ctx *ctx = data; size_t start = block * EDID_LENGTH; ctx->edid_override = false; if (start + len > ctx->len) return -1; memcpy(buf, ctx->buf + start, len); return 0; } static int gud_connector_get_modes(struct drm_connector *connector) { struct gud_device *gdrm = to_gud_device(connector->dev); struct gud_display_mode_req *reqmodes = NULL; struct gud_connector_get_edid_ctx edid_ctx; unsigned int i, num_modes = 0; struct edid *edid = NULL; int idx, ret; if (!drm_dev_enter(connector->dev, &idx)) return 0; 
edid_ctx.edid_override = true; edid_ctx.buf = kmalloc(GUD_CONNECTOR_MAX_EDID_LEN, GFP_KERNEL); if (!edid_ctx.buf) goto out; ret = gud_usb_get(gdrm, GUD_REQ_GET_CONNECTOR_EDID, connector->index, edid_ctx.buf, GUD_CONNECTOR_MAX_EDID_LEN); if (ret > 0 && ret % EDID_LENGTH) { gud_conn_err(connector, "Invalid EDID size", ret); } else if (ret > 0) { edid_ctx.len = ret; edid = drm_do_get_edid(connector, gud_connector_get_edid_block, &edid_ctx); } kfree(edid_ctx.buf); drm_connector_update_edid_property(connector, edid); if (edid && edid_ctx.edid_override) goto out; reqmodes = kmalloc_array(GUD_CONNECTOR_MAX_NUM_MODES, sizeof(*reqmodes), GFP_KERNEL); if (!reqmodes) goto out; ret = gud_usb_get(gdrm, GUD_REQ_GET_CONNECTOR_MODES, connector->index, reqmodes, GUD_CONNECTOR_MAX_NUM_MODES * sizeof(*reqmodes)); if (ret <= 0) goto out; if (ret % sizeof(*reqmodes)) { gud_conn_err(connector, "Invalid display mode array size", ret); goto out; } num_modes = ret / sizeof(*reqmodes); for (i = 0; i < num_modes; i++) { struct drm_display_mode *mode; mode = drm_mode_create(connector->dev); if (!mode) { num_modes = i; goto out; } gud_to_display_mode(mode, &reqmodes[i]); drm_mode_probed_add(connector, mode); } out: if (!num_modes) num_modes = drm_add_edid_modes(connector, edid); kfree(reqmodes); kfree(edid); drm_dev_exit(idx); return num_modes; } static int gud_connector_atomic_check(struct drm_connector *connector, struct drm_atomic_state *state) { struct drm_connector_state *new_state; struct drm_crtc_state *new_crtc_state; struct drm_connector_state *old_state; new_state = drm_atomic_get_new_connector_state(state, connector); if (!new_state->crtc) return 0; old_state = drm_atomic_get_old_connector_state(state, connector); new_crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc); if (old_state->tv.margins.left != new_state->tv.margins.left || old_state->tv.margins.right != new_state->tv.margins.right || old_state->tv.margins.top != new_state->tv.margins.top || 
old_state->tv.margins.bottom != new_state->tv.margins.bottom || old_state->tv.legacy_mode != new_state->tv.legacy_mode || old_state->tv.brightness != new_state->tv.brightness || old_state->tv.contrast != new_state->tv.contrast || old_state->tv.flicker_reduction != new_state->tv.flicker_reduction || old_state->tv.overscan != new_state->tv.overscan || old_state->tv.saturation != new_state->tv.saturation || old_state->tv.hue != new_state->tv.hue) new_crtc_state->connectors_changed = true; return 0; } static const struct drm_connector_helper_funcs gud_connector_helper_funcs = { .detect_ctx = gud_connector_detect, .get_modes = gud_connector_get_modes, .atomic_check = gud_connector_atomic_check, }; static int gud_connector_late_register(struct drm_connector *connector) { struct gud_connector *gconn = to_gud_connector(connector); if (gconn->initial_brightness < 0) return 0; return gud_connector_backlight_register(gconn); } static void gud_connector_early_unregister(struct drm_connector *connector) { struct gud_connector *gconn = to_gud_connector(connector); backlight_device_unregister(gconn->backlight); cancel_work_sync(&gconn->backlight_work); } static void gud_connector_destroy(struct drm_connector *connector) { struct gud_connector *gconn = to_gud_connector(connector); drm_connector_cleanup(connector); kfree(gconn->properties); kfree(gconn); } static void gud_connector_reset(struct drm_connector *connector) { struct gud_connector *gconn = to_gud_connector(connector); drm_atomic_helper_connector_reset(connector); connector->state->tv = gconn->initial_tv_state; /* Set margins from command line */ drm_atomic_helper_connector_tv_margins_reset(connector); if (gconn->initial_brightness >= 0) connector->state->tv.brightness = gconn->initial_brightness; } static const struct drm_connector_funcs gud_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .late_register = gud_connector_late_register, .early_unregister = gud_connector_early_unregister, .destroy 
= gud_connector_destroy, .reset = gud_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; /* * The tv.mode property is shared among the connectors and its enum names are * driver specific. This means that if more than one connector uses tv.mode, * the enum names has to be the same. */ static int gud_connector_add_tv_mode(struct gud_device *gdrm, struct drm_connector *connector) { size_t buf_len = GUD_CONNECTOR_TV_MODE_MAX_NUM * GUD_CONNECTOR_TV_MODE_NAME_LEN; const char *modes[GUD_CONNECTOR_TV_MODE_MAX_NUM]; unsigned int i, num_modes; char *buf; int ret; buf = kmalloc(buf_len, GFP_KERNEL); if (!buf) return -ENOMEM; ret = gud_usb_get(gdrm, GUD_REQ_GET_CONNECTOR_TV_MODE_VALUES, connector->index, buf, buf_len); if (ret < 0) goto free; if (!ret || ret % GUD_CONNECTOR_TV_MODE_NAME_LEN) { ret = -EIO; goto free; } num_modes = ret / GUD_CONNECTOR_TV_MODE_NAME_LEN; for (i = 0; i < num_modes; i++) modes[i] = &buf[i * GUD_CONNECTOR_TV_MODE_NAME_LEN]; ret = drm_mode_create_tv_properties_legacy(connector->dev, num_modes, modes); free: kfree(buf); if (ret < 0) gud_conn_err(connector, "Failed to add TV modes", ret); return ret; } static struct drm_property * gud_connector_property_lookup(struct drm_connector *connector, u16 prop) { struct drm_mode_config *config = &connector->dev->mode_config; switch (prop) { case GUD_PROPERTY_TV_LEFT_MARGIN: return config->tv_left_margin_property; case GUD_PROPERTY_TV_RIGHT_MARGIN: return config->tv_right_margin_property; case GUD_PROPERTY_TV_TOP_MARGIN: return config->tv_top_margin_property; case GUD_PROPERTY_TV_BOTTOM_MARGIN: return config->tv_bottom_margin_property; case GUD_PROPERTY_TV_MODE: return config->legacy_tv_mode_property; case GUD_PROPERTY_TV_BRIGHTNESS: return config->tv_brightness_property; case GUD_PROPERTY_TV_CONTRAST: return config->tv_contrast_property; case GUD_PROPERTY_TV_FLICKER_REDUCTION: return 
config->tv_flicker_reduction_property; case GUD_PROPERTY_TV_OVERSCAN: return config->tv_overscan_property; case GUD_PROPERTY_TV_SATURATION: return config->tv_saturation_property; case GUD_PROPERTY_TV_HUE: return config->tv_hue_property; default: return ERR_PTR(-EINVAL); } } static unsigned int *gud_connector_tv_state_val(u16 prop, struct drm_tv_connector_state *state) { switch (prop) { case GUD_PROPERTY_TV_LEFT_MARGIN: return &state->margins.left; case GUD_PROPERTY_TV_RIGHT_MARGIN: return &state->margins.right; case GUD_PROPERTY_TV_TOP_MARGIN: return &state->margins.top; case GUD_PROPERTY_TV_BOTTOM_MARGIN: return &state->margins.bottom; case GUD_PROPERTY_TV_MODE: return &state->legacy_mode; case GUD_PROPERTY_TV_BRIGHTNESS: return &state->brightness; case GUD_PROPERTY_TV_CONTRAST: return &state->contrast; case GUD_PROPERTY_TV_FLICKER_REDUCTION: return &state->flicker_reduction; case GUD_PROPERTY_TV_OVERSCAN: return &state->overscan; case GUD_PROPERTY_TV_SATURATION: return &state->saturation; case GUD_PROPERTY_TV_HUE: return &state->hue; default: return ERR_PTR(-EINVAL); } } static int gud_connector_add_properties(struct gud_device *gdrm, struct gud_connector *gconn) { struct drm_connector *connector = &gconn->connector; struct drm_device *drm = &gdrm->drm; struct gud_property_req *properties; unsigned int i, num_properties; int ret; properties = kcalloc(GUD_CONNECTOR_PROPERTIES_MAX_NUM, sizeof(*properties), GFP_KERNEL); if (!properties) return -ENOMEM; ret = gud_usb_get(gdrm, GUD_REQ_GET_CONNECTOR_PROPERTIES, connector->index, properties, GUD_CONNECTOR_PROPERTIES_MAX_NUM * sizeof(*properties)); if (ret <= 0) goto out; if (ret % sizeof(*properties)) { ret = -EIO; goto out; } num_properties = ret / sizeof(*properties); ret = 0; gconn->properties = kcalloc(num_properties, sizeof(*gconn->properties), GFP_KERNEL); if (!gconn->properties) { ret = -ENOMEM; goto out; } for (i = 0; i < num_properties; i++) { u16 prop = le16_to_cpu(properties[i].prop); u64 val = 
le64_to_cpu(properties[i].val); struct drm_property *property; unsigned int *state_val; drm_dbg(drm, "property: %u = %llu(0x%llx)\n", prop, val, val); switch (prop) { case GUD_PROPERTY_TV_LEFT_MARGIN: fallthrough; case GUD_PROPERTY_TV_RIGHT_MARGIN: fallthrough; case GUD_PROPERTY_TV_TOP_MARGIN: fallthrough; case GUD_PROPERTY_TV_BOTTOM_MARGIN: ret = drm_mode_create_tv_margin_properties(drm); if (ret) goto out; break; case GUD_PROPERTY_TV_MODE: ret = gud_connector_add_tv_mode(gdrm, connector); if (ret) goto out; break; case GUD_PROPERTY_TV_BRIGHTNESS: fallthrough; case GUD_PROPERTY_TV_CONTRAST: fallthrough; case GUD_PROPERTY_TV_FLICKER_REDUCTION: fallthrough; case GUD_PROPERTY_TV_OVERSCAN: fallthrough; case GUD_PROPERTY_TV_SATURATION: fallthrough; case GUD_PROPERTY_TV_HUE: /* This is a no-op if already added. */ ret = drm_mode_create_tv_properties_legacy(drm, 0, NULL); if (ret) goto out; break; case GUD_PROPERTY_BACKLIGHT_BRIGHTNESS: if (val > 100) { ret = -EINVAL; goto out; } gconn->initial_brightness = val; break; default: /* New ones might show up in future devices, skip those we don't know. 
*/ drm_dbg(drm, "Ignoring unknown property: %u\n", prop); continue; } gconn->properties[gconn->num_properties++] = prop; if (prop == GUD_PROPERTY_BACKLIGHT_BRIGHTNESS) continue; /* not a DRM property */ property = gud_connector_property_lookup(connector, prop); if (WARN_ON(IS_ERR(property))) continue; state_val = gud_connector_tv_state_val(prop, &gconn->initial_tv_state); if (WARN_ON(IS_ERR(state_val))) continue; *state_val = val; drm_object_attach_property(&connector->base, property, 0); } out: kfree(properties); return ret; } int gud_connector_fill_properties(struct drm_connector_state *connector_state, struct gud_property_req *properties) { struct gud_connector *gconn = to_gud_connector(connector_state->connector); unsigned int i; for (i = 0; i < gconn->num_properties; i++) { u16 prop = gconn->properties[i]; u64 val; if (prop == GUD_PROPERTY_BACKLIGHT_BRIGHTNESS) { val = connector_state->tv.brightness; } else { unsigned int *state_val; state_val = gud_connector_tv_state_val(prop, &connector_state->tv); if (WARN_ON_ONCE(IS_ERR(state_val))) return PTR_ERR(state_val); val = *state_val; } properties[i].prop = cpu_to_le16(prop); properties[i].val = cpu_to_le64(val); } return gconn->num_properties; } static int gud_connector_create(struct gud_device *gdrm, unsigned int index, struct gud_connector_descriptor_req *desc) { struct drm_device *drm = &gdrm->drm; struct gud_connector *gconn; struct drm_connector *connector; struct drm_encoder *encoder; int ret, connector_type; u32 flags; gconn = kzalloc(sizeof(*gconn), GFP_KERNEL); if (!gconn) return -ENOMEM; INIT_WORK(&gconn->backlight_work, gud_connector_backlight_update_status_work); gconn->initial_brightness = -ENODEV; flags = le32_to_cpu(desc->flags); connector = &gconn->connector; drm_dbg(drm, "Connector: index=%u type=%u flags=0x%x\n", index, desc->connector_type, flags); switch (desc->connector_type) { case GUD_CONNECTOR_TYPE_PANEL: connector_type = DRM_MODE_CONNECTOR_USB; break; case GUD_CONNECTOR_TYPE_VGA: 
connector_type = DRM_MODE_CONNECTOR_VGA; break; case GUD_CONNECTOR_TYPE_DVI: connector_type = DRM_MODE_CONNECTOR_DVID; break; case GUD_CONNECTOR_TYPE_COMPOSITE: connector_type = DRM_MODE_CONNECTOR_Composite; break; case GUD_CONNECTOR_TYPE_SVIDEO: connector_type = DRM_MODE_CONNECTOR_SVIDEO; break; case GUD_CONNECTOR_TYPE_COMPONENT: connector_type = DRM_MODE_CONNECTOR_Component; break; case GUD_CONNECTOR_TYPE_DISPLAYPORT: connector_type = DRM_MODE_CONNECTOR_DisplayPort; break; case GUD_CONNECTOR_TYPE_HDMI: connector_type = DRM_MODE_CONNECTOR_HDMIA; break; default: /* future types */ connector_type = DRM_MODE_CONNECTOR_USB; break; } drm_connector_helper_add(connector, &gud_connector_helper_funcs); ret = drm_connector_init(drm, connector, &gud_connector_funcs, connector_type); if (ret) { kfree(connector); return ret; } if (WARN_ON(connector->index != index)) return -EINVAL; if (flags & GUD_CONNECTOR_FLAGS_POLL_STATUS) connector->polled = (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT); if (flags & GUD_CONNECTOR_FLAGS_INTERLACE) connector->interlace_allowed = true; if (flags & GUD_CONNECTOR_FLAGS_DOUBLESCAN) connector->doublescan_allowed = true; ret = gud_connector_add_properties(gdrm, gconn); if (ret) { gud_conn_err(connector, "Failed to add properties", ret); return ret; } /* The first connector is attached to the existing simple pipe encoder */ if (!connector->index) { encoder = &gdrm->pipe.encoder; } else { encoder = &gconn->encoder; ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE); if (ret) return ret; encoder->possible_crtcs = 1; } return drm_connector_attach_encoder(connector, encoder); } int gud_get_connectors(struct gud_device *gdrm) { struct gud_connector_descriptor_req *descs; unsigned int i, num_connectors; int ret; descs = kmalloc_array(GUD_CONNECTORS_MAX_NUM, sizeof(*descs), GFP_KERNEL); if (!descs) return -ENOMEM; ret = gud_usb_get(gdrm, GUD_REQ_GET_CONNECTORS, 0, descs, GUD_CONNECTORS_MAX_NUM * sizeof(*descs)); if (ret < 0) 
goto free; if (!ret || ret % sizeof(*descs)) { ret = -EIO; goto free; } num_connectors = ret / sizeof(*descs); for (i = 0; i < num_connectors; i++) { ret = gud_connector_create(gdrm, i, &descs[i]); if (ret) goto free; } free: kfree(descs); return ret; }
linux-master
drivers/gpu/drm/gud/gud_connector.c
// SPDX-License-Identifier: MIT /* * Copyright 2020 Noralf Trønnes */ #include <linux/dma-buf.h> #include <linux/dma-mapping.h> #include <linux/lz4.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/string_helpers.h> #include <linux/usb.h> #include <linux/vmalloc.h> #include <linux/workqueue.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_blend.h> #include <drm/drm_damage_helper.h> #include <drm/drm_debugfs.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_generic.h> #include <drm/drm_fourcc.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_gem_shmem_helper.h> #include <drm/drm_managed.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> #include <drm/gud.h> #include "gud_internal.h" /* Only used internally */ static const struct drm_format_info gud_drm_format_r1 = { .format = GUD_DRM_FORMAT_R1, .num_planes = 1, .char_per_block = { 1, 0, 0 }, .block_w = { 8, 0, 0 }, .block_h = { 1, 0, 0 }, .hsub = 1, .vsub = 1, }; static const struct drm_format_info gud_drm_format_xrgb1111 = { .format = GUD_DRM_FORMAT_XRGB1111, .num_planes = 1, .char_per_block = { 1, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 1, 0, 0 }, .hsub = 1, .vsub = 1, }; static int gud_usb_control_msg(struct usb_interface *intf, bool in, u8 request, u16 value, void *buf, size_t len) { u8 requesttype = USB_TYPE_VENDOR | USB_RECIP_INTERFACE; u8 ifnum = intf->cur_altsetting->desc.bInterfaceNumber; struct usb_device *usb = interface_to_usbdev(intf); unsigned int pipe; if (len && !buf) return -EINVAL; if (in) { pipe = usb_rcvctrlpipe(usb, 0); requesttype |= USB_DIR_IN; } else { pipe = usb_sndctrlpipe(usb, 0); requesttype |= USB_DIR_OUT; } return usb_control_msg(usb, pipe, request, requesttype, value, ifnum, buf, len, USB_CTRL_GET_TIMEOUT); } static int gud_get_display_descriptor(struct usb_interface *intf, struct gud_display_descriptor_req *desc) { void *buf; int ret; 
buf = kmalloc(sizeof(*desc), GFP_KERNEL); if (!buf) return -ENOMEM; ret = gud_usb_control_msg(intf, true, GUD_REQ_GET_DESCRIPTOR, 0, buf, sizeof(*desc)); memcpy(desc, buf, sizeof(*desc)); kfree(buf); if (ret < 0) return ret; if (ret != sizeof(*desc)) return -EIO; if (desc->magic != le32_to_cpu(GUD_DISPLAY_MAGIC)) return -ENODATA; DRM_DEV_DEBUG_DRIVER(&intf->dev, "version=%u flags=0x%x compression=0x%x max_buffer_size=%u\n", desc->version, le32_to_cpu(desc->flags), desc->compression, le32_to_cpu(desc->max_buffer_size)); if (!desc->version || !desc->max_width || !desc->max_height || le32_to_cpu(desc->min_width) > le32_to_cpu(desc->max_width) || le32_to_cpu(desc->min_height) > le32_to_cpu(desc->max_height)) return -EINVAL; return 0; } static int gud_status_to_errno(u8 status) { switch (status) { case GUD_STATUS_OK: return 0; case GUD_STATUS_BUSY: return -EBUSY; case GUD_STATUS_REQUEST_NOT_SUPPORTED: return -EOPNOTSUPP; case GUD_STATUS_PROTOCOL_ERROR: return -EPROTO; case GUD_STATUS_INVALID_PARAMETER: return -EINVAL; case GUD_STATUS_ERROR: return -EREMOTEIO; default: return -EREMOTEIO; } } static int gud_usb_get_status(struct usb_interface *intf) { int ret, status = -EIO; u8 *buf; buf = kmalloc(sizeof(*buf), GFP_KERNEL); if (!buf) return -ENOMEM; ret = gud_usb_control_msg(intf, true, GUD_REQ_GET_STATUS, 0, buf, sizeof(*buf)); if (ret == sizeof(*buf)) status = gud_status_to_errno(*buf); kfree(buf); if (ret < 0) return ret; return status; } static int gud_usb_transfer(struct gud_device *gdrm, bool in, u8 request, u16 index, void *buf, size_t len) { struct usb_interface *intf = to_usb_interface(gdrm->drm.dev); int idx, ret; drm_dbg(&gdrm->drm, "%s: request=0x%x index=%u len=%zu\n", in ? 
"get" : "set", request, index, len); if (!drm_dev_enter(&gdrm->drm, &idx)) return -ENODEV; mutex_lock(&gdrm->ctrl_lock); ret = gud_usb_control_msg(intf, in, request, index, buf, len); if (ret == -EPIPE || ((gdrm->flags & GUD_DISPLAY_FLAG_STATUS_ON_SET) && !in && ret >= 0)) { int status; status = gud_usb_get_status(intf); if (status < 0) { ret = status; } else if (ret < 0) { dev_err_once(gdrm->drm.dev, "Unexpected status OK for failed transfer\n"); ret = -EPIPE; } } if (ret < 0) { drm_dbg(&gdrm->drm, "ret=%d\n", ret); gdrm->stats_num_errors++; } mutex_unlock(&gdrm->ctrl_lock); drm_dev_exit(idx); return ret; } /* * @buf cannot be allocated on the stack. * Returns number of bytes received or negative error code on failure. */ int gud_usb_get(struct gud_device *gdrm, u8 request, u16 index, void *buf, size_t max_len) { return gud_usb_transfer(gdrm, true, request, index, buf, max_len); } /* * @buf can be allocated on the stack or NULL. * Returns zero on success or negative error code on failure. */ int gud_usb_set(struct gud_device *gdrm, u8 request, u16 index, void *buf, size_t len) { void *trbuf = NULL; int ret; if (buf && len) { trbuf = kmemdup(buf, len, GFP_KERNEL); if (!trbuf) return -ENOMEM; } ret = gud_usb_transfer(gdrm, false, request, index, trbuf, len); kfree(trbuf); if (ret < 0) return ret; return ret != len ? -EIO : 0; } /* * @val can be allocated on the stack. * Returns zero on success or negative error code on failure. */ int gud_usb_get_u8(struct gud_device *gdrm, u8 request, u16 index, u8 *val) { u8 *buf; int ret; buf = kmalloc(sizeof(*val), GFP_KERNEL); if (!buf) return -ENOMEM; ret = gud_usb_get(gdrm, request, index, buf, sizeof(*val)); *val = *buf; kfree(buf); if (ret < 0) return ret; return ret != sizeof(*val) ? -EIO : 0; } /* Returns zero on success or negative error code on failure. 
*/ int gud_usb_set_u8(struct gud_device *gdrm, u8 request, u8 val) { return gud_usb_set(gdrm, request, 0, &val, sizeof(val)); } static int gud_get_properties(struct gud_device *gdrm) { struct gud_property_req *properties; unsigned int i, num_properties; int ret; properties = kcalloc(GUD_PROPERTIES_MAX_NUM, sizeof(*properties), GFP_KERNEL); if (!properties) return -ENOMEM; ret = gud_usb_get(gdrm, GUD_REQ_GET_PROPERTIES, 0, properties, GUD_PROPERTIES_MAX_NUM * sizeof(*properties)); if (ret <= 0) goto out; if (ret % sizeof(*properties)) { ret = -EIO; goto out; } num_properties = ret / sizeof(*properties); ret = 0; gdrm->properties = drmm_kcalloc(&gdrm->drm, num_properties, sizeof(*gdrm->properties), GFP_KERNEL); if (!gdrm->properties) { ret = -ENOMEM; goto out; } for (i = 0; i < num_properties; i++) { u16 prop = le16_to_cpu(properties[i].prop); u64 val = le64_to_cpu(properties[i].val); switch (prop) { case GUD_PROPERTY_ROTATION: /* * DRM UAPI matches the protocol so use the value directly, * but mask out any additions on future devices. */ val &= GUD_ROTATION_MASK; ret = drm_plane_create_rotation_property(&gdrm->pipe.plane, DRM_MODE_ROTATE_0, val); break; default: /* New ones might show up in future devices, skip those we don't know. */ drm_dbg(&gdrm->drm, "Ignoring unknown property: %u\n", prop); continue; } if (ret) goto out; gdrm->properties[gdrm->num_properties++] = prop; } out: kfree(properties); return ret; } /* * FIXME: Dma-buf sharing requires DMA support by the importing device. * This function is a workaround to make USB devices work as well. * See todo.rst for how to fix the issue in the dma-buf framework. 
*/ static struct drm_gem_object *gud_gem_prime_import(struct drm_device *drm, struct dma_buf *dma_buf) { struct gud_device *gdrm = to_gud_device(drm); if (!gdrm->dmadev) return ERR_PTR(-ENODEV); return drm_gem_prime_import_dev(drm, dma_buf, gdrm->dmadev); } static int gud_stats_debugfs(struct seq_file *m, void *data) { struct drm_debugfs_entry *entry = m->private; struct gud_device *gdrm = to_gud_device(entry->dev); char buf[10]; string_get_size(gdrm->bulk_len, 1, STRING_UNITS_2, buf, sizeof(buf)); seq_printf(m, "Max buffer size: %s\n", buf); seq_printf(m, "Number of errors: %u\n", gdrm->stats_num_errors); seq_puts(m, "Compression: "); if (gdrm->compression & GUD_COMPRESSION_LZ4) seq_puts(m, " lz4"); if (!gdrm->compression) seq_puts(m, " none"); seq_puts(m, "\n"); if (gdrm->compression) { u64 remainder; u64 ratio = div64_u64_rem(gdrm->stats_length, gdrm->stats_actual_length, &remainder); u64 ratio_frac = div64_u64(remainder * 10, gdrm->stats_actual_length); seq_printf(m, "Compression ratio: %llu.%llu\n", ratio, ratio_frac); } return 0; } static const struct drm_simple_display_pipe_funcs gud_pipe_funcs = { .check = gud_pipe_check, .update = gud_pipe_update, DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS }; static const struct drm_mode_config_funcs gud_mode_config_funcs = { .fb_create = drm_gem_fb_create_with_dirty, .atomic_check = drm_atomic_helper_check, .atomic_commit = drm_atomic_helper_commit, }; static const u64 gud_pipe_modifiers[] = { DRM_FORMAT_MOD_LINEAR, DRM_FORMAT_MOD_INVALID }; DEFINE_DRM_GEM_FOPS(gud_fops); static const struct drm_driver gud_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .fops = &gud_fops, DRM_GEM_SHMEM_DRIVER_OPS, .gem_prime_import = gud_gem_prime_import, .name = "gud", .desc = "Generic USB Display", .date = "20200422", .major = 1, .minor = 0, }; static int gud_alloc_bulk_buffer(struct gud_device *gdrm) { unsigned int i, num_pages; struct page **pages; void *ptr; int ret; gdrm->bulk_buf = 
vmalloc_32(gdrm->bulk_len); if (!gdrm->bulk_buf) return -ENOMEM; num_pages = DIV_ROUND_UP(gdrm->bulk_len, PAGE_SIZE); pages = kmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL); if (!pages) return -ENOMEM; for (i = 0, ptr = gdrm->bulk_buf; i < num_pages; i++, ptr += PAGE_SIZE) pages[i] = vmalloc_to_page(ptr); ret = sg_alloc_table_from_pages(&gdrm->bulk_sgt, pages, num_pages, 0, gdrm->bulk_len, GFP_KERNEL); kfree(pages); return ret; } static void gud_free_buffers_and_mutex(void *data) { struct gud_device *gdrm = data; vfree(gdrm->compress_buf); gdrm->compress_buf = NULL; sg_free_table(&gdrm->bulk_sgt); vfree(gdrm->bulk_buf); gdrm->bulk_buf = NULL; mutex_destroy(&gdrm->ctrl_lock); } static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id) { const struct drm_format_info *xrgb8888_emulation_format = NULL; bool rgb565_supported = false, xrgb8888_supported = false; unsigned int num_formats_dev, num_formats = 0; struct usb_endpoint_descriptor *bulk_out; struct gud_display_descriptor_req desc; struct device *dev = &intf->dev; size_t max_buffer_size = 0; struct gud_device *gdrm; struct drm_device *drm; u8 *formats_dev; u32 *formats; int ret, i; ret = usb_find_bulk_out_endpoint(intf->cur_altsetting, &bulk_out); if (ret) return ret; ret = gud_get_display_descriptor(intf, &desc); if (ret) { DRM_DEV_DEBUG_DRIVER(dev, "Not a display interface: ret=%d\n", ret); return -ENODEV; } if (desc.version > 1) { dev_err(dev, "Protocol version %u is not supported\n", desc.version); return -ENODEV; } gdrm = devm_drm_dev_alloc(dev, &gud_drm_driver, struct gud_device, drm); if (IS_ERR(gdrm)) return PTR_ERR(gdrm); drm = &gdrm->drm; drm->mode_config.funcs = &gud_mode_config_funcs; ret = drmm_mode_config_init(drm); if (ret) return ret; gdrm->flags = le32_to_cpu(desc.flags); gdrm->compression = desc.compression & GUD_COMPRESSION_LZ4; if (gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE && gdrm->compression) return -EINVAL; mutex_init(&gdrm->ctrl_lock); 
mutex_init(&gdrm->damage_lock); INIT_WORK(&gdrm->work, gud_flush_work); gud_clear_damage(gdrm); ret = devm_add_action(dev, gud_free_buffers_and_mutex, gdrm); if (ret) return ret; drm->mode_config.min_width = le32_to_cpu(desc.min_width); drm->mode_config.max_width = le32_to_cpu(desc.max_width); drm->mode_config.min_height = le32_to_cpu(desc.min_height); drm->mode_config.max_height = le32_to_cpu(desc.max_height); formats_dev = devm_kmalloc(dev, GUD_FORMATS_MAX_NUM, GFP_KERNEL); /* Add room for emulated XRGB8888 */ formats = devm_kmalloc_array(dev, GUD_FORMATS_MAX_NUM + 1, sizeof(*formats), GFP_KERNEL); if (!formats_dev || !formats) return -ENOMEM; ret = gud_usb_get(gdrm, GUD_REQ_GET_FORMATS, 0, formats_dev, GUD_FORMATS_MAX_NUM); if (ret < 0) return ret; num_formats_dev = ret; for (i = 0; i < num_formats_dev; i++) { const struct drm_format_info *info; size_t fmt_buf_size; u32 format; format = gud_to_fourcc(formats_dev[i]); if (!format) { drm_dbg(drm, "Unsupported format: 0x%02x\n", formats_dev[i]); continue; } if (format == GUD_DRM_FORMAT_R1) info = &gud_drm_format_r1; else if (format == GUD_DRM_FORMAT_XRGB1111) info = &gud_drm_format_xrgb1111; else info = drm_format_info(format); switch (format) { case GUD_DRM_FORMAT_R1: fallthrough; case DRM_FORMAT_R8: fallthrough; case GUD_DRM_FORMAT_XRGB1111: fallthrough; case DRM_FORMAT_RGB332: fallthrough; case DRM_FORMAT_RGB888: if (!xrgb8888_emulation_format) xrgb8888_emulation_format = info; break; case DRM_FORMAT_RGB565: rgb565_supported = true; if (!xrgb8888_emulation_format) xrgb8888_emulation_format = info; break; case DRM_FORMAT_XRGB8888: xrgb8888_supported = true; break; } fmt_buf_size = drm_format_info_min_pitch(info, 0, drm->mode_config.max_width) * drm->mode_config.max_height; max_buffer_size = max(max_buffer_size, fmt_buf_size); if (format == GUD_DRM_FORMAT_R1 || format == GUD_DRM_FORMAT_XRGB1111) continue; /* Internal not for userspace */ formats[num_formats++] = format; } if (!num_formats && 
!xrgb8888_emulation_format) { dev_err(dev, "No supported pixel formats found\n"); return -EINVAL; } /* Prefer speed over color depth */ if (rgb565_supported) drm->mode_config.preferred_depth = 16; if (!xrgb8888_supported && xrgb8888_emulation_format) { gdrm->xrgb8888_emulation_format = xrgb8888_emulation_format; formats[num_formats++] = DRM_FORMAT_XRGB8888; } if (desc.max_buffer_size) max_buffer_size = le32_to_cpu(desc.max_buffer_size); /* Prevent a misbehaving device from allocating loads of RAM. 4096x4096@XRGB8888 = 64 MB */ if (max_buffer_size > SZ_64M) max_buffer_size = SZ_64M; gdrm->bulk_pipe = usb_sndbulkpipe(interface_to_usbdev(intf), usb_endpoint_num(bulk_out)); gdrm->bulk_len = max_buffer_size; ret = gud_alloc_bulk_buffer(gdrm); if (ret) return ret; if (gdrm->compression & GUD_COMPRESSION_LZ4) { gdrm->lz4_comp_mem = devm_kmalloc(dev, LZ4_MEM_COMPRESS, GFP_KERNEL); if (!gdrm->lz4_comp_mem) return -ENOMEM; gdrm->compress_buf = vmalloc(gdrm->bulk_len); if (!gdrm->compress_buf) return -ENOMEM; } ret = drm_simple_display_pipe_init(drm, &gdrm->pipe, &gud_pipe_funcs, formats, num_formats, gud_pipe_modifiers, NULL); if (ret) return ret; devm_kfree(dev, formats); devm_kfree(dev, formats_dev); ret = gud_get_properties(gdrm); if (ret) { dev_err(dev, "Failed to get properties (error=%d)\n", ret); return ret; } drm_plane_enable_fb_damage_clips(&gdrm->pipe.plane); ret = gud_get_connectors(gdrm); if (ret) { dev_err(dev, "Failed to get connectors (error=%d)\n", ret); return ret; } drm_mode_config_reset(drm); usb_set_intfdata(intf, gdrm); gdrm->dmadev = usb_intf_get_dma_device(intf); if (!gdrm->dmadev) dev_warn(dev, "buffer sharing not supported"); drm_debugfs_add_file(drm, "stats", gud_stats_debugfs, NULL); ret = drm_dev_register(drm, 0); if (ret) { put_device(gdrm->dmadev); return ret; } drm_kms_helper_poll_init(drm); drm_fbdev_generic_setup(drm, 0); return 0; } static void gud_disconnect(struct usb_interface *interface) { struct gud_device *gdrm = 
usb_get_intfdata(interface); struct drm_device *drm = &gdrm->drm; drm_dbg(drm, "%s:\n", __func__); drm_kms_helper_poll_fini(drm); drm_dev_unplug(drm); drm_atomic_helper_shutdown(drm); put_device(gdrm->dmadev); gdrm->dmadev = NULL; } static int gud_suspend(struct usb_interface *intf, pm_message_t message) { struct gud_device *gdrm = usb_get_intfdata(intf); return drm_mode_config_helper_suspend(&gdrm->drm); } static int gud_resume(struct usb_interface *intf) { struct gud_device *gdrm = usb_get_intfdata(intf); drm_mode_config_helper_resume(&gdrm->drm); return 0; } static const struct usb_device_id gud_id_table[] = { { USB_DEVICE_INTERFACE_CLASS(0x1d50, 0x614d, USB_CLASS_VENDOR_SPEC) }, { USB_DEVICE_INTERFACE_CLASS(0x16d0, 0x10a9, USB_CLASS_VENDOR_SPEC) }, { } }; MODULE_DEVICE_TABLE(usb, gud_id_table); static struct usb_driver gud_usb_driver = { .name = "gud", .probe = gud_probe, .disconnect = gud_disconnect, .id_table = gud_id_table, .suspend = gud_suspend, .resume = gud_resume, .reset_resume = gud_resume, }; module_usb_driver(gud_usb_driver); MODULE_AUTHOR("Noralf Trønnes"); MODULE_LICENSE("Dual MIT/GPL");
linux-master
drivers/gpu/drm/gud/gud_drv.c
// SPDX-License-Identifier: MIT /* * Copyright 2020 Noralf Trønnes */ #include <linux/lz4.h> #include <linux/usb.h> #include <linux/vmalloc.h> #include <linux/workqueue.h> #include <drm/drm_atomic.h> #include <drm/drm_connector.h> #include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> #include <drm/drm_format_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_print.h> #include <drm/drm_rect.h> #include <drm/drm_simple_kms_helper.h> #include <drm/gud.h> #include "gud_internal.h" /* * Some userspace rendering loops run all displays in the same loop. * This means that a fast display will have to wait for a slow one. * Such users might want to enable this module parameter. */ static bool gud_async_flush; module_param_named(async_flush, gud_async_flush, bool, 0644); MODULE_PARM_DESC(async_flush, "Enable asynchronous flushing [default=0]"); /* * FIXME: The driver is probably broken on Big Endian machines. 
* See discussion: * https://lore.kernel.org/dri-devel/CAKb7UvihLX0hgBOP3VBG7O+atwZcUVCPVuBdfmDMpg0NjXe-cQ@mail.gmail.com/ */ static bool gud_is_big_endian(void) { #if defined(__BIG_ENDIAN) return true; #else return false; #endif } static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format, void *src, struct drm_framebuffer *fb, struct drm_rect *rect) { unsigned int block_width = drm_format_info_block_width(format, 0); unsigned int bits_per_pixel = 8 / block_width; unsigned int x, y, width, height; u8 pix, *pix8, *block = dst; /* Assign to silence compiler warning */ struct iosys_map dst_map, vmap; size_t len; void *buf; WARN_ON_ONCE(format->char_per_block[0] != 1); /* Start on a byte boundary */ rect->x1 = ALIGN_DOWN(rect->x1, block_width); width = drm_rect_width(rect); height = drm_rect_height(rect); len = drm_format_info_min_pitch(format, 0, width) * height; buf = kmalloc(width * height, GFP_KERNEL); if (!buf) return 0; iosys_map_set_vaddr(&dst_map, buf); iosys_map_set_vaddr(&vmap, src); drm_fb_xrgb8888_to_gray8(&dst_map, NULL, &vmap, fb, rect); pix8 = buf; for (y = 0; y < height; y++) { for (x = 0; x < width; x++) { unsigned int pixpos = x % block_width; /* within byte from the left */ unsigned int pixshift = (block_width - pixpos - 1) * bits_per_pixel; if (!pixpos) { block = dst++; *block = 0; } pix = (*pix8++) >> (8 - bits_per_pixel); *block |= pix << pixshift; } } kfree(buf); return len; } static size_t gud_xrgb8888_to_color(u8 *dst, const struct drm_format_info *format, void *src, struct drm_framebuffer *fb, struct drm_rect *rect) { unsigned int block_width = drm_format_info_block_width(format, 0); unsigned int bits_per_pixel = 8 / block_width; u8 r, g, b, pix, *block = dst; /* Assign to silence compiler warning */ unsigned int x, y, width; __le32 *sbuf32; u32 pix32; size_t len; /* Start on a byte boundary */ rect->x1 = ALIGN_DOWN(rect->x1, block_width); width = drm_rect_width(rect); len = drm_format_info_min_pitch(format, 0, width) * 
drm_rect_height(rect); for (y = rect->y1; y < rect->y2; y++) { sbuf32 = src + (y * fb->pitches[0]); sbuf32 += rect->x1; for (x = 0; x < width; x++) { unsigned int pixpos = x % block_width; /* within byte from the left */ unsigned int pixshift = (block_width - pixpos - 1) * bits_per_pixel; if (!pixpos) { block = dst++; *block = 0; } pix32 = le32_to_cpu(*sbuf32++); r = pix32 >> 16; g = pix32 >> 8; b = pix32; switch (format->format) { case GUD_DRM_FORMAT_XRGB1111: pix = ((r >> 7) << 2) | ((g >> 7) << 1) | (b >> 7); break; default: WARN_ON_ONCE(1); return len; } *block |= pix << pixshift; } } return len; } static int gud_prep_flush(struct gud_device *gdrm, struct drm_framebuffer *fb, const struct iosys_map *src, bool cached_reads, const struct drm_format_info *format, struct drm_rect *rect, struct gud_set_buffer_req *req) { u8 compression = gdrm->compression; struct iosys_map dst; void *vaddr, *buf; size_t pitch, len; pitch = drm_format_info_min_pitch(format, 0, drm_rect_width(rect)); len = pitch * drm_rect_height(rect); if (len > gdrm->bulk_len) return -E2BIG; vaddr = src[0].vaddr; retry: if (compression) buf = gdrm->compress_buf; else buf = gdrm->bulk_buf; iosys_map_set_vaddr(&dst, buf); /* * Imported buffers are assumed to be write-combined and thus uncached * with slow reads (at least on ARM). 
*/ if (format != fb->format) { if (format->format == GUD_DRM_FORMAT_R1) { len = gud_xrgb8888_to_r124(buf, format, vaddr, fb, rect); if (!len) return -ENOMEM; } else if (format->format == DRM_FORMAT_R8) { drm_fb_xrgb8888_to_gray8(&dst, NULL, src, fb, rect); } else if (format->format == DRM_FORMAT_RGB332) { drm_fb_xrgb8888_to_rgb332(&dst, NULL, src, fb, rect); } else if (format->format == DRM_FORMAT_RGB565) { drm_fb_xrgb8888_to_rgb565(&dst, NULL, src, fb, rect, gud_is_big_endian()); } else if (format->format == DRM_FORMAT_RGB888) { drm_fb_xrgb8888_to_rgb888(&dst, NULL, src, fb, rect); } else { len = gud_xrgb8888_to_color(buf, format, vaddr, fb, rect); } } else if (gud_is_big_endian() && format->cpp[0] > 1) { drm_fb_swab(&dst, NULL, src, fb, rect, cached_reads); } else if (compression && cached_reads && pitch == fb->pitches[0]) { /* can compress directly from the framebuffer */ buf = vaddr + rect->y1 * pitch; } else { drm_fb_memcpy(&dst, NULL, src, fb, rect); } memset(req, 0, sizeof(*req)); req->x = cpu_to_le32(rect->x1); req->y = cpu_to_le32(rect->y1); req->width = cpu_to_le32(drm_rect_width(rect)); req->height = cpu_to_le32(drm_rect_height(rect)); req->length = cpu_to_le32(len); if (compression & GUD_COMPRESSION_LZ4) { int complen; complen = LZ4_compress_default(buf, gdrm->bulk_buf, len, len, gdrm->lz4_comp_mem); if (complen <= 0) { compression = 0; goto retry; } req->compression = GUD_COMPRESSION_LZ4; req->compressed_length = cpu_to_le32(complen); } return 0; } struct gud_usb_bulk_context { struct timer_list timer; struct usb_sg_request sgr; }; static void gud_usb_bulk_timeout(struct timer_list *t) { struct gud_usb_bulk_context *ctx = from_timer(ctx, t, timer); usb_sg_cancel(&ctx->sgr); } static int gud_usb_bulk(struct gud_device *gdrm, size_t len) { struct gud_usb_bulk_context ctx; int ret; ret = usb_sg_init(&ctx.sgr, gud_to_usb_device(gdrm), gdrm->bulk_pipe, 0, gdrm->bulk_sgt.sgl, gdrm->bulk_sgt.nents, len, GFP_KERNEL); if (ret) return ret; 
timer_setup_on_stack(&ctx.timer, gud_usb_bulk_timeout, 0); mod_timer(&ctx.timer, jiffies + msecs_to_jiffies(3000)); usb_sg_wait(&ctx.sgr); if (!del_timer_sync(&ctx.timer)) ret = -ETIMEDOUT; else if (ctx.sgr.status < 0) ret = ctx.sgr.status; else if (ctx.sgr.bytes != len) ret = -EIO; destroy_timer_on_stack(&ctx.timer); return ret; } static int gud_flush_rect(struct gud_device *gdrm, struct drm_framebuffer *fb, const struct iosys_map *src, bool cached_reads, const struct drm_format_info *format, struct drm_rect *rect) { struct gud_set_buffer_req req; size_t len, trlen; int ret; drm_dbg(&gdrm->drm, "Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect)); ret = gud_prep_flush(gdrm, fb, src, cached_reads, format, rect, &req); if (ret) return ret; len = le32_to_cpu(req.length); if (req.compression) trlen = le32_to_cpu(req.compressed_length); else trlen = len; gdrm->stats_length += len; /* Did it wrap around? */ if (gdrm->stats_length <= len && gdrm->stats_actual_length) { gdrm->stats_length = len; gdrm->stats_actual_length = 0; } gdrm->stats_actual_length += trlen; if (!(gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE) || gdrm->prev_flush_failed) { ret = gud_usb_set(gdrm, GUD_REQ_SET_BUFFER, 0, &req, sizeof(req)); if (ret) return ret; } ret = gud_usb_bulk(gdrm, trlen); if (ret) gdrm->stats_num_errors++; return ret; } void gud_clear_damage(struct gud_device *gdrm) { gdrm->damage.x1 = INT_MAX; gdrm->damage.y1 = INT_MAX; gdrm->damage.x2 = 0; gdrm->damage.y2 = 0; } static void gud_flush_damage(struct gud_device *gdrm, struct drm_framebuffer *fb, const struct iosys_map *src, bool cached_reads, struct drm_rect *damage) { const struct drm_format_info *format; unsigned int i, lines; size_t pitch; int ret; format = fb->format; if (format->format == DRM_FORMAT_XRGB8888 && gdrm->xrgb8888_emulation_format) format = gdrm->xrgb8888_emulation_format; /* Split update if it's too big */ pitch = drm_format_info_min_pitch(format, 0, drm_rect_width(damage)); lines = 
drm_rect_height(damage); if (gdrm->bulk_len < lines * pitch) lines = gdrm->bulk_len / pitch; for (i = 0; i < DIV_ROUND_UP(drm_rect_height(damage), lines); i++) { struct drm_rect rect = *damage; rect.y1 += i * lines; rect.y2 = min_t(u32, rect.y1 + lines, damage->y2); ret = gud_flush_rect(gdrm, fb, src, cached_reads, format, &rect); if (ret) { if (ret != -ENODEV && ret != -ECONNRESET && ret != -ESHUTDOWN && ret != -EPROTO) dev_err_ratelimited(fb->dev->dev, "Failed to flush framebuffer: error=%d\n", ret); gdrm->prev_flush_failed = true; break; } } } void gud_flush_work(struct work_struct *work) { struct gud_device *gdrm = container_of(work, struct gud_device, work); struct iosys_map shadow_map; struct drm_framebuffer *fb; struct drm_rect damage; int idx; if (!drm_dev_enter(&gdrm->drm, &idx)) return; mutex_lock(&gdrm->damage_lock); fb = gdrm->fb; gdrm->fb = NULL; iosys_map_set_vaddr(&shadow_map, gdrm->shadow_buf); damage = gdrm->damage; gud_clear_damage(gdrm); mutex_unlock(&gdrm->damage_lock); if (!fb) goto out; gud_flush_damage(gdrm, fb, &shadow_map, true, &damage); drm_framebuffer_put(fb); out: drm_dev_exit(idx); } static int gud_fb_queue_damage(struct gud_device *gdrm, struct drm_framebuffer *fb, const struct iosys_map *src, struct drm_rect *damage) { struct drm_framebuffer *old_fb = NULL; struct iosys_map shadow_map; mutex_lock(&gdrm->damage_lock); if (!gdrm->shadow_buf) { gdrm->shadow_buf = vcalloc(fb->pitches[0], fb->height); if (!gdrm->shadow_buf) { mutex_unlock(&gdrm->damage_lock); return -ENOMEM; } } iosys_map_set_vaddr(&shadow_map, gdrm->shadow_buf); iosys_map_incr(&shadow_map, drm_fb_clip_offset(fb->pitches[0], fb->format, damage)); drm_fb_memcpy(&shadow_map, fb->pitches, src, fb, damage); if (fb != gdrm->fb) { old_fb = gdrm->fb; drm_framebuffer_get(fb); gdrm->fb = fb; } gdrm->damage.x1 = min(gdrm->damage.x1, damage->x1); gdrm->damage.y1 = min(gdrm->damage.y1, damage->y1); gdrm->damage.x2 = max(gdrm->damage.x2, damage->x2); gdrm->damage.y2 = 
max(gdrm->damage.y2, damage->y2); mutex_unlock(&gdrm->damage_lock); queue_work(system_long_wq, &gdrm->work); if (old_fb) drm_framebuffer_put(old_fb); return 0; } static void gud_fb_handle_damage(struct gud_device *gdrm, struct drm_framebuffer *fb, const struct iosys_map *src, struct drm_rect *damage) { int ret; if (gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE) drm_rect_init(damage, 0, 0, fb->width, fb->height); if (gud_async_flush) { ret = gud_fb_queue_damage(gdrm, fb, src, damage); if (ret != -ENOMEM) return; } /* Imported buffers are assumed to be WriteCombined with uncached reads */ gud_flush_damage(gdrm, fb, src, !fb->obj[0]->import_attach, damage); } int gud_pipe_check(struct drm_simple_display_pipe *pipe, struct drm_plane_state *new_plane_state, struct drm_crtc_state *new_crtc_state) { struct gud_device *gdrm = to_gud_device(pipe->crtc.dev); struct drm_plane_state *old_plane_state = pipe->plane.state; const struct drm_display_mode *mode = &new_crtc_state->mode; struct drm_atomic_state *state = new_plane_state->state; struct drm_framebuffer *old_fb = old_plane_state->fb; struct drm_connector_state *connector_state = NULL; struct drm_framebuffer *fb = new_plane_state->fb; const struct drm_format_info *format = fb->format; struct drm_connector *connector; unsigned int i, num_properties; struct gud_state_req *req; int idx, ret; size_t len; if (WARN_ON_ONCE(!fb)) return -EINVAL; if (old_plane_state->rotation != new_plane_state->rotation) new_crtc_state->mode_changed = true; if (old_fb && old_fb->format != format) new_crtc_state->mode_changed = true; if (!new_crtc_state->mode_changed && !new_crtc_state->connectors_changed) return 0; /* Only one connector is supported */ if (hweight32(new_crtc_state->connector_mask) != 1) return -EINVAL; if (format->format == DRM_FORMAT_XRGB8888 && gdrm->xrgb8888_emulation_format) format = gdrm->xrgb8888_emulation_format; for_each_new_connector_in_state(state, connector, connector_state, i) { if (connector_state->crtc) break; } /* * 
DRM_IOCTL_MODE_OBJ_SETPROPERTY on the rotation property will not have * the connector included in the state. */ if (!connector_state) { struct drm_connector_list_iter conn_iter; drm_connector_list_iter_begin(pipe->crtc.dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { if (connector->state->crtc) { connector_state = connector->state; break; } } drm_connector_list_iter_end(&conn_iter); } if (WARN_ON_ONCE(!connector_state)) return -ENOENT; len = struct_size(req, properties, GUD_PROPERTIES_MAX_NUM + GUD_CONNECTOR_PROPERTIES_MAX_NUM); req = kzalloc(len, GFP_KERNEL); if (!req) return -ENOMEM; gud_from_display_mode(&req->mode, mode); req->format = gud_from_fourcc(format->format); if (WARN_ON_ONCE(!req->format)) { ret = -EINVAL; goto out; } req->connector = drm_connector_index(connector_state->connector); ret = gud_connector_fill_properties(connector_state, req->properties); if (ret < 0) goto out; num_properties = ret; for (i = 0; i < gdrm->num_properties; i++) { u16 prop = gdrm->properties[i]; u64 val; switch (prop) { case GUD_PROPERTY_ROTATION: /* DRM UAPI matches the protocol so use value directly */ val = new_plane_state->rotation; break; default: WARN_ON_ONCE(1); ret = -EINVAL; goto out; } req->properties[num_properties + i].prop = cpu_to_le16(prop); req->properties[num_properties + i].val = cpu_to_le64(val); num_properties++; } if (drm_dev_enter(fb->dev, &idx)) { len = struct_size(req, properties, num_properties); ret = gud_usb_set(gdrm, GUD_REQ_SET_STATE_CHECK, 0, req, len); drm_dev_exit(idx); } else { ret = -ENODEV; } out: kfree(req); return ret; } void gud_pipe_update(struct drm_simple_display_pipe *pipe, struct drm_plane_state *old_state) { struct drm_device *drm = pipe->crtc.dev; struct gud_device *gdrm = to_gud_device(drm); struct drm_plane_state *state = pipe->plane.state; struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state); struct drm_framebuffer *fb = state->fb; struct drm_crtc *crtc = &pipe->crtc; struct 
drm_rect damage; int ret, idx; if (crtc->state->mode_changed || !crtc->state->enable) { cancel_work_sync(&gdrm->work); mutex_lock(&gdrm->damage_lock); if (gdrm->fb) { drm_framebuffer_put(gdrm->fb); gdrm->fb = NULL; } gud_clear_damage(gdrm); vfree(gdrm->shadow_buf); gdrm->shadow_buf = NULL; mutex_unlock(&gdrm->damage_lock); } if (!drm_dev_enter(drm, &idx)) return; if (!old_state->fb) gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 1); if (fb && (crtc->state->mode_changed || crtc->state->connectors_changed)) gud_usb_set(gdrm, GUD_REQ_SET_STATE_COMMIT, 0, NULL, 0); if (crtc->state->active_changed) gud_usb_set_u8(gdrm, GUD_REQ_SET_DISPLAY_ENABLE, crtc->state->active); if (!fb) goto ctrl_disable; ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); if (ret) goto ctrl_disable; if (drm_atomic_helper_damage_merged(old_state, state, &damage)) gud_fb_handle_damage(gdrm, fb, &shadow_plane_state->data[0], &damage); drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); ctrl_disable: if (!crtc->state->enable) gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 0); drm_dev_exit(idx); }
linux-master
drivers/gpu/drm/gud/gud_pipe.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2017-2018, Bootlin */ #include <linux/delay.h> #include <linux/device.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/gpio/consumer.h> #include <linux/regulator/consumer.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_modes.h> #include <drm/drm_panel.h> #include <video/mipi_display.h> enum ili9881c_op { ILI9881C_SWITCH_PAGE, ILI9881C_COMMAND, }; struct ili9881c_instr { enum ili9881c_op op; union arg { struct cmd { u8 cmd; u8 data; } cmd; u8 page; } arg; }; struct ili9881c_desc { const struct ili9881c_instr *init; const size_t init_length; const struct drm_display_mode *mode; const unsigned long mode_flags; }; struct ili9881c { struct drm_panel panel; struct mipi_dsi_device *dsi; const struct ili9881c_desc *desc; struct regulator *power; struct gpio_desc *reset; enum drm_panel_orientation orientation; }; #define ILI9881C_SWITCH_PAGE_INSTR(_page) \ { \ .op = ILI9881C_SWITCH_PAGE, \ .arg = { \ .page = (_page), \ }, \ } #define ILI9881C_COMMAND_INSTR(_cmd, _data) \ { \ .op = ILI9881C_COMMAND, \ .arg = { \ .cmd = { \ .cmd = (_cmd), \ .data = (_data), \ }, \ }, \ } static const struct ili9881c_instr lhr050h41_init[] = { ILI9881C_SWITCH_PAGE_INSTR(3), ILI9881C_COMMAND_INSTR(0x01, 0x00), ILI9881C_COMMAND_INSTR(0x02, 0x00), ILI9881C_COMMAND_INSTR(0x03, 0x73), ILI9881C_COMMAND_INSTR(0x04, 0x03), ILI9881C_COMMAND_INSTR(0x05, 0x00), ILI9881C_COMMAND_INSTR(0x06, 0x06), ILI9881C_COMMAND_INSTR(0x07, 0x06), ILI9881C_COMMAND_INSTR(0x08, 0x00), ILI9881C_COMMAND_INSTR(0x09, 0x18), ILI9881C_COMMAND_INSTR(0x0a, 0x04), ILI9881C_COMMAND_INSTR(0x0b, 0x00), ILI9881C_COMMAND_INSTR(0x0c, 0x02), ILI9881C_COMMAND_INSTR(0x0d, 0x03), ILI9881C_COMMAND_INSTR(0x0e, 0x00), ILI9881C_COMMAND_INSTR(0x0f, 0x25), ILI9881C_COMMAND_INSTR(0x10, 0x25), ILI9881C_COMMAND_INSTR(0x11, 0x00), ILI9881C_COMMAND_INSTR(0x12, 0x00), ILI9881C_COMMAND_INSTR(0x13, 0x00), 
ILI9881C_COMMAND_INSTR(0x14, 0x00), ILI9881C_COMMAND_INSTR(0x15, 0x00), ILI9881C_COMMAND_INSTR(0x16, 0x0C), ILI9881C_COMMAND_INSTR(0x17, 0x00), ILI9881C_COMMAND_INSTR(0x18, 0x00), ILI9881C_COMMAND_INSTR(0x19, 0x00), ILI9881C_COMMAND_INSTR(0x1a, 0x00), ILI9881C_COMMAND_INSTR(0x1b, 0x00), ILI9881C_COMMAND_INSTR(0x1c, 0x00), ILI9881C_COMMAND_INSTR(0x1d, 0x00), ILI9881C_COMMAND_INSTR(0x1e, 0xC0), ILI9881C_COMMAND_INSTR(0x1f, 0x80), ILI9881C_COMMAND_INSTR(0x20, 0x04), ILI9881C_COMMAND_INSTR(0x21, 0x01), ILI9881C_COMMAND_INSTR(0x22, 0x00), ILI9881C_COMMAND_INSTR(0x23, 0x00), ILI9881C_COMMAND_INSTR(0x24, 0x00), ILI9881C_COMMAND_INSTR(0x25, 0x00), ILI9881C_COMMAND_INSTR(0x26, 0x00), ILI9881C_COMMAND_INSTR(0x27, 0x00), ILI9881C_COMMAND_INSTR(0x28, 0x33), ILI9881C_COMMAND_INSTR(0x29, 0x03), ILI9881C_COMMAND_INSTR(0x2a, 0x00), ILI9881C_COMMAND_INSTR(0x2b, 0x00), ILI9881C_COMMAND_INSTR(0x2c, 0x00), ILI9881C_COMMAND_INSTR(0x2d, 0x00), ILI9881C_COMMAND_INSTR(0x2e, 0x00), ILI9881C_COMMAND_INSTR(0x2f, 0x00), ILI9881C_COMMAND_INSTR(0x30, 0x00), ILI9881C_COMMAND_INSTR(0x31, 0x00), ILI9881C_COMMAND_INSTR(0x32, 0x00), ILI9881C_COMMAND_INSTR(0x33, 0x00), ILI9881C_COMMAND_INSTR(0x34, 0x04), ILI9881C_COMMAND_INSTR(0x35, 0x00), ILI9881C_COMMAND_INSTR(0x36, 0x00), ILI9881C_COMMAND_INSTR(0x37, 0x00), ILI9881C_COMMAND_INSTR(0x38, 0x3C), ILI9881C_COMMAND_INSTR(0x39, 0x00), ILI9881C_COMMAND_INSTR(0x3a, 0x00), ILI9881C_COMMAND_INSTR(0x3b, 0x00), ILI9881C_COMMAND_INSTR(0x3c, 0x00), ILI9881C_COMMAND_INSTR(0x3d, 0x00), ILI9881C_COMMAND_INSTR(0x3e, 0x00), ILI9881C_COMMAND_INSTR(0x3f, 0x00), ILI9881C_COMMAND_INSTR(0x40, 0x00), ILI9881C_COMMAND_INSTR(0x41, 0x00), ILI9881C_COMMAND_INSTR(0x42, 0x00), ILI9881C_COMMAND_INSTR(0x43, 0x00), ILI9881C_COMMAND_INSTR(0x44, 0x00), ILI9881C_COMMAND_INSTR(0x50, 0x01), ILI9881C_COMMAND_INSTR(0x51, 0x23), ILI9881C_COMMAND_INSTR(0x52, 0x45), ILI9881C_COMMAND_INSTR(0x53, 0x67), ILI9881C_COMMAND_INSTR(0x54, 0x89), ILI9881C_COMMAND_INSTR(0x55, 0xab), 
ILI9881C_COMMAND_INSTR(0x56, 0x01), ILI9881C_COMMAND_INSTR(0x57, 0x23), ILI9881C_COMMAND_INSTR(0x58, 0x45), ILI9881C_COMMAND_INSTR(0x59, 0x67), ILI9881C_COMMAND_INSTR(0x5a, 0x89), ILI9881C_COMMAND_INSTR(0x5b, 0xab), ILI9881C_COMMAND_INSTR(0x5c, 0xcd), ILI9881C_COMMAND_INSTR(0x5d, 0xef), ILI9881C_COMMAND_INSTR(0x5e, 0x11), ILI9881C_COMMAND_INSTR(0x5f, 0x02), ILI9881C_COMMAND_INSTR(0x60, 0x02), ILI9881C_COMMAND_INSTR(0x61, 0x02), ILI9881C_COMMAND_INSTR(0x62, 0x02), ILI9881C_COMMAND_INSTR(0x63, 0x02), ILI9881C_COMMAND_INSTR(0x64, 0x02), ILI9881C_COMMAND_INSTR(0x65, 0x02), ILI9881C_COMMAND_INSTR(0x66, 0x02), ILI9881C_COMMAND_INSTR(0x67, 0x02), ILI9881C_COMMAND_INSTR(0x68, 0x02), ILI9881C_COMMAND_INSTR(0x69, 0x02), ILI9881C_COMMAND_INSTR(0x6a, 0x0C), ILI9881C_COMMAND_INSTR(0x6b, 0x02), ILI9881C_COMMAND_INSTR(0x6c, 0x0F), ILI9881C_COMMAND_INSTR(0x6d, 0x0E), ILI9881C_COMMAND_INSTR(0x6e, 0x0D), ILI9881C_COMMAND_INSTR(0x6f, 0x06), ILI9881C_COMMAND_INSTR(0x70, 0x07), ILI9881C_COMMAND_INSTR(0x71, 0x02), ILI9881C_COMMAND_INSTR(0x72, 0x02), ILI9881C_COMMAND_INSTR(0x73, 0x02), ILI9881C_COMMAND_INSTR(0x74, 0x02), ILI9881C_COMMAND_INSTR(0x75, 0x02), ILI9881C_COMMAND_INSTR(0x76, 0x02), ILI9881C_COMMAND_INSTR(0x77, 0x02), ILI9881C_COMMAND_INSTR(0x78, 0x02), ILI9881C_COMMAND_INSTR(0x79, 0x02), ILI9881C_COMMAND_INSTR(0x7a, 0x02), ILI9881C_COMMAND_INSTR(0x7b, 0x02), ILI9881C_COMMAND_INSTR(0x7c, 0x02), ILI9881C_COMMAND_INSTR(0x7d, 0x02), ILI9881C_COMMAND_INSTR(0x7e, 0x02), ILI9881C_COMMAND_INSTR(0x7f, 0x02), ILI9881C_COMMAND_INSTR(0x80, 0x0C), ILI9881C_COMMAND_INSTR(0x81, 0x02), ILI9881C_COMMAND_INSTR(0x82, 0x0F), ILI9881C_COMMAND_INSTR(0x83, 0x0E), ILI9881C_COMMAND_INSTR(0x84, 0x0D), ILI9881C_COMMAND_INSTR(0x85, 0x06), ILI9881C_COMMAND_INSTR(0x86, 0x07), ILI9881C_COMMAND_INSTR(0x87, 0x02), ILI9881C_COMMAND_INSTR(0x88, 0x02), ILI9881C_COMMAND_INSTR(0x89, 0x02), ILI9881C_COMMAND_INSTR(0x8A, 0x02), ILI9881C_SWITCH_PAGE_INSTR(4), ILI9881C_COMMAND_INSTR(0x6C, 0x15), 
ILI9881C_COMMAND_INSTR(0x6E, 0x22), ILI9881C_COMMAND_INSTR(0x6F, 0x33), ILI9881C_COMMAND_INSTR(0x3A, 0xA4), ILI9881C_COMMAND_INSTR(0x8D, 0x0D), ILI9881C_COMMAND_INSTR(0x87, 0xBA), ILI9881C_COMMAND_INSTR(0x26, 0x76), ILI9881C_COMMAND_INSTR(0xB2, 0xD1), ILI9881C_SWITCH_PAGE_INSTR(1), ILI9881C_COMMAND_INSTR(0x22, 0x0A), ILI9881C_COMMAND_INSTR(0x53, 0xDC), ILI9881C_COMMAND_INSTR(0x55, 0xA7), ILI9881C_COMMAND_INSTR(0x50, 0x78), ILI9881C_COMMAND_INSTR(0x51, 0x78), ILI9881C_COMMAND_INSTR(0x31, 0x02), ILI9881C_COMMAND_INSTR(0x60, 0x14), ILI9881C_COMMAND_INSTR(0xA0, 0x2A), ILI9881C_COMMAND_INSTR(0xA1, 0x39), ILI9881C_COMMAND_INSTR(0xA2, 0x46), ILI9881C_COMMAND_INSTR(0xA3, 0x0e), ILI9881C_COMMAND_INSTR(0xA4, 0x12), ILI9881C_COMMAND_INSTR(0xA5, 0x25), ILI9881C_COMMAND_INSTR(0xA6, 0x19), ILI9881C_COMMAND_INSTR(0xA7, 0x1d), ILI9881C_COMMAND_INSTR(0xA8, 0xa6), ILI9881C_COMMAND_INSTR(0xA9, 0x1C), ILI9881C_COMMAND_INSTR(0xAA, 0x29), ILI9881C_COMMAND_INSTR(0xAB, 0x85), ILI9881C_COMMAND_INSTR(0xAC, 0x1C), ILI9881C_COMMAND_INSTR(0xAD, 0x1B), ILI9881C_COMMAND_INSTR(0xAE, 0x51), ILI9881C_COMMAND_INSTR(0xAF, 0x22), ILI9881C_COMMAND_INSTR(0xB0, 0x2d), ILI9881C_COMMAND_INSTR(0xB1, 0x4f), ILI9881C_COMMAND_INSTR(0xB2, 0x59), ILI9881C_COMMAND_INSTR(0xB3, 0x3F), ILI9881C_COMMAND_INSTR(0xC0, 0x2A), ILI9881C_COMMAND_INSTR(0xC1, 0x3a), ILI9881C_COMMAND_INSTR(0xC2, 0x45), ILI9881C_COMMAND_INSTR(0xC3, 0x0e), ILI9881C_COMMAND_INSTR(0xC4, 0x11), ILI9881C_COMMAND_INSTR(0xC5, 0x24), ILI9881C_COMMAND_INSTR(0xC6, 0x1a), ILI9881C_COMMAND_INSTR(0xC7, 0x1c), ILI9881C_COMMAND_INSTR(0xC8, 0xaa), ILI9881C_COMMAND_INSTR(0xC9, 0x1C), ILI9881C_COMMAND_INSTR(0xCA, 0x29), ILI9881C_COMMAND_INSTR(0xCB, 0x96), ILI9881C_COMMAND_INSTR(0xCC, 0x1C), ILI9881C_COMMAND_INSTR(0xCD, 0x1B), ILI9881C_COMMAND_INSTR(0xCE, 0x51), ILI9881C_COMMAND_INSTR(0xCF, 0x22), ILI9881C_COMMAND_INSTR(0xD0, 0x2b), ILI9881C_COMMAND_INSTR(0xD1, 0x4b), ILI9881C_COMMAND_INSTR(0xD2, 0x59), ILI9881C_COMMAND_INSTR(0xD3, 0x3F), }; static const struct 
ili9881c_instr k101_im2byl02_init[] = { ILI9881C_SWITCH_PAGE_INSTR(3), ILI9881C_COMMAND_INSTR(0x01, 0x00), ILI9881C_COMMAND_INSTR(0x02, 0x00), ILI9881C_COMMAND_INSTR(0x03, 0x73), ILI9881C_COMMAND_INSTR(0x04, 0x00), ILI9881C_COMMAND_INSTR(0x05, 0x00), ILI9881C_COMMAND_INSTR(0x06, 0x08), ILI9881C_COMMAND_INSTR(0x07, 0x00), ILI9881C_COMMAND_INSTR(0x08, 0x00), ILI9881C_COMMAND_INSTR(0x09, 0x00), ILI9881C_COMMAND_INSTR(0x0A, 0x01), ILI9881C_COMMAND_INSTR(0x0B, 0x01), ILI9881C_COMMAND_INSTR(0x0C, 0x00), ILI9881C_COMMAND_INSTR(0x0D, 0x01), ILI9881C_COMMAND_INSTR(0x0E, 0x01), ILI9881C_COMMAND_INSTR(0x0F, 0x00), ILI9881C_COMMAND_INSTR(0x10, 0x00), ILI9881C_COMMAND_INSTR(0x11, 0x00), ILI9881C_COMMAND_INSTR(0x12, 0x00), ILI9881C_COMMAND_INSTR(0x13, 0x00), ILI9881C_COMMAND_INSTR(0x14, 0x00), ILI9881C_COMMAND_INSTR(0x15, 0x00), ILI9881C_COMMAND_INSTR(0x16, 0x00), ILI9881C_COMMAND_INSTR(0x17, 0x00), ILI9881C_COMMAND_INSTR(0x18, 0x00), ILI9881C_COMMAND_INSTR(0x19, 0x00), ILI9881C_COMMAND_INSTR(0x1A, 0x00), ILI9881C_COMMAND_INSTR(0x1B, 0x00), ILI9881C_COMMAND_INSTR(0x1C, 0x00), ILI9881C_COMMAND_INSTR(0x1D, 0x00), ILI9881C_COMMAND_INSTR(0x1E, 0x40), ILI9881C_COMMAND_INSTR(0x1F, 0xC0), ILI9881C_COMMAND_INSTR(0x20, 0x06), ILI9881C_COMMAND_INSTR(0x21, 0x01), ILI9881C_COMMAND_INSTR(0x22, 0x06), ILI9881C_COMMAND_INSTR(0x23, 0x01), ILI9881C_COMMAND_INSTR(0x24, 0x88), ILI9881C_COMMAND_INSTR(0x25, 0x88), ILI9881C_COMMAND_INSTR(0x26, 0x00), ILI9881C_COMMAND_INSTR(0x27, 0x00), ILI9881C_COMMAND_INSTR(0x28, 0x3B), ILI9881C_COMMAND_INSTR(0x29, 0x03), ILI9881C_COMMAND_INSTR(0x2A, 0x00), ILI9881C_COMMAND_INSTR(0x2B, 0x00), ILI9881C_COMMAND_INSTR(0x2C, 0x00), ILI9881C_COMMAND_INSTR(0x2D, 0x00), ILI9881C_COMMAND_INSTR(0x2E, 0x00), ILI9881C_COMMAND_INSTR(0x2F, 0x00), ILI9881C_COMMAND_INSTR(0x30, 0x00), ILI9881C_COMMAND_INSTR(0x31, 0x00), ILI9881C_COMMAND_INSTR(0x32, 0x00), ILI9881C_COMMAND_INSTR(0x33, 0x00), ILI9881C_COMMAND_INSTR(0x34, 0x00), /* GPWR1/2 non overlap time 2.62us */ 
ILI9881C_COMMAND_INSTR(0x35, 0x00), ILI9881C_COMMAND_INSTR(0x36, 0x00), ILI9881C_COMMAND_INSTR(0x37, 0x00), ILI9881C_COMMAND_INSTR(0x38, 0x00), ILI9881C_COMMAND_INSTR(0x39, 0x00), ILI9881C_COMMAND_INSTR(0x3A, 0x00), ILI9881C_COMMAND_INSTR(0x3B, 0x00), ILI9881C_COMMAND_INSTR(0x3C, 0x00), ILI9881C_COMMAND_INSTR(0x3D, 0x00), ILI9881C_COMMAND_INSTR(0x3E, 0x00), ILI9881C_COMMAND_INSTR(0x3F, 0x00), ILI9881C_COMMAND_INSTR(0x40, 0x00), ILI9881C_COMMAND_INSTR(0x41, 0x00), ILI9881C_COMMAND_INSTR(0x42, 0x00), ILI9881C_COMMAND_INSTR(0x43, 0x00), ILI9881C_COMMAND_INSTR(0x44, 0x00), ILI9881C_COMMAND_INSTR(0x50, 0x01), ILI9881C_COMMAND_INSTR(0x51, 0x23), ILI9881C_COMMAND_INSTR(0x52, 0x45), ILI9881C_COMMAND_INSTR(0x53, 0x67), ILI9881C_COMMAND_INSTR(0x54, 0x89), ILI9881C_COMMAND_INSTR(0x55, 0xAB), ILI9881C_COMMAND_INSTR(0x56, 0x01), ILI9881C_COMMAND_INSTR(0x57, 0x23), ILI9881C_COMMAND_INSTR(0x58, 0x45), ILI9881C_COMMAND_INSTR(0x59, 0x67), ILI9881C_COMMAND_INSTR(0x5A, 0x89), ILI9881C_COMMAND_INSTR(0x5B, 0xAB), ILI9881C_COMMAND_INSTR(0x5C, 0xCD), ILI9881C_COMMAND_INSTR(0x5D, 0xEF), ILI9881C_COMMAND_INSTR(0x5E, 0x00), ILI9881C_COMMAND_INSTR(0x5F, 0x01), ILI9881C_COMMAND_INSTR(0x60, 0x01), ILI9881C_COMMAND_INSTR(0x61, 0x06), ILI9881C_COMMAND_INSTR(0x62, 0x06), ILI9881C_COMMAND_INSTR(0x63, 0x07), ILI9881C_COMMAND_INSTR(0x64, 0x07), ILI9881C_COMMAND_INSTR(0x65, 0x00), ILI9881C_COMMAND_INSTR(0x66, 0x00), ILI9881C_COMMAND_INSTR(0x67, 0x02), ILI9881C_COMMAND_INSTR(0x68, 0x02), ILI9881C_COMMAND_INSTR(0x69, 0x05), ILI9881C_COMMAND_INSTR(0x6A, 0x05), ILI9881C_COMMAND_INSTR(0x6B, 0x02), ILI9881C_COMMAND_INSTR(0x6C, 0x0D), ILI9881C_COMMAND_INSTR(0x6D, 0x0D), ILI9881C_COMMAND_INSTR(0x6E, 0x0C), ILI9881C_COMMAND_INSTR(0x6F, 0x0C), ILI9881C_COMMAND_INSTR(0x70, 0x0F), ILI9881C_COMMAND_INSTR(0x71, 0x0F), ILI9881C_COMMAND_INSTR(0x72, 0x0E), ILI9881C_COMMAND_INSTR(0x73, 0x0E), ILI9881C_COMMAND_INSTR(0x74, 0x02), ILI9881C_COMMAND_INSTR(0x75, 0x01), ILI9881C_COMMAND_INSTR(0x76, 0x01), 
ILI9881C_COMMAND_INSTR(0x77, 0x06), ILI9881C_COMMAND_INSTR(0x78, 0x06), ILI9881C_COMMAND_INSTR(0x79, 0x07), ILI9881C_COMMAND_INSTR(0x7A, 0x07), ILI9881C_COMMAND_INSTR(0x7B, 0x00), ILI9881C_COMMAND_INSTR(0x7C, 0x00), ILI9881C_COMMAND_INSTR(0x7D, 0x02), ILI9881C_COMMAND_INSTR(0x7E, 0x02), ILI9881C_COMMAND_INSTR(0x7F, 0x05), ILI9881C_COMMAND_INSTR(0x80, 0x05), ILI9881C_COMMAND_INSTR(0x81, 0x02), ILI9881C_COMMAND_INSTR(0x82, 0x0D), ILI9881C_COMMAND_INSTR(0x83, 0x0D), ILI9881C_COMMAND_INSTR(0x84, 0x0C), ILI9881C_COMMAND_INSTR(0x85, 0x0C), ILI9881C_COMMAND_INSTR(0x86, 0x0F), ILI9881C_COMMAND_INSTR(0x87, 0x0F), ILI9881C_COMMAND_INSTR(0x88, 0x0E), ILI9881C_COMMAND_INSTR(0x89, 0x0E), ILI9881C_COMMAND_INSTR(0x8A, 0x02), ILI9881C_SWITCH_PAGE_INSTR(4), ILI9881C_COMMAND_INSTR(0x3B, 0xC0), /* ILI4003D sel */ ILI9881C_COMMAND_INSTR(0x6C, 0x15), /* Set VCORE voltage = 1.5V */ ILI9881C_COMMAND_INSTR(0x6E, 0x2A), /* di_pwr_reg=0 for power mode 2A, VGH clamp 18V */ ILI9881C_COMMAND_INSTR(0x6F, 0x33), /* pumping ratio VGH=5x VGL=-3x */ ILI9881C_COMMAND_INSTR(0x8D, 0x1B), /* VGL clamp -10V */ ILI9881C_COMMAND_INSTR(0x87, 0xBA), /* ESD */ ILI9881C_COMMAND_INSTR(0x3A, 0x24), /* POWER SAVING */ ILI9881C_COMMAND_INSTR(0x26, 0x76), ILI9881C_COMMAND_INSTR(0xB2, 0xD1), ILI9881C_SWITCH_PAGE_INSTR(1), ILI9881C_COMMAND_INSTR(0x22, 0x0A), /* BGR, SS */ ILI9881C_COMMAND_INSTR(0x31, 0x00), /* Zigzag type3 inversion */ ILI9881C_COMMAND_INSTR(0x40, 0x53), /* ILI4003D sel */ ILI9881C_COMMAND_INSTR(0x43, 0x66), ILI9881C_COMMAND_INSTR(0x53, 0x4C), ILI9881C_COMMAND_INSTR(0x50, 0x87), ILI9881C_COMMAND_INSTR(0x51, 0x82), ILI9881C_COMMAND_INSTR(0x60, 0x15), ILI9881C_COMMAND_INSTR(0x61, 0x01), ILI9881C_COMMAND_INSTR(0x62, 0x0C), ILI9881C_COMMAND_INSTR(0x63, 0x00), ILI9881C_COMMAND_INSTR(0xA0, 0x00), ILI9881C_COMMAND_INSTR(0xA1, 0x13), /* VP251 */ ILI9881C_COMMAND_INSTR(0xA2, 0x23), /* VP247 */ ILI9881C_COMMAND_INSTR(0xA3, 0x14), /* VP243 */ ILI9881C_COMMAND_INSTR(0xA4, 0x16), /* VP239 */ 
ILI9881C_COMMAND_INSTR(0xA5, 0x29), /* VP231 */ ILI9881C_COMMAND_INSTR(0xA6, 0x1E), /* VP219 */ ILI9881C_COMMAND_INSTR(0xA7, 0x1D), /* VP203 */ ILI9881C_COMMAND_INSTR(0xA8, 0x86), /* VP175 */ ILI9881C_COMMAND_INSTR(0xA9, 0x1E), /* VP144 */ ILI9881C_COMMAND_INSTR(0xAA, 0x29), /* VP111 */ ILI9881C_COMMAND_INSTR(0xAB, 0x74), /* VP80 */ ILI9881C_COMMAND_INSTR(0xAC, 0x19), /* VP52 */ ILI9881C_COMMAND_INSTR(0xAD, 0x17), /* VP36 */ ILI9881C_COMMAND_INSTR(0xAE, 0x4B), /* VP24 */ ILI9881C_COMMAND_INSTR(0xAF, 0x20), /* VP16 */ ILI9881C_COMMAND_INSTR(0xB0, 0x26), /* VP12 */ ILI9881C_COMMAND_INSTR(0xB1, 0x4C), /* VP8 */ ILI9881C_COMMAND_INSTR(0xB2, 0x5D), /* VP4 */ ILI9881C_COMMAND_INSTR(0xB3, 0x3F), /* VP0 */ ILI9881C_COMMAND_INSTR(0xC0, 0x00), /* VN255 GAMMA N */ ILI9881C_COMMAND_INSTR(0xC1, 0x13), /* VN251 */ ILI9881C_COMMAND_INSTR(0xC2, 0x23), /* VN247 */ ILI9881C_COMMAND_INSTR(0xC3, 0x14), /* VN243 */ ILI9881C_COMMAND_INSTR(0xC4, 0x16), /* VN239 */ ILI9881C_COMMAND_INSTR(0xC5, 0x29), /* VN231 */ ILI9881C_COMMAND_INSTR(0xC6, 0x1E), /* VN219 */ ILI9881C_COMMAND_INSTR(0xC7, 0x1D), /* VN203 */ ILI9881C_COMMAND_INSTR(0xC8, 0x86), /* VN175 */ ILI9881C_COMMAND_INSTR(0xC9, 0x1E), /* VN144 */ ILI9881C_COMMAND_INSTR(0xCA, 0x29), /* VN111 */ ILI9881C_COMMAND_INSTR(0xCB, 0x74), /* VN80 */ ILI9881C_COMMAND_INSTR(0xCC, 0x19), /* VN52 */ ILI9881C_COMMAND_INSTR(0xCD, 0x17), /* VN36 */ ILI9881C_COMMAND_INSTR(0xCE, 0x4B), /* VN24 */ ILI9881C_COMMAND_INSTR(0xCF, 0x20), /* VN16 */ ILI9881C_COMMAND_INSTR(0xD0, 0x26), /* VN12 */ ILI9881C_COMMAND_INSTR(0xD1, 0x4C), /* VN8 */ ILI9881C_COMMAND_INSTR(0xD2, 0x5D), /* VN4 */ ILI9881C_COMMAND_INSTR(0xD3, 0x3F), /* VN0 */ }; static const struct ili9881c_instr tl050hdv35_init[] = { ILI9881C_SWITCH_PAGE_INSTR(3), ILI9881C_COMMAND_INSTR(0x01, 0x00), ILI9881C_COMMAND_INSTR(0x02, 0x00), ILI9881C_COMMAND_INSTR(0x03, 0x73), ILI9881C_COMMAND_INSTR(0x04, 0x00), ILI9881C_COMMAND_INSTR(0x05, 0x00), ILI9881C_COMMAND_INSTR(0x06, 0x0a), ILI9881C_COMMAND_INSTR(0x07, 
0x00), ILI9881C_COMMAND_INSTR(0x08, 0x00), ILI9881C_COMMAND_INSTR(0x09, 0x01), ILI9881C_COMMAND_INSTR(0x0a, 0x00), ILI9881C_COMMAND_INSTR(0x0b, 0x00), ILI9881C_COMMAND_INSTR(0x0c, 0x01), ILI9881C_COMMAND_INSTR(0x0d, 0x00), ILI9881C_COMMAND_INSTR(0x0e, 0x00), ILI9881C_COMMAND_INSTR(0x0f, 0x1d), ILI9881C_COMMAND_INSTR(0x10, 0x1d), ILI9881C_COMMAND_INSTR(0x15, 0x00), ILI9881C_COMMAND_INSTR(0x16, 0x00), ILI9881C_COMMAND_INSTR(0x17, 0x00), ILI9881C_COMMAND_INSTR(0x18, 0x00), ILI9881C_COMMAND_INSTR(0x19, 0x00), ILI9881C_COMMAND_INSTR(0x1a, 0x00), ILI9881C_COMMAND_INSTR(0x1b, 0x00), ILI9881C_COMMAND_INSTR(0x1c, 0x00), ILI9881C_COMMAND_INSTR(0x1d, 0x00), ILI9881C_COMMAND_INSTR(0x1e, 0x40), ILI9881C_COMMAND_INSTR(0x1f, 0x80), ILI9881C_COMMAND_INSTR(0x20, 0x06), ILI9881C_COMMAND_INSTR(0x21, 0x02), ILI9881C_COMMAND_INSTR(0x28, 0x33), ILI9881C_COMMAND_INSTR(0x29, 0x03), ILI9881C_COMMAND_INSTR(0x2a, 0x00), ILI9881C_COMMAND_INSTR(0x2b, 0x00), ILI9881C_COMMAND_INSTR(0x2c, 0x00), ILI9881C_COMMAND_INSTR(0x2d, 0x00), ILI9881C_COMMAND_INSTR(0x2e, 0x00), ILI9881C_COMMAND_INSTR(0x2f, 0x00), ILI9881C_COMMAND_INSTR(0x35, 0x00), ILI9881C_COMMAND_INSTR(0x36, 0x00), ILI9881C_COMMAND_INSTR(0x37, 0x00), ILI9881C_COMMAND_INSTR(0x38, 0x3C), ILI9881C_COMMAND_INSTR(0x39, 0x00), ILI9881C_COMMAND_INSTR(0x3a, 0x40), ILI9881C_COMMAND_INSTR(0x3b, 0x40), ILI9881C_COMMAND_INSTR(0x3c, 0x00), ILI9881C_COMMAND_INSTR(0x3d, 0x00), ILI9881C_COMMAND_INSTR(0x3e, 0x00), ILI9881C_COMMAND_INSTR(0x3f, 0x00), ILI9881C_COMMAND_INSTR(0x40, 0x00), ILI9881C_COMMAND_INSTR(0x41, 0x00), ILI9881C_COMMAND_INSTR(0x42, 0x00), ILI9881C_COMMAND_INSTR(0x43, 0x00), ILI9881C_COMMAND_INSTR(0x44, 0x00), ILI9881C_COMMAND_INSTR(0x55, 0xab), ILI9881C_COMMAND_INSTR(0x5a, 0x89), ILI9881C_COMMAND_INSTR(0x5b, 0xab), ILI9881C_COMMAND_INSTR(0x5c, 0xcd), ILI9881C_COMMAND_INSTR(0x5d, 0xef), ILI9881C_COMMAND_INSTR(0x5e, 0x11), ILI9881C_COMMAND_INSTR(0x5f, 0x01), ILI9881C_COMMAND_INSTR(0x60, 0x00), ILI9881C_COMMAND_INSTR(0x61, 0x15), 
ILI9881C_COMMAND_INSTR(0x62, 0x14), ILI9881C_COMMAND_INSTR(0x63, 0x0e), ILI9881C_COMMAND_INSTR(0x64, 0x0f), ILI9881C_COMMAND_INSTR(0x65, 0x0c), ILI9881C_COMMAND_INSTR(0x66, 0x0d), ILI9881C_COMMAND_INSTR(0x67, 0x06), ILI9881C_COMMAND_INSTR(0x68, 0x02), ILI9881C_COMMAND_INSTR(0x69, 0x07), ILI9881C_COMMAND_INSTR(0x6a, 0x02), ILI9881C_COMMAND_INSTR(0x6b, 0x02), ILI9881C_COMMAND_INSTR(0x6c, 0x02), ILI9881C_COMMAND_INSTR(0x6d, 0x02), ILI9881C_COMMAND_INSTR(0x6e, 0x02), ILI9881C_COMMAND_INSTR(0x6f, 0x02), ILI9881C_COMMAND_INSTR(0x70, 0x02), ILI9881C_COMMAND_INSTR(0x71, 0x02), ILI9881C_COMMAND_INSTR(0x72, 0x02), ILI9881C_COMMAND_INSTR(0x73, 0x02), ILI9881C_COMMAND_INSTR(0x74, 0x02), ILI9881C_COMMAND_INSTR(0x75, 0x01), ILI9881C_COMMAND_INSTR(0x76, 0x00), ILI9881C_COMMAND_INSTR(0x77, 0x14), ILI9881C_COMMAND_INSTR(0x78, 0x15), ILI9881C_COMMAND_INSTR(0x79, 0x0e), ILI9881C_COMMAND_INSTR(0x7a, 0x0f), ILI9881C_COMMAND_INSTR(0x7b, 0x0c), ILI9881C_COMMAND_INSTR(0x7c, 0x0d), ILI9881C_COMMAND_INSTR(0x7d, 0x06), ILI9881C_COMMAND_INSTR(0x7e, 0x02), ILI9881C_COMMAND_INSTR(0x7f, 0x07), ILI9881C_COMMAND_INSTR(0x88, 0x02), ILI9881C_COMMAND_INSTR(0x89, 0x02), ILI9881C_COMMAND_INSTR(0x8A, 0x02), ILI9881C_SWITCH_PAGE_INSTR(4), ILI9881C_COMMAND_INSTR(0x38, 0x01), ILI9881C_COMMAND_INSTR(0x39, 0x00), ILI9881C_COMMAND_INSTR(0x6c, 0x15), ILI9881C_COMMAND_INSTR(0x6e, 0x2b), ILI9881C_COMMAND_INSTR(0x6f, 0x33), ILI9881C_COMMAND_INSTR(0x8d, 0x18), ILI9881C_COMMAND_INSTR(0x87, 0xba), ILI9881C_COMMAND_INSTR(0x26, 0x76), ILI9881C_COMMAND_INSTR(0xb2, 0xd1), ILI9881C_COMMAND_INSTR(0xb5, 0x06), ILI9881C_COMMAND_INSTR(0x3a, 0x24), ILI9881C_COMMAND_INSTR(0x35, 0x1f), ILI9881C_COMMAND_INSTR(0x33, 0x14), ILI9881C_COMMAND_INSTR(0x3b, 0x98), ILI9881C_SWITCH_PAGE_INSTR(1), ILI9881C_COMMAND_INSTR(0x22, 0x0a), ILI9881C_COMMAND_INSTR(0x31, 0x00), ILI9881C_COMMAND_INSTR(0x40, 0x33), ILI9881C_COMMAND_INSTR(0x53, 0xa2), ILI9881C_COMMAND_INSTR(0x55, 0x92), ILI9881C_COMMAND_INSTR(0x50, 0x96), ILI9881C_COMMAND_INSTR(0x51, 
0x96), ILI9881C_COMMAND_INSTR(0x60, 0x22), ILI9881C_COMMAND_INSTR(0x61, 0x00), ILI9881C_COMMAND_INSTR(0x62, 0x19), ILI9881C_COMMAND_INSTR(0x63, 0x00), ILI9881C_COMMAND_INSTR(0xa0, 0x08), ILI9881C_COMMAND_INSTR(0xa1, 0x11), ILI9881C_COMMAND_INSTR(0xa2, 0x19), ILI9881C_COMMAND_INSTR(0xa3, 0x0d), ILI9881C_COMMAND_INSTR(0xa4, 0x0d), ILI9881C_COMMAND_INSTR(0xa5, 0x1e), ILI9881C_COMMAND_INSTR(0xa6, 0x14), ILI9881C_COMMAND_INSTR(0xa7, 0x17), ILI9881C_COMMAND_INSTR(0xa8, 0x4f), ILI9881C_COMMAND_INSTR(0xa9, 0x1a), ILI9881C_COMMAND_INSTR(0xaa, 0x27), ILI9881C_COMMAND_INSTR(0xab, 0x49), ILI9881C_COMMAND_INSTR(0xac, 0x1a), ILI9881C_COMMAND_INSTR(0xad, 0x18), ILI9881C_COMMAND_INSTR(0xae, 0x4c), ILI9881C_COMMAND_INSTR(0xaf, 0x22), ILI9881C_COMMAND_INSTR(0xb0, 0x27), ILI9881C_COMMAND_INSTR(0xb1, 0x4b), ILI9881C_COMMAND_INSTR(0xb2, 0x60), ILI9881C_COMMAND_INSTR(0xb3, 0x39), ILI9881C_COMMAND_INSTR(0xc0, 0x08), ILI9881C_COMMAND_INSTR(0xc1, 0x11), ILI9881C_COMMAND_INSTR(0xc2, 0x19), ILI9881C_COMMAND_INSTR(0xc3, 0x0d), ILI9881C_COMMAND_INSTR(0xc4, 0x0d), ILI9881C_COMMAND_INSTR(0xc5, 0x1e), ILI9881C_COMMAND_INSTR(0xc6, 0x14), ILI9881C_COMMAND_INSTR(0xc7, 0x17), ILI9881C_COMMAND_INSTR(0xc8, 0x4f), ILI9881C_COMMAND_INSTR(0xc9, 0x1a), ILI9881C_COMMAND_INSTR(0xca, 0x27), ILI9881C_COMMAND_INSTR(0xcb, 0x49), ILI9881C_COMMAND_INSTR(0xcc, 0x1a), ILI9881C_COMMAND_INSTR(0xcd, 0x18), ILI9881C_COMMAND_INSTR(0xce, 0x4c), ILI9881C_COMMAND_INSTR(0xcf, 0x33), ILI9881C_COMMAND_INSTR(0xd0, 0x27), ILI9881C_COMMAND_INSTR(0xd1, 0x4b), ILI9881C_COMMAND_INSTR(0xd2, 0x60), ILI9881C_COMMAND_INSTR(0xd3, 0x39), ILI9881C_SWITCH_PAGE_INSTR(0), ILI9881C_COMMAND_INSTR(0x36, 0x03), }; static const struct ili9881c_instr w552946ab_init[] = { ILI9881C_SWITCH_PAGE_INSTR(3), ILI9881C_COMMAND_INSTR(0x01, 0x00), ILI9881C_COMMAND_INSTR(0x02, 0x00), ILI9881C_COMMAND_INSTR(0x03, 0x53), ILI9881C_COMMAND_INSTR(0x04, 0x53), ILI9881C_COMMAND_INSTR(0x05, 0x13), ILI9881C_COMMAND_INSTR(0x06, 0x04), ILI9881C_COMMAND_INSTR(0x07, 0x02), 
ILI9881C_COMMAND_INSTR(0x08, 0x02), ILI9881C_COMMAND_INSTR(0x09, 0x00), ILI9881C_COMMAND_INSTR(0x0A, 0x00), ILI9881C_COMMAND_INSTR(0x0B, 0x00), ILI9881C_COMMAND_INSTR(0x0C, 0x00), ILI9881C_COMMAND_INSTR(0x0D, 0x00), ILI9881C_COMMAND_INSTR(0x0E, 0x00), ILI9881C_COMMAND_INSTR(0x0F, 0x00), ILI9881C_COMMAND_INSTR(0x10, 0x00), ILI9881C_COMMAND_INSTR(0x11, 0x00), ILI9881C_COMMAND_INSTR(0x12, 0x00), ILI9881C_COMMAND_INSTR(0x13, 0x00), ILI9881C_COMMAND_INSTR(0x14, 0x00), ILI9881C_COMMAND_INSTR(0x15, 0x08), ILI9881C_COMMAND_INSTR(0x16, 0x10), ILI9881C_COMMAND_INSTR(0x17, 0x00), ILI9881C_COMMAND_INSTR(0x18, 0x08), ILI9881C_COMMAND_INSTR(0x19, 0x00), ILI9881C_COMMAND_INSTR(0x1A, 0x00), ILI9881C_COMMAND_INSTR(0x1B, 0x00), ILI9881C_COMMAND_INSTR(0x1C, 0x00), ILI9881C_COMMAND_INSTR(0x1D, 0x00), ILI9881C_COMMAND_INSTR(0x1E, 0xC0), ILI9881C_COMMAND_INSTR(0x1F, 0x80), ILI9881C_COMMAND_INSTR(0x20, 0x02), ILI9881C_COMMAND_INSTR(0x21, 0x09), ILI9881C_COMMAND_INSTR(0x22, 0x00), ILI9881C_COMMAND_INSTR(0x23, 0x00), ILI9881C_COMMAND_INSTR(0x24, 0x00), ILI9881C_COMMAND_INSTR(0x25, 0x00), ILI9881C_COMMAND_INSTR(0x26, 0x00), ILI9881C_COMMAND_INSTR(0x27, 0x00), ILI9881C_COMMAND_INSTR(0x28, 0x55), ILI9881C_COMMAND_INSTR(0x29, 0x03), ILI9881C_COMMAND_INSTR(0x2A, 0x00), ILI9881C_COMMAND_INSTR(0x2B, 0x00), ILI9881C_COMMAND_INSTR(0x2C, 0x00), ILI9881C_COMMAND_INSTR(0x2D, 0x00), ILI9881C_COMMAND_INSTR(0x2E, 0x00), ILI9881C_COMMAND_INSTR(0x2F, 0x00), ILI9881C_COMMAND_INSTR(0x30, 0x00), ILI9881C_COMMAND_INSTR(0x31, 0x00), ILI9881C_COMMAND_INSTR(0x32, 0x00), ILI9881C_COMMAND_INSTR(0x33, 0x00), ILI9881C_COMMAND_INSTR(0x34, 0x04), ILI9881C_COMMAND_INSTR(0x35, 0x05), ILI9881C_COMMAND_INSTR(0x36, 0x05), ILI9881C_COMMAND_INSTR(0x37, 0x00), ILI9881C_COMMAND_INSTR(0x38, 0x3C), ILI9881C_COMMAND_INSTR(0x39, 0x35), ILI9881C_COMMAND_INSTR(0x3A, 0x00), ILI9881C_COMMAND_INSTR(0x3B, 0x40), ILI9881C_COMMAND_INSTR(0x3C, 0x00), ILI9881C_COMMAND_INSTR(0x3D, 0x00), ILI9881C_COMMAND_INSTR(0x3E, 0x00), 
ILI9881C_COMMAND_INSTR(0x3F, 0x00), ILI9881C_COMMAND_INSTR(0x40, 0x00), ILI9881C_COMMAND_INSTR(0x41, 0x88), ILI9881C_COMMAND_INSTR(0x42, 0x00), ILI9881C_COMMAND_INSTR(0x43, 0x00), ILI9881C_COMMAND_INSTR(0x44, 0x1F), ILI9881C_COMMAND_INSTR(0x50, 0x01), ILI9881C_COMMAND_INSTR(0x51, 0x23), ILI9881C_COMMAND_INSTR(0x52, 0x45), ILI9881C_COMMAND_INSTR(0x53, 0x67), ILI9881C_COMMAND_INSTR(0x54, 0x89), ILI9881C_COMMAND_INSTR(0x55, 0xaB), ILI9881C_COMMAND_INSTR(0x56, 0x01), ILI9881C_COMMAND_INSTR(0x57, 0x23), ILI9881C_COMMAND_INSTR(0x58, 0x45), ILI9881C_COMMAND_INSTR(0x59, 0x67), ILI9881C_COMMAND_INSTR(0x5A, 0x89), ILI9881C_COMMAND_INSTR(0x5B, 0xAB), ILI9881C_COMMAND_INSTR(0x5C, 0xCD), ILI9881C_COMMAND_INSTR(0x5D, 0xEF), ILI9881C_COMMAND_INSTR(0x5E, 0x03), ILI9881C_COMMAND_INSTR(0x5F, 0x14), ILI9881C_COMMAND_INSTR(0x60, 0x15), ILI9881C_COMMAND_INSTR(0x61, 0x0C), ILI9881C_COMMAND_INSTR(0x62, 0x0D), ILI9881C_COMMAND_INSTR(0x63, 0x0E), ILI9881C_COMMAND_INSTR(0x64, 0x0F), ILI9881C_COMMAND_INSTR(0x65, 0x10), ILI9881C_COMMAND_INSTR(0x66, 0x11), ILI9881C_COMMAND_INSTR(0x67, 0x08), ILI9881C_COMMAND_INSTR(0x68, 0x02), ILI9881C_COMMAND_INSTR(0x69, 0x0A), ILI9881C_COMMAND_INSTR(0x6A, 0x02), ILI9881C_COMMAND_INSTR(0x6B, 0x02), ILI9881C_COMMAND_INSTR(0x6C, 0x02), ILI9881C_COMMAND_INSTR(0x6D, 0x02), ILI9881C_COMMAND_INSTR(0x6E, 0x02), ILI9881C_COMMAND_INSTR(0x6F, 0x02), ILI9881C_COMMAND_INSTR(0x70, 0x02), ILI9881C_COMMAND_INSTR(0x71, 0x02), ILI9881C_COMMAND_INSTR(0x72, 0x06), ILI9881C_COMMAND_INSTR(0x73, 0x02), ILI9881C_COMMAND_INSTR(0x74, 0x02), ILI9881C_COMMAND_INSTR(0x75, 0x14), ILI9881C_COMMAND_INSTR(0x76, 0x15), ILI9881C_COMMAND_INSTR(0x77, 0x0F), ILI9881C_COMMAND_INSTR(0x78, 0x0E), ILI9881C_COMMAND_INSTR(0x79, 0x0D), ILI9881C_COMMAND_INSTR(0x7A, 0x0C), ILI9881C_COMMAND_INSTR(0x7B, 0x11), ILI9881C_COMMAND_INSTR(0x7C, 0x10), ILI9881C_COMMAND_INSTR(0x7D, 0x06), ILI9881C_COMMAND_INSTR(0x7E, 0x02), ILI9881C_COMMAND_INSTR(0x7F, 0x0A), ILI9881C_COMMAND_INSTR(0x80, 0x02), 
ILI9881C_COMMAND_INSTR(0x81, 0x02), ILI9881C_COMMAND_INSTR(0x82, 0x02), ILI9881C_COMMAND_INSTR(0x83, 0x02), ILI9881C_COMMAND_INSTR(0x84, 0x02), ILI9881C_COMMAND_INSTR(0x85, 0x02), ILI9881C_COMMAND_INSTR(0x86, 0x02), ILI9881C_COMMAND_INSTR(0x87, 0x02), ILI9881C_COMMAND_INSTR(0x88, 0x08), ILI9881C_COMMAND_INSTR(0x89, 0x02), ILI9881C_COMMAND_INSTR(0x8A, 0x02), ILI9881C_SWITCH_PAGE_INSTR(4), ILI9881C_COMMAND_INSTR(0x00, 0x80), ILI9881C_COMMAND_INSTR(0x70, 0x00), ILI9881C_COMMAND_INSTR(0x71, 0x00), ILI9881C_COMMAND_INSTR(0x66, 0xFE), ILI9881C_COMMAND_INSTR(0x82, 0x15), ILI9881C_COMMAND_INSTR(0x84, 0x15), ILI9881C_COMMAND_INSTR(0x85, 0x15), ILI9881C_COMMAND_INSTR(0x3a, 0x24), ILI9881C_COMMAND_INSTR(0x32, 0xAC), ILI9881C_COMMAND_INSTR(0x8C, 0x80), ILI9881C_COMMAND_INSTR(0x3C, 0xF5), ILI9881C_COMMAND_INSTR(0x88, 0x33), ILI9881C_SWITCH_PAGE_INSTR(1), ILI9881C_COMMAND_INSTR(0x22, 0x0A), ILI9881C_COMMAND_INSTR(0x31, 0x00), ILI9881C_COMMAND_INSTR(0x53, 0x78), ILI9881C_COMMAND_INSTR(0x50, 0x5B), ILI9881C_COMMAND_INSTR(0x51, 0x5B), ILI9881C_COMMAND_INSTR(0x60, 0x20), ILI9881C_COMMAND_INSTR(0x61, 0x00), ILI9881C_COMMAND_INSTR(0x62, 0x0D), ILI9881C_COMMAND_INSTR(0x63, 0x00), ILI9881C_COMMAND_INSTR(0xA0, 0x00), ILI9881C_COMMAND_INSTR(0xA1, 0x10), ILI9881C_COMMAND_INSTR(0xA2, 0x1C), ILI9881C_COMMAND_INSTR(0xA3, 0x13), ILI9881C_COMMAND_INSTR(0xA4, 0x15), ILI9881C_COMMAND_INSTR(0xA5, 0x26), ILI9881C_COMMAND_INSTR(0xA6, 0x1A), ILI9881C_COMMAND_INSTR(0xA7, 0x1D), ILI9881C_COMMAND_INSTR(0xA8, 0x67), ILI9881C_COMMAND_INSTR(0xA9, 0x1C), ILI9881C_COMMAND_INSTR(0xAA, 0x29), ILI9881C_COMMAND_INSTR(0xAB, 0x5B), ILI9881C_COMMAND_INSTR(0xAC, 0x26), ILI9881C_COMMAND_INSTR(0xAD, 0x28), ILI9881C_COMMAND_INSTR(0xAE, 0x5C), ILI9881C_COMMAND_INSTR(0xAF, 0x30), ILI9881C_COMMAND_INSTR(0xB0, 0x31), ILI9881C_COMMAND_INSTR(0xB1, 0x2E), ILI9881C_COMMAND_INSTR(0xB2, 0x32), ILI9881C_COMMAND_INSTR(0xB3, 0x00), ILI9881C_COMMAND_INSTR(0xC0, 0x00), ILI9881C_COMMAND_INSTR(0xC1, 0x10), ILI9881C_COMMAND_INSTR(0xC2, 
0x1C), ILI9881C_COMMAND_INSTR(0xC3, 0x13), ILI9881C_COMMAND_INSTR(0xC4, 0x15), ILI9881C_COMMAND_INSTR(0xC5, 0x26), ILI9881C_COMMAND_INSTR(0xC6, 0x1A), ILI9881C_COMMAND_INSTR(0xC7, 0x1D), ILI9881C_COMMAND_INSTR(0xC8, 0x67), ILI9881C_COMMAND_INSTR(0xC9, 0x1C), ILI9881C_COMMAND_INSTR(0xCA, 0x29), ILI9881C_COMMAND_INSTR(0xCB, 0x5B), ILI9881C_COMMAND_INSTR(0xCC, 0x26), ILI9881C_COMMAND_INSTR(0xCD, 0x28), ILI9881C_COMMAND_INSTR(0xCE, 0x5C), ILI9881C_COMMAND_INSTR(0xCF, 0x30), ILI9881C_COMMAND_INSTR(0xD0, 0x31), ILI9881C_COMMAND_INSTR(0xD1, 0x2E), ILI9881C_COMMAND_INSTR(0xD2, 0x32), ILI9881C_COMMAND_INSTR(0xD3, 0x00), ILI9881C_SWITCH_PAGE_INSTR(0), }; static inline struct ili9881c *panel_to_ili9881c(struct drm_panel *panel) { return container_of(panel, struct ili9881c, panel); } /* * The panel seems to accept some private DCS commands that map * directly to registers. * * It is organised by page, with each page having its own set of * registers, and the first page looks like it's holding the standard * DCS commands. * * So before any attempt at sending a command or data, we have to be * sure if we're in the right page or not. 
*/ static int ili9881c_switch_page(struct ili9881c *ctx, u8 page) { u8 buf[4] = { 0xff, 0x98, 0x81, page }; int ret; ret = mipi_dsi_dcs_write_buffer(ctx->dsi, buf, sizeof(buf)); if (ret < 0) return ret; return 0; } static int ili9881c_send_cmd_data(struct ili9881c *ctx, u8 cmd, u8 data) { u8 buf[2] = { cmd, data }; int ret; ret = mipi_dsi_dcs_write_buffer(ctx->dsi, buf, sizeof(buf)); if (ret < 0) return ret; return 0; } static int ili9881c_prepare(struct drm_panel *panel) { struct ili9881c *ctx = panel_to_ili9881c(panel); unsigned int i; int ret; /* Power the panel */ ret = regulator_enable(ctx->power); if (ret) return ret; msleep(5); /* And reset it */ gpiod_set_value(ctx->reset, 1); msleep(20); gpiod_set_value(ctx->reset, 0); msleep(20); for (i = 0; i < ctx->desc->init_length; i++) { const struct ili9881c_instr *instr = &ctx->desc->init[i]; if (instr->op == ILI9881C_SWITCH_PAGE) ret = ili9881c_switch_page(ctx, instr->arg.page); else if (instr->op == ILI9881C_COMMAND) ret = ili9881c_send_cmd_data(ctx, instr->arg.cmd.cmd, instr->arg.cmd.data); if (ret) return ret; } ret = ili9881c_switch_page(ctx, 0); if (ret) return ret; ret = mipi_dsi_dcs_set_tear_on(ctx->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK); if (ret) return ret; ret = mipi_dsi_dcs_exit_sleep_mode(ctx->dsi); if (ret) return ret; return 0; } static int ili9881c_enable(struct drm_panel *panel) { struct ili9881c *ctx = panel_to_ili9881c(panel); msleep(120); mipi_dsi_dcs_set_display_on(ctx->dsi); return 0; } static int ili9881c_disable(struct drm_panel *panel) { struct ili9881c *ctx = panel_to_ili9881c(panel); return mipi_dsi_dcs_set_display_off(ctx->dsi); } static int ili9881c_unprepare(struct drm_panel *panel) { struct ili9881c *ctx = panel_to_ili9881c(panel); mipi_dsi_dcs_enter_sleep_mode(ctx->dsi); regulator_disable(ctx->power); gpiod_set_value(ctx->reset, 1); return 0; } static const struct drm_display_mode lhr050h41_default_mode = { .clock = 62000, .hdisplay = 720, .hsync_start = 720 + 10, .hsync_end = 720 + 10 
+ 20, .htotal = 720 + 10 + 20 + 30, .vdisplay = 1280, .vsync_start = 1280 + 10, .vsync_end = 1280 + 10 + 10, .vtotal = 1280 + 10 + 10 + 20, .width_mm = 62, .height_mm = 110, }; static const struct drm_display_mode k101_im2byl02_default_mode = { .clock = 69700, .hdisplay = 800, .hsync_start = 800 + 52, .hsync_end = 800 + 52 + 8, .htotal = 800 + 52 + 8 + 48, .vdisplay = 1280, .vsync_start = 1280 + 16, .vsync_end = 1280 + 16 + 6, .vtotal = 1280 + 16 + 6 + 15, .width_mm = 135, .height_mm = 217, }; static const struct drm_display_mode tl050hdv35_default_mode = { .clock = 59400, .hdisplay = 720, .hsync_start = 720 + 18, .hsync_end = 720 + 18 + 3, .htotal = 720 + 18 + 3 + 20, .vdisplay = 1280, .vsync_start = 1280 + 26, .vsync_end = 1280 + 26 + 6, .vtotal = 1280 + 26 + 6 + 28, .width_mm = 62, .height_mm = 110, }; static const struct drm_display_mode w552946aba_default_mode = { .clock = 64000, .hdisplay = 720, .hsync_start = 720 + 40, .hsync_end = 720 + 40 + 10, .htotal = 720 + 40 + 10 + 40, .vdisplay = 1280, .vsync_start = 1280 + 22, .vsync_end = 1280 + 22 + 4, .vtotal = 1280 + 22 + 4 + 11, .width_mm = 68, .height_mm = 121, }; static int ili9881c_get_modes(struct drm_panel *panel, struct drm_connector *connector) { struct ili9881c *ctx = panel_to_ili9881c(panel); struct drm_display_mode *mode; mode = drm_mode_duplicate(connector->dev, ctx->desc->mode); if (!mode) { dev_err(&ctx->dsi->dev, "failed to add mode %ux%ux@%u\n", ctx->desc->mode->hdisplay, ctx->desc->mode->vdisplay, drm_mode_vrefresh(ctx->desc->mode)); return -ENOMEM; } drm_mode_set_name(mode); mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; drm_mode_probed_add(connector, mode); connector->display_info.width_mm = mode->width_mm; connector->display_info.height_mm = mode->height_mm; /* * TODO: Remove once all drm drivers call * drm_connector_set_orientation_from_panel() */ drm_connector_set_panel_orientation(connector, ctx->orientation); return 1; } static enum drm_panel_orientation 
ili9881c_get_orientation(struct drm_panel *panel) { struct ili9881c *ctx = panel_to_ili9881c(panel); return ctx->orientation; } static const struct drm_panel_funcs ili9881c_funcs = { .prepare = ili9881c_prepare, .unprepare = ili9881c_unprepare, .enable = ili9881c_enable, .disable = ili9881c_disable, .get_modes = ili9881c_get_modes, .get_orientation = ili9881c_get_orientation, }; static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi) { struct ili9881c *ctx; int ret; ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; mipi_dsi_set_drvdata(dsi, ctx); ctx->dsi = dsi; ctx->desc = of_device_get_match_data(&dsi->dev); drm_panel_init(&ctx->panel, &dsi->dev, &ili9881c_funcs, DRM_MODE_CONNECTOR_DSI); ctx->power = devm_regulator_get(&dsi->dev, "power"); if (IS_ERR(ctx->power)) return dev_err_probe(&dsi->dev, PTR_ERR(ctx->power), "Couldn't get our power regulator\n"); ctx->reset = devm_gpiod_get_optional(&dsi->dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(ctx->reset)) return dev_err_probe(&dsi->dev, PTR_ERR(ctx->reset), "Couldn't get our reset GPIO\n"); ret = of_drm_get_panel_orientation(dsi->dev.of_node, &ctx->orientation); if (ret) { dev_err(&dsi->dev, "%pOF: failed to get orientation: %d\n", dsi->dev.of_node, ret); return ret; } ret = drm_panel_of_backlight(&ctx->panel); if (ret) return ret; drm_panel_add(&ctx->panel); dsi->mode_flags = ctx->desc->mode_flags; dsi->format = MIPI_DSI_FMT_RGB888; dsi->lanes = 4; return mipi_dsi_attach(dsi); } static void ili9881c_dsi_remove(struct mipi_dsi_device *dsi) { struct ili9881c *ctx = mipi_dsi_get_drvdata(dsi); mipi_dsi_detach(dsi); drm_panel_remove(&ctx->panel); } static const struct ili9881c_desc lhr050h41_desc = { .init = lhr050h41_init, .init_length = ARRAY_SIZE(lhr050h41_init), .mode = &lhr050h41_default_mode, .mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE, }; static const struct ili9881c_desc k101_im2byl02_desc = { .init = k101_im2byl02_init, .init_length = ARRAY_SIZE(k101_im2byl02_init), .mode = 
&k101_im2byl02_default_mode, .mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE, }; static const struct ili9881c_desc tl050hdv35_desc = { .init = tl050hdv35_init, .init_length = ARRAY_SIZE(tl050hdv35_init), .mode = &tl050hdv35_default_mode, .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_LPM, }; static const struct ili9881c_desc w552946aba_desc = { .init = w552946ab_init, .init_length = ARRAY_SIZE(w552946ab_init), .mode = &w552946aba_default_mode, .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET, }; static const struct of_device_id ili9881c_of_match[] = { { .compatible = "bananapi,lhr050h41", .data = &lhr050h41_desc }, { .compatible = "feixin,k101-im2byl02", .data = &k101_im2byl02_desc }, { .compatible = "tdo,tl050hdv35", .data = &tl050hdv35_desc }, { .compatible = "wanchanglong,w552946aba", .data = &w552946aba_desc }, { } }; MODULE_DEVICE_TABLE(of, ili9881c_of_match); static struct mipi_dsi_driver ili9881c_dsi_driver = { .probe = ili9881c_dsi_probe, .remove = ili9881c_dsi_remove, .driver = { .name = "ili9881c-dsi", .of_match_table = ili9881c_of_match, }, }; module_mipi_dsi_driver(ili9881c_dsi_driver); MODULE_AUTHOR("Maxime Ripard <[email protected]>"); MODULE_DESCRIPTION("Ilitek ILI9881C Controller Driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2016 InforceComputing * Author: Vinay Simha BN <[email protected]> * * Copyright (C) 2016 Linaro Ltd * Author: Sumit Semwal <[email protected]> * * From internet archives, the panel for Nexus 7 2nd Gen, 2013 model is a * JDI model LT070ME05000, and its data sheet is at: * http://panelone.net/en/7-0-inch/JDI_LT070ME05000_7.0_inch-datasheet */ #include <linux/backlight.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/of.h> #include <linux/regulator/consumer.h> #include <video/mipi_display.h> #include <drm/drm_crtc.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_modes.h> #include <drm/drm_panel.h> static const char * const regulator_names[] = { "vddp", "iovcc" }; struct jdi_panel { struct drm_panel base; struct mipi_dsi_device *dsi; struct regulator_bulk_data supplies[ARRAY_SIZE(regulator_names)]; struct gpio_desc *enable_gpio; struct gpio_desc *reset_gpio; struct gpio_desc *dcdc_en_gpio; struct backlight_device *backlight; bool prepared; bool enabled; const struct drm_display_mode *mode; }; static inline struct jdi_panel *to_jdi_panel(struct drm_panel *panel) { return container_of(panel, struct jdi_panel, base); } static int jdi_panel_init(struct jdi_panel *jdi) { struct mipi_dsi_device *dsi = jdi->dsi; struct device *dev = &jdi->dsi->dev; int ret; dsi->mode_flags |= MIPI_DSI_MODE_LPM; ret = mipi_dsi_dcs_soft_reset(dsi); if (ret < 0) return ret; usleep_range(10000, 20000); ret = mipi_dsi_dcs_set_pixel_format(dsi, MIPI_DCS_PIXEL_FMT_24BIT << 4); if (ret < 0) { dev_err(dev, "failed to set pixel format: %d\n", ret); return ret; } ret = mipi_dsi_dcs_set_column_address(dsi, 0, jdi->mode->hdisplay - 1); if (ret < 0) { dev_err(dev, "failed to set column address: %d\n", ret); return ret; } ret = mipi_dsi_dcs_set_page_address(dsi, 0, jdi->mode->vdisplay - 1); if (ret < 0) { dev_err(dev, "failed to set page address: %d\n", ret); return ret; } /* * BIT(5) BCTRL = 
1 Backlight Control Block On, Brightness registers * are active * BIT(3) BL = 1 Backlight Control On * BIT(2) DD = 0 Display Dimming is Off */ ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, (u8[]){ 0x24 }, 1); if (ret < 0) { dev_err(dev, "failed to write control display: %d\n", ret); return ret; } /* CABC off */ ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_POWER_SAVE, (u8[]){ 0x00 }, 1); if (ret < 0) { dev_err(dev, "failed to set cabc off: %d\n", ret); return ret; } ret = mipi_dsi_dcs_exit_sleep_mode(dsi); if (ret < 0) { dev_err(dev, "failed to set exit sleep mode: %d\n", ret); return ret; } msleep(120); ret = mipi_dsi_generic_write(dsi, (u8[]){0xB0, 0x00}, 2); if (ret < 0) { dev_err(dev, "failed to set mcap: %d\n", ret); return ret; } mdelay(10); /* Interface setting, video mode */ ret = mipi_dsi_generic_write(dsi, (u8[]) {0xB3, 0x26, 0x08, 0x00, 0x20, 0x00}, 6); if (ret < 0) { dev_err(dev, "failed to set display interface setting: %d\n" , ret); return ret; } mdelay(20); ret = mipi_dsi_generic_write(dsi, (u8[]){0xB0, 0x03}, 2); if (ret < 0) { dev_err(dev, "failed to set default values for mcap: %d\n" , ret); return ret; } return 0; } static int jdi_panel_on(struct jdi_panel *jdi) { struct mipi_dsi_device *dsi = jdi->dsi; struct device *dev = &jdi->dsi->dev; int ret; dsi->mode_flags |= MIPI_DSI_MODE_LPM; ret = mipi_dsi_dcs_set_display_on(dsi); if (ret < 0) dev_err(dev, "failed to set display on: %d\n", ret); return ret; } static void jdi_panel_off(struct jdi_panel *jdi) { struct mipi_dsi_device *dsi = jdi->dsi; struct device *dev = &jdi->dsi->dev; int ret; dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; ret = mipi_dsi_dcs_set_display_off(dsi); if (ret < 0) dev_err(dev, "failed to set display off: %d\n", ret); ret = mipi_dsi_dcs_enter_sleep_mode(dsi); if (ret < 0) dev_err(dev, "failed to enter sleep mode: %d\n", ret); msleep(100); } static int jdi_panel_disable(struct drm_panel *panel) { struct jdi_panel *jdi = to_jdi_panel(panel); if (!jdi->enabled) return 0; 
backlight_disable(jdi->backlight); jdi->enabled = false; return 0; } static int jdi_panel_unprepare(struct drm_panel *panel) { struct jdi_panel *jdi = to_jdi_panel(panel); struct device *dev = &jdi->dsi->dev; int ret; if (!jdi->prepared) return 0; jdi_panel_off(jdi); ret = regulator_bulk_disable(ARRAY_SIZE(jdi->supplies), jdi->supplies); if (ret < 0) dev_err(dev, "regulator disable failed, %d\n", ret); gpiod_set_value(jdi->enable_gpio, 0); gpiod_set_value(jdi->reset_gpio, 1); gpiod_set_value(jdi->dcdc_en_gpio, 0); jdi->prepared = false; return 0; } static int jdi_panel_prepare(struct drm_panel *panel) { struct jdi_panel *jdi = to_jdi_panel(panel); struct device *dev = &jdi->dsi->dev; int ret; if (jdi->prepared) return 0; ret = regulator_bulk_enable(ARRAY_SIZE(jdi->supplies), jdi->supplies); if (ret < 0) { dev_err(dev, "regulator enable failed, %d\n", ret); return ret; } msleep(20); gpiod_set_value(jdi->dcdc_en_gpio, 1); usleep_range(10, 20); gpiod_set_value(jdi->reset_gpio, 0); usleep_range(10, 20); gpiod_set_value(jdi->enable_gpio, 1); usleep_range(10, 20); ret = jdi_panel_init(jdi); if (ret < 0) { dev_err(dev, "failed to init panel: %d\n", ret); goto poweroff; } ret = jdi_panel_on(jdi); if (ret < 0) { dev_err(dev, "failed to set panel on: %d\n", ret); goto poweroff; } jdi->prepared = true; return 0; poweroff: ret = regulator_bulk_disable(ARRAY_SIZE(jdi->supplies), jdi->supplies); if (ret < 0) dev_err(dev, "regulator disable failed, %d\n", ret); gpiod_set_value(jdi->enable_gpio, 0); gpiod_set_value(jdi->reset_gpio, 1); gpiod_set_value(jdi->dcdc_en_gpio, 0); return ret; } static int jdi_panel_enable(struct drm_panel *panel) { struct jdi_panel *jdi = to_jdi_panel(panel); if (jdi->enabled) return 0; backlight_enable(jdi->backlight); jdi->enabled = true; return 0; } static const struct drm_display_mode default_mode = { .clock = 155493, .hdisplay = 1200, .hsync_start = 1200 + 48, .hsync_end = 1200 + 48 + 32, .htotal = 1200 + 48 + 32 + 60, .vdisplay = 1920, .vsync_start 
= 1920 + 3, .vsync_end = 1920 + 3 + 5, .vtotal = 1920 + 3 + 5 + 6, .flags = 0, }; static int jdi_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector) { struct drm_display_mode *mode; struct jdi_panel *jdi = to_jdi_panel(panel); struct device *dev = &jdi->dsi->dev; mode = drm_mode_duplicate(connector->dev, &default_mode); if (!mode) { dev_err(dev, "failed to add mode %ux%ux@%u\n", default_mode.hdisplay, default_mode.vdisplay, drm_mode_vrefresh(&default_mode)); return -ENOMEM; } drm_mode_set_name(mode); drm_mode_probed_add(connector, mode); connector->display_info.width_mm = 95; connector->display_info.height_mm = 151; return 1; } static int dsi_dcs_bl_get_brightness(struct backlight_device *bl) { struct mipi_dsi_device *dsi = bl_get_data(bl); int ret; u16 brightness = bl->props.brightness; dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; ret = mipi_dsi_dcs_get_display_brightness(dsi, &brightness); if (ret < 0) return ret; dsi->mode_flags |= MIPI_DSI_MODE_LPM; return brightness & 0xff; } static int dsi_dcs_bl_update_status(struct backlight_device *bl) { struct mipi_dsi_device *dsi = bl_get_data(bl); int ret; dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; ret = mipi_dsi_dcs_set_display_brightness(dsi, bl->props.brightness); if (ret < 0) return ret; dsi->mode_flags |= MIPI_DSI_MODE_LPM; return 0; } static const struct backlight_ops dsi_bl_ops = { .update_status = dsi_dcs_bl_update_status, .get_brightness = dsi_dcs_bl_get_brightness, }; static struct backlight_device * drm_panel_create_dsi_backlight(struct mipi_dsi_device *dsi) { struct device *dev = &dsi->dev; struct backlight_properties props; memset(&props, 0, sizeof(props)); props.type = BACKLIGHT_RAW; props.brightness = 255; props.max_brightness = 255; return devm_backlight_device_register(dev, dev_name(dev), dev, dsi, &dsi_bl_ops, &props); } static const struct drm_panel_funcs jdi_panel_funcs = { .disable = jdi_panel_disable, .unprepare = jdi_panel_unprepare, .prepare = jdi_panel_prepare, .enable = 
jdi_panel_enable, .get_modes = jdi_panel_get_modes, }; static const struct of_device_id jdi_of_match[] = { { .compatible = "jdi,lt070me05000", }, { } }; MODULE_DEVICE_TABLE(of, jdi_of_match); static int jdi_panel_add(struct jdi_panel *jdi) { struct device *dev = &jdi->dsi->dev; int ret; unsigned int i; jdi->mode = &default_mode; for (i = 0; i < ARRAY_SIZE(jdi->supplies); i++) jdi->supplies[i].supply = regulator_names[i]; ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(jdi->supplies), jdi->supplies); if (ret < 0) return dev_err_probe(dev, ret, "failed to init regulator, ret=%d\n", ret); jdi->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW); if (IS_ERR(jdi->enable_gpio)) { return dev_err_probe(dev, PTR_ERR(jdi->enable_gpio), "cannot get enable-gpio %d\n", ret); } jdi->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(jdi->reset_gpio)) return dev_err_probe(dev, PTR_ERR(jdi->reset_gpio), "cannot get reset-gpios %d\n", ret); jdi->dcdc_en_gpio = devm_gpiod_get(dev, "dcdc-en", GPIOD_OUT_LOW); if (IS_ERR(jdi->dcdc_en_gpio)) return dev_err_probe(dev, PTR_ERR(jdi->dcdc_en_gpio), "cannot get dcdc-en-gpio %d\n", ret); jdi->backlight = drm_panel_create_dsi_backlight(jdi->dsi); if (IS_ERR(jdi->backlight)) return dev_err_probe(dev, PTR_ERR(jdi->backlight), "failed to register backlight %d\n", ret); drm_panel_init(&jdi->base, &jdi->dsi->dev, &jdi_panel_funcs, DRM_MODE_CONNECTOR_DSI); drm_panel_add(&jdi->base); return 0; } static void jdi_panel_del(struct jdi_panel *jdi) { if (jdi->base.dev) drm_panel_remove(&jdi->base); } static int jdi_panel_probe(struct mipi_dsi_device *dsi) { struct jdi_panel *jdi; int ret; dsi->lanes = 4; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO | MIPI_DSI_CLOCK_NON_CONTINUOUS; jdi = devm_kzalloc(&dsi->dev, sizeof(*jdi), GFP_KERNEL); if (!jdi) return -ENOMEM; mipi_dsi_set_drvdata(dsi, jdi); jdi->dsi = dsi; ret = jdi_panel_add(jdi); if (ret < 0) return ret; ret = 
mipi_dsi_attach(dsi); if (ret < 0) { jdi_panel_del(jdi); return ret; } return 0; } static void jdi_panel_remove(struct mipi_dsi_device *dsi) { struct jdi_panel *jdi = mipi_dsi_get_drvdata(dsi); int ret; ret = jdi_panel_disable(&jdi->base); if (ret < 0) dev_err(&dsi->dev, "failed to disable panel: %d\n", ret); ret = mipi_dsi_detach(dsi); if (ret < 0) dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret); jdi_panel_del(jdi); } static void jdi_panel_shutdown(struct mipi_dsi_device *dsi) { struct jdi_panel *jdi = mipi_dsi_get_drvdata(dsi); jdi_panel_disable(&jdi->base); } static struct mipi_dsi_driver jdi_panel_driver = { .driver = { .name = "panel-jdi-lt070me05000", .of_match_table = jdi_of_match, }, .probe = jdi_panel_probe, .remove = jdi_panel_remove, .shutdown = jdi_panel_shutdown, }; module_mipi_dsi_driver(jdi_panel_driver); MODULE_AUTHOR("Sumit Semwal <[email protected]>"); MODULE_AUTHOR("Vinay Simha BN <[email protected]>"); MODULE_DESCRIPTION("JDI LT070ME05000 WUXGA"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
// SPDX-License-Identifier: GPL-2.0+ /* * MIPI-DSI Samsung s6d16d0 panel driver. This is a 864x480 * AMOLED panel with a command-only DSI interface. */ #include <drm/drm_modes.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_panel.h> #include <linux/gpio/consumer.h> #include <linux/regulator/consumer.h> #include <linux/delay.h> #include <linux/mod_devicetable.h> #include <linux/module.h> struct s6d16d0 { struct device *dev; struct drm_panel panel; struct regulator *supply; struct gpio_desc *reset_gpio; }; /* * The timings are not very helpful as the display is used in * command mode. */ static const struct drm_display_mode samsung_s6d16d0_mode = { /* HS clock, (htotal*vtotal*vrefresh)/1000 */ .clock = 420160, .hdisplay = 864, .hsync_start = 864 + 154, .hsync_end = 864 + 154 + 16, .htotal = 864 + 154 + 16 + 32, .vdisplay = 480, .vsync_start = 480 + 1, .vsync_end = 480 + 1 + 1, .vtotal = 480 + 1 + 1 + 1, .width_mm = 84, .height_mm = 48, }; static inline struct s6d16d0 *panel_to_s6d16d0(struct drm_panel *panel) { return container_of(panel, struct s6d16d0, panel); } static int s6d16d0_unprepare(struct drm_panel *panel) { struct s6d16d0 *s6 = panel_to_s6d16d0(panel); struct mipi_dsi_device *dsi = to_mipi_dsi_device(s6->dev); int ret; /* Enter sleep mode */ ret = mipi_dsi_dcs_enter_sleep_mode(dsi); if (ret) { dev_err(s6->dev, "failed to enter sleep mode (%d)\n", ret); return ret; } /* Assert RESET */ gpiod_set_value_cansleep(s6->reset_gpio, 1); regulator_disable(s6->supply); return 0; } static int s6d16d0_prepare(struct drm_panel *panel) { struct s6d16d0 *s6 = panel_to_s6d16d0(panel); struct mipi_dsi_device *dsi = to_mipi_dsi_device(s6->dev); int ret; ret = regulator_enable(s6->supply); if (ret) { dev_err(s6->dev, "failed to enable supply (%d)\n", ret); return ret; } /* Assert RESET */ gpiod_set_value_cansleep(s6->reset_gpio, 1); udelay(10); /* De-assert RESET */ gpiod_set_value_cansleep(s6->reset_gpio, 0); msleep(120); /* Enabe tearing mode: send TE (tearing effect) 
at VBLANK */ ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK); if (ret) { dev_err(s6->dev, "failed to enable vblank TE (%d)\n", ret); return ret; } /* Exit sleep mode and power on */ ret = mipi_dsi_dcs_exit_sleep_mode(dsi); if (ret) { dev_err(s6->dev, "failed to exit sleep mode (%d)\n", ret); return ret; } return 0; } static int s6d16d0_enable(struct drm_panel *panel) { struct s6d16d0 *s6 = panel_to_s6d16d0(panel); struct mipi_dsi_device *dsi = to_mipi_dsi_device(s6->dev); int ret; ret = mipi_dsi_dcs_set_display_on(dsi); if (ret) { dev_err(s6->dev, "failed to turn display on (%d)\n", ret); return ret; } return 0; } static int s6d16d0_disable(struct drm_panel *panel) { struct s6d16d0 *s6 = panel_to_s6d16d0(panel); struct mipi_dsi_device *dsi = to_mipi_dsi_device(s6->dev); int ret; ret = mipi_dsi_dcs_set_display_off(dsi); if (ret) { dev_err(s6->dev, "failed to turn display off (%d)\n", ret); return ret; } return 0; } static int s6d16d0_get_modes(struct drm_panel *panel, struct drm_connector *connector) { struct drm_display_mode *mode; mode = drm_mode_duplicate(connector->dev, &samsung_s6d16d0_mode); if (!mode) { dev_err(panel->dev, "bad mode or failed to add mode\n"); return -EINVAL; } drm_mode_set_name(mode); mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; connector->display_info.width_mm = mode->width_mm; connector->display_info.height_mm = mode->height_mm; drm_mode_probed_add(connector, mode); return 1; /* Number of modes */ } static const struct drm_panel_funcs s6d16d0_drm_funcs = { .disable = s6d16d0_disable, .unprepare = s6d16d0_unprepare, .prepare = s6d16d0_prepare, .enable = s6d16d0_enable, .get_modes = s6d16d0_get_modes, }; static int s6d16d0_probe(struct mipi_dsi_device *dsi) { struct device *dev = &dsi->dev; struct s6d16d0 *s6; int ret; s6 = devm_kzalloc(dev, sizeof(struct s6d16d0), GFP_KERNEL); if (!s6) return -ENOMEM; mipi_dsi_set_drvdata(dsi, s6); s6->dev = dev; dsi->lanes = 2; dsi->format = MIPI_DSI_FMT_RGB888; 
dsi->hs_rate = 420160000; dsi->lp_rate = 19200000; /* * This display uses command mode so no MIPI_DSI_MODE_VIDEO * or MIPI_DSI_MODE_VIDEO_SYNC_PULSE * * As we only send commands we do not need to be continuously * clocked. */ dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS; s6->supply = devm_regulator_get(dev, "vdd1"); if (IS_ERR(s6->supply)) return PTR_ERR(s6->supply); /* This asserts RESET by default */ s6->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(s6->reset_gpio)) { ret = PTR_ERR(s6->reset_gpio); if (ret != -EPROBE_DEFER) dev_err(dev, "failed to request GPIO (%d)\n", ret); return ret; } drm_panel_init(&s6->panel, dev, &s6d16d0_drm_funcs, DRM_MODE_CONNECTOR_DSI); drm_panel_add(&s6->panel); ret = mipi_dsi_attach(dsi); if (ret < 0) drm_panel_remove(&s6->panel); return ret; } static void s6d16d0_remove(struct mipi_dsi_device *dsi) { struct s6d16d0 *s6 = mipi_dsi_get_drvdata(dsi); mipi_dsi_detach(dsi); drm_panel_remove(&s6->panel); } static const struct of_device_id s6d16d0_of_match[] = { { .compatible = "samsung,s6d16d0" }, { } }; MODULE_DEVICE_TABLE(of, s6d16d0_of_match); static struct mipi_dsi_driver s6d16d0_driver = { .probe = s6d16d0_probe, .remove = s6d16d0_remove, .driver = { .name = "panel-samsung-s6d16d0", .of_match_table = s6d16d0_of_match, }, }; module_mipi_dsi_driver(s6d16d0_driver); MODULE_AUTHOR("Linus Wallei <[email protected]>"); MODULE_DESCRIPTION("MIPI-DSI s6d16d0 Panel Driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
// SPDX-License-Identifier: GPL-2.0-only /* * Novatek NT35950 DriverIC panels driver * * Copyright (c) 2021 AngeloGioacchino Del Regno * <[email protected]> */ #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_graph.h> #include <linux/regulator/consumer.h> #include <drm/drm_connector.h> #include <drm/drm_crtc.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_modes.h> #include <drm/drm_panel.h> #define MCS_CMD_MAUCCTR 0xf0 /* Manufacturer command enable */ #define MCS_PARAM_SCALER_FUNCTION 0x58 /* Scale-up function */ #define MCS_PARAM_SCALEUP_MODE 0xc9 #define MCS_SCALEUP_SIMPLE 0x0 #define MCS_SCALEUP_BILINEAR BIT(0) #define MCS_SCALEUP_DUPLICATE (BIT(0) | BIT(4)) /* VESA Display Stream Compression param */ #define MCS_PARAM_VESA_DSC_ON 0x03 /* Data Compression mode */ #define MCS_PARAM_DATA_COMPRESSION 0x90 #define MCS_DATA_COMPRESSION_NONE 0x00 #define MCS_DATA_COMPRESSION_FBC 0x02 #define MCS_DATA_COMPRESSION_DSC 0x03 /* Display Output control */ #define MCS_PARAM_DISP_OUTPUT_CTRL 0xb4 #define MCS_DISP_OUT_SRAM_EN BIT(0) #define MCS_DISP_OUT_VIDEO_MODE BIT(4) /* VESA Display Stream Compression setting */ #define MCS_PARAM_VESA_DSC_SETTING 0xc0 /* SubPixel Rendering (SPR) */ #define MCS_PARAM_SPR_EN 0xe3 #define MCS_PARAM_SPR_MODE 0xef #define MCS_SPR_MODE_YYG_RAINBOW_RGB 0x01 #define NT35950_VREG_MAX 4 struct nt35950 { struct drm_panel panel; struct drm_connector *connector; struct mipi_dsi_device *dsi[2]; struct regulator_bulk_data vregs[NT35950_VREG_MAX]; struct gpio_desc *reset_gpio; const struct nt35950_panel_desc *desc; int cur_mode; u8 last_page; bool prepared; }; struct nt35950_panel_mode { const struct drm_display_mode mode; bool enable_sram; bool is_video_mode; u8 scaler_on; u8 scaler_mode; u8 compression; u8 spr_en; u8 spr_mode; }; struct nt35950_panel_desc { const char *model_name; const struct mipi_dsi_device_info dsi_info; const struct nt35950_panel_mode *mode_data; bool 
is_dual_dsi; u8 num_lanes; u8 num_modes; }; static inline struct nt35950 *to_nt35950(struct drm_panel *panel) { return container_of(panel, struct nt35950, panel); } static void nt35950_reset(struct nt35950 *nt) { gpiod_set_value_cansleep(nt->reset_gpio, 1); usleep_range(12000, 13000); gpiod_set_value_cansleep(nt->reset_gpio, 0); usleep_range(300, 400); gpiod_set_value_cansleep(nt->reset_gpio, 1); usleep_range(12000, 13000); } /* * nt35950_set_cmd2_page - Select manufacturer control (CMD2) page * @nt: Main driver structure * @page: Page number (0-7) * * Return: Number of transferred bytes or negative number on error */ static int nt35950_set_cmd2_page(struct nt35950 *nt, u8 page) { const u8 mauc_cmd2_page[] = { MCS_CMD_MAUCCTR, 0x55, 0xaa, 0x52, 0x08, page }; int ret; ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], mauc_cmd2_page, ARRAY_SIZE(mauc_cmd2_page)); if (ret < 0) return ret; nt->last_page = page; return 0; } /* * nt35950_set_data_compression - Set data compression mode * @nt: Main driver structure * @comp_mode: Compression mode * * Return: Number of transferred bytes or negative number on error */ static int nt35950_set_data_compression(struct nt35950 *nt, u8 comp_mode) { u8 cmd_data_compression[] = { MCS_PARAM_DATA_COMPRESSION, comp_mode }; u8 cmd_vesa_dsc_on[] = { MCS_PARAM_VESA_DSC_ON, !!comp_mode }; u8 cmd_vesa_dsc_setting[] = { MCS_PARAM_VESA_DSC_SETTING, 0x03 }; u8 last_page = nt->last_page; int ret; /* Set CMD2 Page 0 if we're not there yet */ if (last_page != 0) { ret = nt35950_set_cmd2_page(nt, 0); if (ret < 0) return ret; } ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_data_compression, ARRAY_SIZE(cmd_data_compression)); if (ret < 0) return ret; ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_vesa_dsc_on, ARRAY_SIZE(cmd_vesa_dsc_on)); if (ret < 0) return ret; /* Set the vesa dsc setting on Page 4 */ ret = nt35950_set_cmd2_page(nt, 4); if (ret < 0) return ret; /* Display Stream Compression setting, always 0x03 */ ret = 
mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_vesa_dsc_setting, ARRAY_SIZE(cmd_vesa_dsc_setting)); if (ret < 0) return ret; /* Get back to the previously set page */ return nt35950_set_cmd2_page(nt, last_page); } /* * nt35950_set_scaler - Enable/disable resolution upscaling * @nt: Main driver structure * @scale_up: Scale up function control * * Return: Number of transferred bytes or negative number on error */ static int nt35950_set_scaler(struct nt35950 *nt, u8 scale_up) { u8 cmd_scaler[] = { MCS_PARAM_SCALER_FUNCTION, scale_up }; return mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_scaler, ARRAY_SIZE(cmd_scaler)); } /* * nt35950_set_scale_mode - Resolution upscaling mode * @nt: Main driver structure * @mode: Scaler mode (MCS_DATA_COMPRESSION_*) * * Return: Number of transferred bytes or negative number on error */ static int nt35950_set_scale_mode(struct nt35950 *nt, u8 mode) { u8 cmd_scaler[] = { MCS_PARAM_SCALEUP_MODE, mode }; return mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_scaler, ARRAY_SIZE(cmd_scaler)); } /* * nt35950_inject_black_image - Display a completely black image * @nt: Main driver structure * * After IC setup, the attached panel may show random data * due to driveric behavior changes (resolution, compression, * scaling, etc). This function, called after parameters setup, * makes the driver ic to output a completely black image to * the display. * It makes sense to push a black image before sending the sleep-out * and display-on commands. 
* * Return: Number of transferred bytes or negative number on error */ static int nt35950_inject_black_image(struct nt35950 *nt) { const u8 cmd0_black_img[] = { 0x6f, 0x01 }; const u8 cmd1_black_img[] = { 0xf3, 0x10 }; u8 cmd_test[] = { 0xff, 0xaa, 0x55, 0xa5, 0x80 }; int ret; /* Enable test command */ ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_test, ARRAY_SIZE(cmd_test)); if (ret < 0) return ret; /* Send a black image */ ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd0_black_img, ARRAY_SIZE(cmd0_black_img)); if (ret < 0) return ret; ret = mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd1_black_img, ARRAY_SIZE(cmd1_black_img)); if (ret < 0) return ret; /* Disable test command */ cmd_test[ARRAY_SIZE(cmd_test) - 1] = 0x00; return mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_test, ARRAY_SIZE(cmd_test)); } /* * nt35950_set_dispout - Set Display Output register parameters * @nt: Main driver structure * * Return: Number of transferred bytes or negative number on error */ static int nt35950_set_dispout(struct nt35950 *nt) { u8 cmd_dispout[] = { MCS_PARAM_DISP_OUTPUT_CTRL, 0x00 }; const struct nt35950_panel_mode *mode_data = nt->desc->mode_data; if (mode_data[nt->cur_mode].is_video_mode) cmd_dispout[1] |= MCS_DISP_OUT_VIDEO_MODE; if (mode_data[nt->cur_mode].enable_sram) cmd_dispout[1] |= MCS_DISP_OUT_SRAM_EN; return mipi_dsi_dcs_write_buffer(nt->dsi[0], cmd_dispout, ARRAY_SIZE(cmd_dispout)); } static int nt35950_get_current_mode(struct nt35950 *nt) { struct drm_connector *connector = nt->connector; struct drm_crtc_state *crtc_state; int i; /* Return the default (first) mode if no info available yet */ if (!connector->state || !connector->state->crtc) return 0; crtc_state = connector->state->crtc->state; for (i = 0; i < nt->desc->num_modes; i++) { if (drm_mode_match(&crtc_state->mode, &nt->desc->mode_data[i].mode, DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_CLOCK)) return i; } return 0; } static int nt35950_on(struct nt35950 *nt) { const struct nt35950_panel_mode *mode_data = 
nt->desc->mode_data; struct mipi_dsi_device *dsi = nt->dsi[0]; struct device *dev = &dsi->dev; int ret; nt->cur_mode = nt35950_get_current_mode(nt); nt->dsi[0]->mode_flags |= MIPI_DSI_MODE_LPM; nt->dsi[1]->mode_flags |= MIPI_DSI_MODE_LPM; ret = nt35950_set_cmd2_page(nt, 0); if (ret < 0) return ret; ret = nt35950_set_data_compression(nt, mode_data[nt->cur_mode].compression); if (ret < 0) return ret; ret = nt35950_set_scale_mode(nt, mode_data[nt->cur_mode].scaler_mode); if (ret < 0) return ret; ret = nt35950_set_scaler(nt, mode_data[nt->cur_mode].scaler_on); if (ret < 0) return ret; ret = nt35950_set_dispout(nt); if (ret < 0) return ret; ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK); if (ret < 0) { dev_err(dev, "Failed to set tear on: %d\n", ret); return ret; } ret = mipi_dsi_dcs_set_tear_scanline(dsi, 0); if (ret < 0) { dev_err(dev, "Failed to set tear scanline: %d\n", ret); return ret; } /* CMD2 Page 1 */ ret = nt35950_set_cmd2_page(nt, 1); if (ret < 0) return ret; /* Unknown command */ mipi_dsi_dcs_write_seq(dsi, 0xd4, 0x88, 0x88); /* CMD2 Page 7 */ ret = nt35950_set_cmd2_page(nt, 7); if (ret < 0) return ret; /* Enable SubPixel Rendering */ mipi_dsi_dcs_write_seq(dsi, MCS_PARAM_SPR_EN, 0x01); /* SPR Mode: YYG Rainbow-RGB */ mipi_dsi_dcs_write_seq(dsi, MCS_PARAM_SPR_MODE, MCS_SPR_MODE_YYG_RAINBOW_RGB); /* CMD3 */ ret = nt35950_inject_black_image(nt); if (ret < 0) return ret; ret = mipi_dsi_dcs_exit_sleep_mode(dsi); if (ret < 0) return ret; msleep(120); ret = mipi_dsi_dcs_set_display_on(dsi); if (ret < 0) return ret; msleep(120); nt->dsi[0]->mode_flags &= ~MIPI_DSI_MODE_LPM; nt->dsi[1]->mode_flags &= ~MIPI_DSI_MODE_LPM; return 0; } static int nt35950_off(struct nt35950 *nt) { struct device *dev = &nt->dsi[0]->dev; int ret; ret = mipi_dsi_dcs_set_display_off(nt->dsi[0]); if (ret < 0) { dev_err(dev, "Failed to set display off: %d\n", ret); goto set_lpm; } usleep_range(10000, 11000); ret = mipi_dsi_dcs_enter_sleep_mode(nt->dsi[0]); if (ret < 0) { 
dev_err(dev, "Failed to enter sleep mode: %d\n", ret); goto set_lpm; } msleep(150); set_lpm: nt->dsi[0]->mode_flags |= MIPI_DSI_MODE_LPM; nt->dsi[1]->mode_flags |= MIPI_DSI_MODE_LPM; return 0; } static int nt35950_sharp_init_vregs(struct nt35950 *nt, struct device *dev) { int ret; nt->vregs[0].supply = "vddio"; nt->vregs[1].supply = "avdd"; nt->vregs[2].supply = "avee"; nt->vregs[3].supply = "dvdd"; ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(nt->vregs), nt->vregs); if (ret < 0) return ret; ret = regulator_is_supported_voltage(nt->vregs[0].consumer, 1750000, 1950000); if (!ret) return -EINVAL; ret = regulator_is_supported_voltage(nt->vregs[1].consumer, 5200000, 5900000); if (!ret) return -EINVAL; /* AVEE is negative: -5.90V to -5.20V */ ret = regulator_is_supported_voltage(nt->vregs[2].consumer, 5200000, 5900000); if (!ret) return -EINVAL; ret = regulator_is_supported_voltage(nt->vregs[3].consumer, 1300000, 1400000); if (!ret) return -EINVAL; return 0; } static int nt35950_prepare(struct drm_panel *panel) { struct nt35950 *nt = to_nt35950(panel); struct device *dev = &nt->dsi[0]->dev; int ret; if (nt->prepared) return 0; ret = regulator_enable(nt->vregs[0].consumer); if (ret) return ret; usleep_range(2000, 5000); ret = regulator_enable(nt->vregs[3].consumer); if (ret) goto end; usleep_range(15000, 18000); ret = regulator_enable(nt->vregs[1].consumer); if (ret) goto end; ret = regulator_enable(nt->vregs[2].consumer); if (ret) goto end; usleep_range(12000, 13000); nt35950_reset(nt); ret = nt35950_on(nt); if (ret < 0) { dev_err(dev, "Failed to initialize panel: %d\n", ret); goto end; } nt->prepared = true; end: if (ret < 0) { regulator_bulk_disable(ARRAY_SIZE(nt->vregs), nt->vregs); return ret; } return 0; } static int nt35950_unprepare(struct drm_panel *panel) { struct nt35950 *nt = to_nt35950(panel); struct device *dev = &nt->dsi[0]->dev; int ret; if (!nt->prepared) return 0; ret = nt35950_off(nt); if (ret < 0) dev_err(dev, "Failed to deinitialize panel: %d\n", 
ret); gpiod_set_value_cansleep(nt->reset_gpio, 0); regulator_bulk_disable(ARRAY_SIZE(nt->vregs), nt->vregs); nt->prepared = false; return 0; } static int nt35950_get_modes(struct drm_panel *panel, struct drm_connector *connector) { struct nt35950 *nt = to_nt35950(panel); int i; for (i = 0; i < nt->desc->num_modes; i++) { struct drm_display_mode *mode; mode = drm_mode_duplicate(connector->dev, &nt->desc->mode_data[i].mode); if (!mode) return -ENOMEM; drm_mode_set_name(mode); mode->type |= DRM_MODE_TYPE_DRIVER; if (nt->desc->num_modes == 1) mode->type |= DRM_MODE_TYPE_PREFERRED; drm_mode_probed_add(connector, mode); } connector->display_info.bpc = 8; connector->display_info.height_mm = nt->desc->mode_data[0].mode.height_mm; connector->display_info.width_mm = nt->desc->mode_data[0].mode.width_mm; nt->connector = connector; return nt->desc->num_modes; } static const struct drm_panel_funcs nt35950_panel_funcs = { .prepare = nt35950_prepare, .unprepare = nt35950_unprepare, .get_modes = nt35950_get_modes, }; static int nt35950_probe(struct mipi_dsi_device *dsi) { struct device *dev = &dsi->dev; struct device_node *dsi_r; struct mipi_dsi_host *dsi_r_host; struct nt35950 *nt; const struct mipi_dsi_device_info *info; int i, num_dsis = 1, ret; nt = devm_kzalloc(dev, sizeof(*nt), GFP_KERNEL); if (!nt) return -ENOMEM; ret = nt35950_sharp_init_vregs(nt, dev); if (ret) return dev_err_probe(dev, ret, "Regulator init failure.\n"); nt->desc = of_device_get_match_data(dev); if (!nt->desc) return -ENODEV; nt->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_ASIS); if (IS_ERR(nt->reset_gpio)) { return dev_err_probe(dev, PTR_ERR(nt->reset_gpio), "Failed to get reset gpio\n"); } /* If the panel is connected on two DSIs then DSI0 left, DSI1 right */ if (nt->desc->is_dual_dsi) { info = &nt->desc->dsi_info; dsi_r = of_graph_get_remote_node(dsi->dev.of_node, 1, -1); if (!dsi_r) { dev_err(dev, "Cannot get secondary DSI node.\n"); return -ENODEV; } dsi_r_host = 
of_find_mipi_dsi_host_by_node(dsi_r); of_node_put(dsi_r); if (!dsi_r_host) { dev_err(dev, "Cannot get secondary DSI host\n"); return -EPROBE_DEFER; } nt->dsi[1] = mipi_dsi_device_register_full(dsi_r_host, info); if (!nt->dsi[1]) { dev_err(dev, "Cannot get secondary DSI node\n"); return -ENODEV; } num_dsis++; } nt->dsi[0] = dsi; mipi_dsi_set_drvdata(dsi, nt); drm_panel_init(&nt->panel, dev, &nt35950_panel_funcs, DRM_MODE_CONNECTOR_DSI); ret = drm_panel_of_backlight(&nt->panel); if (ret) { if (num_dsis == 2) mipi_dsi_device_unregister(nt->dsi[1]); return dev_err_probe(dev, ret, "Failed to get backlight\n"); } drm_panel_add(&nt->panel); for (i = 0; i < num_dsis; i++) { nt->dsi[i]->lanes = nt->desc->num_lanes; nt->dsi[i]->format = MIPI_DSI_FMT_RGB888; nt->dsi[i]->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM; if (nt->desc->mode_data[0].is_video_mode) nt->dsi[i]->mode_flags |= MIPI_DSI_MODE_VIDEO; ret = mipi_dsi_attach(nt->dsi[i]); if (ret < 0) { /* If we fail to attach to either host, we're done */ if (num_dsis == 2) mipi_dsi_device_unregister(nt->dsi[1]); return dev_err_probe(dev, ret, "Cannot attach to DSI%d host.\n", i); } } /* Make sure to set RESX LOW before starting the power-on sequence */ gpiod_set_value_cansleep(nt->reset_gpio, 0); return 0; } static void nt35950_remove(struct mipi_dsi_device *dsi) { struct nt35950 *nt = mipi_dsi_get_drvdata(dsi); int ret; ret = mipi_dsi_detach(nt->dsi[0]); if (ret < 0) dev_err(&dsi->dev, "Failed to detach from DSI0 host: %d\n", ret); if (nt->dsi[1]) { ret = mipi_dsi_detach(nt->dsi[1]); if (ret < 0) dev_err(&dsi->dev, "Failed to detach from DSI1 host: %d\n", ret); mipi_dsi_device_unregister(nt->dsi[1]); } drm_panel_remove(&nt->panel); } static const struct nt35950_panel_mode sharp_ls055d1sx04_modes[] = { { /* 1920x1080 60Hz no compression */ .mode = { .clock = 214537, .hdisplay = 1080, .hsync_start = 1080 + 400, .hsync_end = 1080 + 400 + 40, .htotal = 1080 + 400 + 40 + 300, .vdisplay = 1920, .vsync_start = 
1920 + 12, .vsync_end = 1920 + 12 + 2, .vtotal = 1920 + 12 + 2 + 10, .width_mm = 68, .height_mm = 121, }, .compression = MCS_DATA_COMPRESSION_NONE, .enable_sram = true, .is_video_mode = false, .scaler_on = 1, .scaler_mode = MCS_SCALEUP_DUPLICATE, }, /* TODO: Add 2160x3840 60Hz when DSC is supported */ }; static const struct nt35950_panel_desc sharp_ls055d1sx04 = { .model_name = "Sharp LS055D1SX04", .dsi_info = { .type = "LS055D1SX04", .channel = 0, .node = NULL, }, .mode_data = sharp_ls055d1sx04_modes, .num_modes = ARRAY_SIZE(sharp_ls055d1sx04_modes), .is_dual_dsi = true, .num_lanes = 4, }; static const struct of_device_id nt35950_of_match[] = { { .compatible = "sharp,ls055d1sx04", .data = &sharp_ls055d1sx04 }, { } }; MODULE_DEVICE_TABLE(of, nt35950_of_match); static struct mipi_dsi_driver nt35950_driver = { .probe = nt35950_probe, .remove = nt35950_remove, .driver = { .name = "panel-novatek-nt35950", .of_match_table = nt35950_of_match, }, }; module_mipi_dsi_driver(nt35950_driver); MODULE_AUTHOR("AngeloGioacchino Del Regno <[email protected]>"); MODULE_DESCRIPTION("Novatek NT35950 DriverIC panels driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/panel/panel-novatek-nt35950.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright 2021 Google Inc. * * Panel driver for the Samsung ATNA33XC20 panel. This panel can't be handled * by the DRM_PANEL_SIMPLE driver because its power sequencing is non-standard. */ #include <linux/backlight.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/iopoll.h> #include <linux/module.h> #include <linux/pm_runtime.h> #include <linux/regulator/consumer.h> #include <drm/display/drm_dp_aux_bus.h> #include <drm/display/drm_dp_helper.h> #include <drm/drm_edid.h> #include <drm/drm_panel.h> /* T3 VCC to HPD high is max 200 ms */ #define HPD_MAX_MS 200 #define HPD_MAX_US (HPD_MAX_MS * 1000) struct atana33xc20_panel { struct drm_panel base; bool prepared; bool enabled; bool el3_was_on; bool no_hpd; struct gpio_desc *hpd_gpio; struct regulator *supply; struct gpio_desc *el_on3_gpio; struct drm_dp_aux *aux; struct edid *edid; ktime_t powered_off_time; ktime_t powered_on_time; ktime_t el_on3_off_time; }; static inline struct atana33xc20_panel *to_atana33xc20(struct drm_panel *panel) { return container_of(panel, struct atana33xc20_panel, base); } static void atana33xc20_wait(ktime_t start_ktime, unsigned int min_ms) { ktime_t now_ktime, min_ktime; min_ktime = ktime_add(start_ktime, ms_to_ktime(min_ms)); now_ktime = ktime_get_boottime(); if (ktime_before(now_ktime, min_ktime)) msleep(ktime_to_ms(ktime_sub(min_ktime, now_ktime)) + 1); } static int atana33xc20_suspend(struct device *dev) { struct atana33xc20_panel *p = dev_get_drvdata(dev); int ret; /* * Note 3 (Example of power off sequence in detail) in spec * specifies to wait 150 ms after deasserting EL3_ON before * powering off. 
*/ if (p->el3_was_on) atana33xc20_wait(p->el_on3_off_time, 150); ret = regulator_disable(p->supply); if (ret) return ret; p->powered_off_time = ktime_get_boottime(); p->el3_was_on = false; return 0; } static int atana33xc20_resume(struct device *dev) { struct atana33xc20_panel *p = dev_get_drvdata(dev); int hpd_asserted; int ret; /* T12 (Power off time) is min 500 ms */ atana33xc20_wait(p->powered_off_time, 500); ret = regulator_enable(p->supply); if (ret) return ret; p->powered_on_time = ktime_get_boottime(); if (p->no_hpd) { msleep(HPD_MAX_MS); return 0; } if (p->hpd_gpio) { ret = readx_poll_timeout(gpiod_get_value_cansleep, p->hpd_gpio, hpd_asserted, hpd_asserted, 1000, HPD_MAX_US); if (hpd_asserted < 0) ret = hpd_asserted; if (ret) dev_warn(dev, "Error waiting for HPD GPIO: %d\n", ret); return ret; } if (p->aux->wait_hpd_asserted) { ret = p->aux->wait_hpd_asserted(p->aux, HPD_MAX_US); if (ret) dev_warn(dev, "Controller error waiting for HPD: %d\n", ret); return ret; } /* * Note that it's possible that no_hpd is false, hpd_gpio is * NULL, and wait_hpd_asserted is NULL. This is because * wait_hpd_asserted() is optional even if HPD is hooked up to * a dedicated pin on the eDP controller. In this case we just * assume that the controller driver will wait for HPD at the * right times. */ return 0; } static int atana33xc20_disable(struct drm_panel *panel) { struct atana33xc20_panel *p = to_atana33xc20(panel); /* Disabling when already disabled is a no-op */ if (!p->enabled) return 0; gpiod_set_value_cansleep(p->el_on3_gpio, 0); p->el_on3_off_time = ktime_get_boottime(); p->enabled = false; /* * Keep track of the fact that EL_ON3 was on but we haven't power * cycled yet. This lets us know that "el_on3_off_time" is recent (we * don't need to worry about ktime wraparounds) and also makes it * obvious if we try to enable again without a power cycle (see the * warning in atana33xc20_enable()). 
*/ p->el3_was_on = true; /* * Sleeping 20 ms here (after setting the GPIO) avoids a glitch when * powering off. */ msleep(20); return 0; } static int atana33xc20_enable(struct drm_panel *panel) { struct atana33xc20_panel *p = to_atana33xc20(panel); /* Enabling when already enabled is a no-op */ if (p->enabled) return 0; /* * Once EL_ON3 drops we absolutely need a power cycle before the next * enable or the backlight will never come on again. The code ensures * this because disable() is _always_ followed by unprepare() and * unprepare() forces a suspend with pm_runtime_put_sync_suspend(), * but let's track just to make sure since the requirement is so * non-obvious. */ if (WARN_ON(p->el3_was_on)) return -EIO; /* * Note 2 (Example of power on sequence in detail) in spec specifies * to wait 400 ms after powering on before asserting EL3_on. */ atana33xc20_wait(p->powered_on_time, 400); gpiod_set_value_cansleep(p->el_on3_gpio, 1); p->enabled = true; return 0; } static int atana33xc20_unprepare(struct drm_panel *panel) { struct atana33xc20_panel *p = to_atana33xc20(panel); int ret; /* Unpreparing when already unprepared is a no-op */ if (!p->prepared) return 0; /* * Purposely do a put_sync, don't use autosuspend. The panel's tcon * seems to sometimes crash when you stop giving it data and this is * the best way to ensure it will come back. * * NOTE: we still want autosuspend for cases where we only turn on * to get the EDID or otherwise send DP AUX commands to the panel. 
*/ ret = pm_runtime_put_sync_suspend(panel->dev); if (ret < 0) return ret; p->prepared = false; return 0; } static int atana33xc20_prepare(struct drm_panel *panel) { struct atana33xc20_panel *p = to_atana33xc20(panel); int ret; /* Preparing when already prepared is a no-op */ if (p->prepared) return 0; ret = pm_runtime_get_sync(panel->dev); if (ret < 0) { pm_runtime_put_autosuspend(panel->dev); return ret; } p->prepared = true; return 0; } static int atana33xc20_get_modes(struct drm_panel *panel, struct drm_connector *connector) { struct atana33xc20_panel *p = to_atana33xc20(panel); struct dp_aux_ep_device *aux_ep = to_dp_aux_ep_dev(panel->dev); int num = 0; pm_runtime_get_sync(panel->dev); if (!p->edid) p->edid = drm_get_edid(connector, &aux_ep->aux->ddc); num = drm_add_edid_modes(connector, p->edid); pm_runtime_mark_last_busy(panel->dev); pm_runtime_put_autosuspend(panel->dev); return num; } static const struct drm_panel_funcs atana33xc20_funcs = { .disable = atana33xc20_disable, .enable = atana33xc20_enable, .unprepare = atana33xc20_unprepare, .prepare = atana33xc20_prepare, .get_modes = atana33xc20_get_modes, }; static void atana33xc20_runtime_disable(void *data) { pm_runtime_disable(data); } static void atana33xc20_dont_use_autosuspend(void *data) { pm_runtime_dont_use_autosuspend(data); } static int atana33xc20_probe(struct dp_aux_ep_device *aux_ep) { struct atana33xc20_panel *panel; struct device *dev = &aux_ep->dev; int ret; panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL); if (!panel) return -ENOMEM; dev_set_drvdata(dev, panel); panel->aux = aux_ep->aux; panel->supply = devm_regulator_get(dev, "power"); if (IS_ERR(panel->supply)) return dev_err_probe(dev, PTR_ERR(panel->supply), "Failed to get power supply\n"); panel->el_on3_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW); if (IS_ERR(panel->el_on3_gpio)) return dev_err_probe(dev, PTR_ERR(panel->el_on3_gpio), "Failed to get enable GPIO\n"); panel->no_hpd = of_property_read_bool(dev->of_node, 
"no-hpd"); if (!panel->no_hpd) { panel->hpd_gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN); if (IS_ERR(panel->hpd_gpio)) return dev_err_probe(dev, PTR_ERR(panel->hpd_gpio), "Failed to get HPD GPIO\n"); } pm_runtime_enable(dev); ret = devm_add_action_or_reset(dev, atana33xc20_runtime_disable, dev); if (ret) return ret; pm_runtime_set_autosuspend_delay(dev, 2000); pm_runtime_use_autosuspend(dev); ret = devm_add_action_or_reset(dev, atana33xc20_dont_use_autosuspend, dev); if (ret) return ret; drm_panel_init(&panel->base, dev, &atana33xc20_funcs, DRM_MODE_CONNECTOR_eDP); pm_runtime_get_sync(dev); ret = drm_panel_dp_aux_backlight(&panel->base, aux_ep->aux); pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); if (ret) return dev_err_probe(dev, ret, "failed to register dp aux backlight\n"); drm_panel_add(&panel->base); return 0; } static void atana33xc20_remove(struct dp_aux_ep_device *aux_ep) { struct device *dev = &aux_ep->dev; struct atana33xc20_panel *panel = dev_get_drvdata(dev); drm_panel_remove(&panel->base); drm_panel_disable(&panel->base); drm_panel_unprepare(&panel->base); kfree(panel->edid); } static void atana33xc20_shutdown(struct dp_aux_ep_device *aux_ep) { struct device *dev = &aux_ep->dev; struct atana33xc20_panel *panel = dev_get_drvdata(dev); drm_panel_disable(&panel->base); drm_panel_unprepare(&panel->base); } static const struct of_device_id atana33xc20_dt_match[] = { { .compatible = "samsung,atna33xc20", }, { /* sentinal */ } }; MODULE_DEVICE_TABLE(of, atana33xc20_dt_match); static const struct dev_pm_ops atana33xc20_pm_ops = { SET_RUNTIME_PM_OPS(atana33xc20_suspend, atana33xc20_resume, NULL) SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; static struct dp_aux_ep_driver atana33xc20_driver = { .driver = { .name = "samsung_atana33xc20", .of_match_table = atana33xc20_dt_match, .pm = &atana33xc20_pm_ops, }, .probe = atana33xc20_probe, .remove = atana33xc20_remove, .shutdown = atana33xc20_shutdown, }; static 
int __init atana33xc20_init(void) { return dp_aux_dp_driver_register(&atana33xc20_driver); } module_init(atana33xc20_init); static void __exit atana33xc20_exit(void) { dp_aux_dp_driver_unregister(&atana33xc20_driver); } module_exit(atana33xc20_exit); MODULE_DESCRIPTION("Samsung ATANA33XC20 Panel Driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2018 Amarula Solutions * Author: Jagan Teki <[email protected]> */ #include <drm/drm_mipi_dsi.h> #include <drm/drm_modes.h> #include <drm/drm_panel.h> #include <linux/gpio/consumer.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/regulator/consumer.h> #define FEIYANG_INIT_CMD_LEN 2 struct feiyang { struct drm_panel panel; struct mipi_dsi_device *dsi; struct regulator *dvdd; struct regulator *avdd; struct gpio_desc *reset; }; static inline struct feiyang *panel_to_feiyang(struct drm_panel *panel) { return container_of(panel, struct feiyang, panel); } struct feiyang_init_cmd { u8 data[FEIYANG_INIT_CMD_LEN]; }; static const struct feiyang_init_cmd feiyang_init_cmds[] = { { .data = { 0x80, 0x58 } }, { .data = { 0x81, 0x47 } }, { .data = { 0x82, 0xD4 } }, { .data = { 0x83, 0x88 } }, { .data = { 0x84, 0xA9 } }, { .data = { 0x85, 0xC3 } }, { .data = { 0x86, 0x82 } }, }; static int feiyang_prepare(struct drm_panel *panel) { struct feiyang *ctx = panel_to_feiyang(panel); struct mipi_dsi_device *dsi = ctx->dsi; unsigned int i; int ret; ret = regulator_enable(ctx->dvdd); if (ret) return ret; /* T1 (dvdd start + dvdd rise) 0 < T1 <= 10ms */ msleep(10); ret = regulator_enable(ctx->avdd); if (ret) return ret; /* T3 (dvdd rise + avdd start + avdd rise) T3 >= 20ms */ msleep(20); gpiod_set_value(ctx->reset, 0); /* * T5 + T6 (avdd rise + video & logic signal rise) * T5 >= 10ms, 0 < T6 <= 10ms */ msleep(20); gpiod_set_value(ctx->reset, 1); /* T12 (video & logic signal rise + backlight rise) T12 >= 200ms */ msleep(200); for (i = 0; i < ARRAY_SIZE(feiyang_init_cmds); i++) { const struct feiyang_init_cmd *cmd = &feiyang_init_cmds[i]; ret = mipi_dsi_dcs_write_buffer(dsi, cmd->data, FEIYANG_INIT_CMD_LEN); if (ret < 0) return ret; } return 0; } static int feiyang_enable(struct drm_panel *panel) { struct feiyang *ctx = panel_to_feiyang(panel); /* T12 (video & logic signal rise + 
backlight rise) T12 >= 200ms */ msleep(200); mipi_dsi_dcs_set_display_on(ctx->dsi); return 0; } static int feiyang_disable(struct drm_panel *panel) { struct feiyang *ctx = panel_to_feiyang(panel); return mipi_dsi_dcs_set_display_off(ctx->dsi); } static int feiyang_unprepare(struct drm_panel *panel) { struct feiyang *ctx = panel_to_feiyang(panel); int ret; ret = mipi_dsi_dcs_set_display_off(ctx->dsi); if (ret < 0) dev_err(panel->dev, "failed to set display off: %d\n", ret); ret = mipi_dsi_dcs_enter_sleep_mode(ctx->dsi); if (ret < 0) dev_err(panel->dev, "failed to enter sleep mode: %d\n", ret); /* T13 (backlight fall + video & logic signal fall) T13 >= 200ms */ msleep(200); gpiod_set_value(ctx->reset, 0); regulator_disable(ctx->avdd); /* T11 (dvdd rise to fall) 0 < T11 <= 10ms */ msleep(10); regulator_disable(ctx->dvdd); return 0; } static const struct drm_display_mode feiyang_default_mode = { .clock = 55000, .hdisplay = 1024, .hsync_start = 1024 + 310, .hsync_end = 1024 + 310 + 20, .htotal = 1024 + 310 + 20 + 90, .vdisplay = 600, .vsync_start = 600 + 12, .vsync_end = 600 + 12 + 2, .vtotal = 600 + 12 + 2 + 21, .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, }; static int feiyang_get_modes(struct drm_panel *panel, struct drm_connector *connector) { struct feiyang *ctx = panel_to_feiyang(panel); struct drm_display_mode *mode; mode = drm_mode_duplicate(connector->dev, &feiyang_default_mode); if (!mode) { dev_err(&ctx->dsi->dev, "failed to add mode %ux%u@%u\n", feiyang_default_mode.hdisplay, feiyang_default_mode.vdisplay, drm_mode_vrefresh(&feiyang_default_mode)); return -ENOMEM; } drm_mode_set_name(mode); drm_mode_probed_add(connector, mode); return 1; } static const struct drm_panel_funcs feiyang_funcs = { .disable = feiyang_disable, .unprepare = feiyang_unprepare, .prepare = feiyang_prepare, .enable = feiyang_enable, .get_modes = feiyang_get_modes, }; static int feiyang_dsi_probe(struct mipi_dsi_device *dsi) { struct feiyang *ctx; int ret; ctx = 
devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; mipi_dsi_set_drvdata(dsi, ctx); ctx->dsi = dsi; drm_panel_init(&ctx->panel, &dsi->dev, &feiyang_funcs, DRM_MODE_CONNECTOR_DSI); ctx->dvdd = devm_regulator_get(&dsi->dev, "dvdd"); if (IS_ERR(ctx->dvdd)) return dev_err_probe(&dsi->dev, PTR_ERR(ctx->dvdd), "Couldn't get dvdd regulator\n"); ctx->avdd = devm_regulator_get(&dsi->dev, "avdd"); if (IS_ERR(ctx->avdd)) return dev_err_probe(&dsi->dev, PTR_ERR(ctx->avdd), "Couldn't get avdd regulator\n"); ctx->reset = devm_gpiod_get_optional(&dsi->dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(ctx->reset)) return dev_err_probe(&dsi->dev, PTR_ERR(ctx->reset), "Couldn't get our reset GPIO\n"); ret = drm_panel_of_backlight(&ctx->panel); if (ret) return ret; drm_panel_add(&ctx->panel); dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST; dsi->format = MIPI_DSI_FMT_RGB888; dsi->lanes = 4; ret = mipi_dsi_attach(dsi); if (ret < 0) { drm_panel_remove(&ctx->panel); return ret; } return 0; } static void feiyang_dsi_remove(struct mipi_dsi_device *dsi) { struct feiyang *ctx = mipi_dsi_get_drvdata(dsi); mipi_dsi_detach(dsi); drm_panel_remove(&ctx->panel); } static const struct of_device_id feiyang_of_match[] = { { .compatible = "feiyang,fy07024di26a30d", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, feiyang_of_match); static struct mipi_dsi_driver feiyang_driver = { .probe = feiyang_dsi_probe, .remove = feiyang_dsi_remove, .driver = { .name = "feiyang-fy07024di26a30d", .of_match_table = feiyang_of_match, }, }; module_mipi_dsi_driver(feiyang_driver); MODULE_AUTHOR("Jagan Teki <[email protected]>"); MODULE_DESCRIPTION("Feiyang FY07024DI26A30-D MIPI-DSI LCD panel"); MODULE_LICENSE("GPL");
linux-master
drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
// SPDX-License-Identifier: GPL-2.0 /* * Panel driver for the ARM Versatile family reference designs from * ARM Limited. * * Author: * Linus Walleij <[email protected]> * * On the Versatile AB, these panels come mounted on daughterboards * named "IB1" or "IB2" (Interface Board 1 & 2 respectively.) They * are documented in ARM DUI 0225D Appendix C and D. These daughter * boards support TFT display panels. * * - The IB1 is a passive board where the display connector defines a * few wires for encoding the display type for autodetection, * suitable display settings can then be looked up from this setting. * The magic bits can be read out from the system controller. * * - The IB2 is a more complex board intended for GSM phone development * with some logic and a control register, which needs to be accessed * and the board display needs to be turned on explicitly. * * On the Versatile PB, a special CLCD adaptor board is available * supporting the same displays as the Versatile AB, plus one more * Epson QCIF display. * */ #include <linux/bitops.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/mfd/syscon.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <video/of_videomode.h> #include <video/videomode.h> #include <drm/drm_modes.h> #include <drm/drm_panel.h> /* * This configuration register in the Versatile and RealView * family is uniformly present but appears more and more * unutilized starting with the RealView series. 
*/ #define SYS_CLCD 0x50 /* The Versatile can detect the connected panel type */ #define SYS_CLCD_CLCDID_MASK (BIT(8)|BIT(9)|BIT(10)|BIT(11)|BIT(12)) #define SYS_CLCD_ID_SANYO_3_8 (0x00 << 8) #define SYS_CLCD_ID_SHARP_8_4 (0x01 << 8) #define SYS_CLCD_ID_EPSON_2_2 (0x02 << 8) #define SYS_CLCD_ID_SANYO_2_5 (0x07 << 8) #define SYS_CLCD_ID_VGA (0x1f << 8) /* IB2 control register for the Versatile daughterboard */ #define IB2_CTRL 0x00 #define IB2_CTRL_LCD_SD BIT(1) /* 1 = shut down LCD */ #define IB2_CTRL_LCD_BL_ON BIT(0) #define IB2_CTRL_LCD_MASK (BIT(0)|BIT(1)) /** * struct versatile_panel_type - lookup struct for the supported panels */ struct versatile_panel_type { /** * @name: the name of this panel */ const char *name; /** * @magic: the magic value from the detection register */ u32 magic; /** * @mode: the DRM display mode for this panel */ struct drm_display_mode mode; /** * @bus_flags: the DRM bus flags for this panel e.g. inverted clock */ u32 bus_flags; /** * @width_mm: the panel width in mm */ u32 width_mm; /** * @height_mm: the panel height in mm */ u32 height_mm; /** * @ib2: the panel may be connected on an IB2 daughterboard */ bool ib2; }; /** * struct versatile_panel - state container for the Versatile panels */ struct versatile_panel { /** * @dev: the container device */ struct device *dev; /** * @panel: the DRM panel instance for this device */ struct drm_panel panel; /** * @panel_type: the Versatile panel type as detected */ const struct versatile_panel_type *panel_type; /** * @map: map to the parent syscon where the main register reside */ struct regmap *map; /** * @ib2_map: map to the IB2 syscon, if applicable */ struct regmap *ib2_map; }; static const struct versatile_panel_type versatile_panels[] = { /* * Sanyo TM38QV67A02A - 3.8 inch QVGA (320x240) Color TFT * found on the Versatile AB IB1 connector or the Versatile * PB adaptor board connector. 
*/ { .name = "Sanyo TM38QV67A02A", .magic = SYS_CLCD_ID_SANYO_3_8, .width_mm = 79, .height_mm = 54, .mode = { .clock = 10000, .hdisplay = 320, .hsync_start = 320 + 6, .hsync_end = 320 + 6 + 6, .htotal = 320 + 6 + 6 + 6, .vdisplay = 240, .vsync_start = 240 + 5, .vsync_end = 240 + 5 + 6, .vtotal = 240 + 5 + 6 + 5, .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, }, }, /* * Sharp LQ084V1DG21 640x480 VGA Color TFT module * found on the Versatile AB IB1 connector or the Versatile * PB adaptor board connector. */ { .name = "Sharp LQ084V1DG21", .magic = SYS_CLCD_ID_SHARP_8_4, .width_mm = 171, .height_mm = 130, .mode = { .clock = 25000, .hdisplay = 640, .hsync_start = 640 + 24, .hsync_end = 640 + 24 + 96, .htotal = 640 + 24 + 96 + 24, .vdisplay = 480, .vsync_start = 480 + 11, .vsync_end = 480 + 11 + 2, .vtotal = 480 + 11 + 2 + 32, .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, }, }, /* * Epson L2F50113T00 - 2.2 inch QCIF 176x220 Color TFT * found on the Versatile PB adaptor board connector. */ { .name = "Epson L2F50113T00", .magic = SYS_CLCD_ID_EPSON_2_2, .width_mm = 34, .height_mm = 45, .mode = { .clock = 62500, .hdisplay = 176, .hsync_start = 176 + 2, .hsync_end = 176 + 2 + 3, .htotal = 176 + 2 + 3 + 3, .vdisplay = 220, .vsync_start = 220 + 0, .vsync_end = 220 + 0 + 2, .vtotal = 220 + 0 + 2 + 1, .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, }, .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE, }, /* * Sanyo ALR252RGT 240x320 portrait display found on the * Versatile AB IB2 daughterboard for GSM prototyping. 
*/ { .name = "Sanyo ALR252RGT", .magic = SYS_CLCD_ID_SANYO_2_5, .width_mm = 37, .height_mm = 50, .mode = { .clock = 5400, .hdisplay = 240, .hsync_start = 240 + 10, .hsync_end = 240 + 10 + 10, .htotal = 240 + 10 + 10 + 20, .vdisplay = 320, .vsync_start = 320 + 2, .vsync_end = 320 + 2 + 2, .vtotal = 320 + 2 + 2 + 2, .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, }, .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE, .ib2 = true, }, }; static inline struct versatile_panel * to_versatile_panel(struct drm_panel *panel) { return container_of(panel, struct versatile_panel, panel); } static int versatile_panel_disable(struct drm_panel *panel) { struct versatile_panel *vpanel = to_versatile_panel(panel); /* If we're on an IB2 daughterboard, turn off display */ if (vpanel->ib2_map) { dev_dbg(vpanel->dev, "disable IB2 display\n"); regmap_update_bits(vpanel->ib2_map, IB2_CTRL, IB2_CTRL_LCD_MASK, IB2_CTRL_LCD_SD); } return 0; } static int versatile_panel_enable(struct drm_panel *panel) { struct versatile_panel *vpanel = to_versatile_panel(panel); /* If we're on an IB2 daughterboard, turn on display */ if (vpanel->ib2_map) { dev_dbg(vpanel->dev, "enable IB2 display\n"); regmap_update_bits(vpanel->ib2_map, IB2_CTRL, IB2_CTRL_LCD_MASK, IB2_CTRL_LCD_BL_ON); } return 0; } static int versatile_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector) { struct versatile_panel *vpanel = to_versatile_panel(panel); struct drm_display_mode *mode; connector->display_info.width_mm = vpanel->panel_type->width_mm; connector->display_info.height_mm = vpanel->panel_type->height_mm; connector->display_info.bus_flags = vpanel->panel_type->bus_flags; mode = drm_mode_duplicate(connector->dev, &vpanel->panel_type->mode); drm_mode_set_name(mode); mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; mode->width_mm = vpanel->panel_type->width_mm; mode->height_mm = vpanel->panel_type->height_mm; drm_mode_probed_add(connector, mode); return 1; } static const struct 
drm_panel_funcs versatile_panel_drm_funcs = { .disable = versatile_panel_disable, .enable = versatile_panel_enable, .get_modes = versatile_panel_get_modes, }; static int versatile_panel_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct versatile_panel *vpanel; struct device *parent; struct regmap *map; int ret; u32 val; int i; parent = dev->parent; if (!parent) { dev_err(dev, "no parent for versatile panel\n"); return -ENODEV; } map = syscon_node_to_regmap(parent->of_node); if (IS_ERR(map)) { dev_err(dev, "no regmap for versatile panel parent\n"); return PTR_ERR(map); } vpanel = devm_kzalloc(dev, sizeof(*vpanel), GFP_KERNEL); if (!vpanel) return -ENOMEM; ret = regmap_read(map, SYS_CLCD, &val); if (ret) { dev_err(dev, "cannot access syscon regs\n"); return ret; } val &= SYS_CLCD_CLCDID_MASK; for (i = 0; i < ARRAY_SIZE(versatile_panels); i++) { const struct versatile_panel_type *pt; pt = &versatile_panels[i]; if (pt->magic == val) { vpanel->panel_type = pt; break; } } /* No panel detected or VGA, let's leave this show */ if (i == ARRAY_SIZE(versatile_panels)) { dev_info(dev, "no panel detected\n"); return -ENODEV; } dev_info(dev, "detected: %s\n", vpanel->panel_type->name); vpanel->dev = dev; vpanel->map = map; /* Check if the panel is mounted on an IB2 daughterboard */ if (vpanel->panel_type->ib2) { vpanel->ib2_map = syscon_regmap_lookup_by_compatible( "arm,versatile-ib2-syscon"); if (IS_ERR(vpanel->ib2_map)) vpanel->ib2_map = NULL; else dev_info(dev, "panel mounted on IB2 daughterboard\n"); } drm_panel_init(&vpanel->panel, dev, &versatile_panel_drm_funcs, DRM_MODE_CONNECTOR_DPI); drm_panel_add(&vpanel->panel); return 0; } static const struct of_device_id versatile_panel_match[] = { { .compatible = "arm,versatile-tft-panel", }, {}, }; MODULE_DEVICE_TABLE(of, versatile_panel_match); static struct platform_driver versatile_panel_driver = { .probe = versatile_panel_probe, .driver = { .name = "versatile-tft-panel", .of_match_table = 
versatile_panel_match, }, }; module_platform_driver(versatile_panel_driver); MODULE_AUTHOR("Linus Walleij <[email protected]>"); MODULE_DESCRIPTION("ARM Versatile panel driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/panel/panel-arm-versatile.c
// SPDX-License-Identifier: GPL-2.0 /* * Magnachip d53e6ea8966 MIPI-DSI panel driver * Copyright (C) 2023 Chris Morgan */ #include <drm/drm_mipi_dbi.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_modes.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> #include <linux/backlight.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/media-bus-format.h> #include <linux/module.h> #include <linux/of.h> #include <linux/regulator/consumer.h> #include <linux/spi/spi.h> #include <video/mipi_display.h> /* Forward declaration for use in backlight function */ struct d53e6ea8966; /* Panel info, unique to each panel */ struct d53e6ea8966_panel_info { /** @display_modes: the supported display modes */ const struct drm_display_mode *display_modes; /** @num_modes: the number of supported display modes */ unsigned int num_modes; /** @width_mm: panel width in mm */ u16 width_mm; /** @height_mm: panel height in mm */ u16 height_mm; /** @bus_flags: drm bus flags for panel */ u32 bus_flags; /** @panel_init_seq: panel specific init sequence */ void (*panel_init_seq)(struct d53e6ea8966 *db); /** @backlight_register: panel backlight registration or NULL */ int (*backlight_register)(struct d53e6ea8966 *db); }; struct d53e6ea8966 { /** @dev: the container device */ struct device *dev; /** @dbi: the DBI bus abstraction handle */ struct mipi_dbi dbi; /** @panel: the DRM panel instance for this device */ struct drm_panel panel; /** @reset: reset GPIO line */ struct gpio_desc *reset; /** @enable: enable GPIO line */ struct gpio_desc *enable; /** @reg_vdd: VDD supply regulator for panel logic */ struct regulator *reg_vdd; /** @reg_elvdd: ELVDD supply regulator for panel display */ struct regulator *reg_elvdd; /** @dsi_dev: DSI child device (panel) */ struct mipi_dsi_device *dsi_dev; /** @bl_dev: pseudo-backlight device for oled panel */ struct backlight_device *bl_dev; /** @panel_info: struct containing panel timing 
and info */ const struct d53e6ea8966_panel_info *panel_info; }; #define NUM_GAMMA_LEVELS 16 #define GAMMA_TABLE_COUNT 23 #define MAX_BRIGHTNESS (NUM_GAMMA_LEVELS - 1) #define MCS_ELVSS_ON 0xb1 #define MCS_TEMP_SWIRE 0xb2 #define MCS_PASSWORD_0 0xf0 #define MCS_PASSWORD_1 0xf1 #define MCS_ANALOG_PWR_CTL_0 0xf4 #define MCS_ANALOG_PWR_CTL_1 0xf5 #define MCS_GTCON_SET 0xf7 #define MCS_GATELESS_SIGNAL_SET 0xf8 #define MCS_SET_GAMMA 0xf9 static inline struct d53e6ea8966 *to_d53e6ea8966(struct drm_panel *panel) { return container_of(panel, struct d53e6ea8966, panel); } /* Table of gamma values provided in datasheet */ static u8 ams495qa01_gamma[NUM_GAMMA_LEVELS][GAMMA_TABLE_COUNT] = { {0x01, 0x79, 0x78, 0x8d, 0xd9, 0xdf, 0xd5, 0xcb, 0xcf, 0xc5, 0xe5, 0xe0, 0xe4, 0xdc, 0xb8, 0xd4, 0xfa, 0xed, 0xe6, 0x2f, 0x00, 0x2f}, {0x01, 0x7d, 0x7c, 0x92, 0xd7, 0xdd, 0xd2, 0xcb, 0xd0, 0xc6, 0xe5, 0xe1, 0xe3, 0xda, 0xbd, 0xd3, 0xfa, 0xed, 0xe6, 0x2f, 0x00, 0x2f}, {0x01, 0x7f, 0x7e, 0x95, 0xd7, 0xde, 0xd2, 0xcb, 0xcf, 0xc5, 0xe5, 0xe3, 0xe3, 0xda, 0xbf, 0xd3, 0xfa, 0xed, 0xe6, 0x2f, 0x00, 0x2f}, {0x01, 0x82, 0x81, 0x99, 0xd6, 0xdd, 0xd1, 0xca, 0xcf, 0xc3, 0xe4, 0xe3, 0xe3, 0xda, 0xc2, 0xd3, 0xfa, 0xed, 0xe6, 0x2f, 0x00, 0x2f}, {0x01, 0x84, 0x83, 0x9b, 0xd7, 0xde, 0xd2, 0xc8, 0xce, 0xc2, 0xe4, 0xe3, 0xe2, 0xd9, 0xc3, 0xd3, 0xfa, 0xed, 0xe6, 0x2f, 0x00, 0x2f}, {0x01, 0x87, 0x86, 0x9f, 0xd6, 0xdd, 0xd1, 0xc7, 0xce, 0xc1, 0xe4, 0xe3, 0xe2, 0xd9, 0xc6, 0xd3, 0xfa, 0xed, 0xe6, 0x2f, 0x00, 0x2f}, {0x01, 0x89, 0x89, 0xa2, 0xd5, 0xdb, 0xcf, 0xc8, 0xcf, 0xc2, 0xe3, 0xe3, 0xe1, 0xd9, 0xc7, 0xd3, 0xfa, 0xed, 0xe6, 0x2f, 0x00, 0x2f}, {0x01, 0x8b, 0x8b, 0xa5, 0xd5, 0xdb, 0xcf, 0xc7, 0xce, 0xc0, 0xe3, 0xe3, 0xe1, 0xd8, 0xc7, 0xd3, 0xfa, 0xed, 0xe6, 0x2f, 0x00, 0x2f}, {0x01, 0x8d, 0x8d, 0xa7, 0xd5, 0xdb, 0xcf, 0xc6, 0xce, 0xc0, 0xe4, 0xe4, 0xe1, 0xd7, 0xc8, 0xd3, 0xfa, 0xed, 0xe6, 0x2f, 0x00, 0x2f}, {0x01, 0x8f, 0x8f, 0xaa, 0xd4, 0xdb, 0xce, 0xc6, 0xcd, 0xbf, 0xe3, 0xe3, 0xe1, 0xd7, 0xca, 0xd3, 0xfa, 
0xed, 0xe6, 0x2f, 0x00, 0x2f}, {0x01, 0x91, 0x91, 0xac, 0xd3, 0xda, 0xce, 0xc5, 0xcd, 0xbe, 0xe3, 0xe3, 0xe0, 0xd7, 0xca, 0xd3, 0xfa, 0xed, 0xe6, 0x2f, 0x00, 0x2f}, {0x01, 0x93, 0x93, 0xaf, 0xd3, 0xda, 0xcd, 0xc5, 0xcd, 0xbe, 0xe2, 0xe3, 0xdf, 0xd6, 0xca, 0xd3, 0xfa, 0xed, 0xe6, 0x2f, 0x00, 0x2f}, {0x01, 0x95, 0x95, 0xb1, 0xd2, 0xd9, 0xcc, 0xc4, 0xcd, 0xbe, 0xe2, 0xe3, 0xdf, 0xd7, 0xcc, 0xd3, 0xfa, 0xed, 0xe6, 0x2f, 0x00, 0x2f}, {0x01, 0x99, 0x99, 0xb6, 0xd1, 0xd9, 0xcc, 0xc3, 0xcb, 0xbc, 0xe2, 0xe4, 0xdf, 0xd6, 0xcc, 0xd3, 0xfa, 0xed, 0xe6, 0x2f, 0x00, 0x2f}, {0x01, 0x9c, 0x9c, 0xba, 0xd0, 0xd8, 0xcb, 0xc3, 0xcb, 0xbb, 0xe2, 0xe4, 0xdf, 0xd6, 0xce, 0xd3, 0xfa, 0xed, 0xe6, 0x2f, 0x00, 0x2f}, {0x01, 0x9f, 0x9f, 0xbe, 0xcf, 0xd7, 0xc9, 0xc2, 0xcb, 0xbb, 0xe1, 0xe3, 0xde, 0xd6, 0xd0, 0xd3, 0xfa, 0xed, 0xe6, 0x2f, 0x00, 0x2f}, }; /* * Table of elvss values provided in datasheet and corresponds to * gamma values. */ static u8 ams495qa01_elvss[NUM_GAMMA_LEVELS] = { 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x14, 0x14, 0x13, 0x12, }; static int ams495qa01_update_gamma(struct mipi_dbi *dbi, int brightness) { int tmp = brightness; mipi_dbi_command_buf(dbi, MCS_SET_GAMMA, ams495qa01_gamma[tmp], ARRAY_SIZE(ams495qa01_gamma[tmp])); mipi_dbi_command(dbi, MCS_SET_GAMMA, 0x00); /* Undocumented command */ mipi_dbi_command(dbi, 0x26, 0x00); mipi_dbi_command(dbi, MCS_TEMP_SWIRE, ams495qa01_elvss[tmp]); return 0; } static void ams495qa01_panel_init(struct d53e6ea8966 *db) { struct mipi_dbi *dbi = &db->dbi; mipi_dbi_command(dbi, MCS_PASSWORD_0, 0x5a, 0x5a); mipi_dbi_command(dbi, MCS_PASSWORD_1, 0x5a, 0x5a); /* Undocumented commands */ mipi_dbi_command(dbi, 0xb0, 0x02); mipi_dbi_command(dbi, 0xf3, 0x3b); mipi_dbi_command(dbi, MCS_ANALOG_PWR_CTL_0, 0x33, 0x42, 0x00, 0x08); mipi_dbi_command(dbi, MCS_ANALOG_PWR_CTL_1, 0x00, 0x06, 0x26, 0x35, 0x03); /* Undocumented commands */ mipi_dbi_command(dbi, 0xf6, 0x02); mipi_dbi_command(dbi, 0xc6, 0x0b, 0x00, 0x00, 
0x3c, 0x00, 0x22, 0x00, 0x00, 0x00, 0x00); mipi_dbi_command(dbi, MCS_GTCON_SET, 0x20); mipi_dbi_command(dbi, MCS_TEMP_SWIRE, 0x06, 0x06, 0x06, 0x06); mipi_dbi_command(dbi, MCS_ELVSS_ON, 0x07, 0x00, 0x10); mipi_dbi_command(dbi, MCS_GATELESS_SIGNAL_SET, 0x7f, 0x7a, 0x89, 0x67, 0x26, 0x38, 0x00, 0x00, 0x09, 0x67, 0x70, 0x88, 0x7a, 0x76, 0x05, 0x09, 0x23, 0x23, 0x23); /* Undocumented commands */ mipi_dbi_command(dbi, 0xb5, 0xff, 0xef, 0x35, 0x42, 0x0d, 0xd7, 0xff, 0x07, 0xff, 0xff, 0xfd, 0x00, 0x01, 0xff, 0x05, 0x12, 0x0f, 0xff, 0xff, 0xff, 0xff); mipi_dbi_command(dbi, 0xb4, 0x15); mipi_dbi_command(dbi, 0xb3, 0x00); ams495qa01_update_gamma(dbi, MAX_BRIGHTNESS); } static int d53e6ea8966_prepare(struct drm_panel *panel) { struct d53e6ea8966 *db = to_d53e6ea8966(panel); int ret; /* Power up */ ret = regulator_enable(db->reg_vdd); if (ret) { dev_err(db->dev, "failed to enable vdd regulator: %d\n", ret); return ret; } if (db->reg_elvdd) { ret = regulator_enable(db->reg_elvdd); if (ret) { dev_err(db->dev, "failed to enable elvdd regulator: %d\n", ret); regulator_disable(db->reg_vdd); return ret; } } /* Enable */ if (db->enable) gpiod_set_value_cansleep(db->enable, 1); msleep(50); /* Reset */ gpiod_set_value_cansleep(db->reset, 1); usleep_range(1000, 5000); gpiod_set_value_cansleep(db->reset, 0); msleep(20); db->panel_info->panel_init_seq(db); return 0; } static int d53e6ea8966_enable(struct drm_panel *panel) { struct d53e6ea8966 *db = to_d53e6ea8966(panel); struct mipi_dbi *dbi = &db->dbi; mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE); msleep(200); mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON); usleep_range(10000, 15000); return 0; } static int d53e6ea8966_disable(struct drm_panel *panel) { struct d53e6ea8966 *db = to_d53e6ea8966(panel); struct mipi_dbi *dbi = &db->dbi; mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF); msleep(20); mipi_dbi_command(dbi, MIPI_DCS_ENTER_SLEEP_MODE); msleep(100); return 0; } static int d53e6ea8966_unprepare(struct drm_panel *panel) { struct 
d53e6ea8966 *db = to_d53e6ea8966(panel); if (db->enable) gpiod_set_value_cansleep(db->enable, 0); gpiod_set_value_cansleep(db->reset, 1); if (db->reg_elvdd) regulator_disable(db->reg_elvdd); regulator_disable(db->reg_vdd); msleep(100); return 0; } static int d53e6ea8966_get_modes(struct drm_panel *panel, struct drm_connector *connector) { struct d53e6ea8966 *db = to_d53e6ea8966(panel); const struct d53e6ea8966_panel_info *panel_info = db->panel_info; struct drm_display_mode *mode; static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24; unsigned int i; for (i = 0; i < panel_info->num_modes; i++) { mode = drm_mode_duplicate(connector->dev, &panel_info->display_modes[i]); if (!mode) return -ENOMEM; drm_mode_set_name(mode); drm_mode_probed_add(connector, mode); } connector->display_info.bpc = 8; connector->display_info.width_mm = panel_info->width_mm; connector->display_info.height_mm = panel_info->height_mm; connector->display_info.bus_flags = panel_info->bus_flags; drm_display_info_set_bus_formats(&connector->display_info, &bus_format, 1); return 1; } static const struct drm_panel_funcs d53e6ea8966_panel_funcs = { .disable = d53e6ea8966_disable, .enable = d53e6ea8966_enable, .get_modes = d53e6ea8966_get_modes, .prepare = d53e6ea8966_prepare, .unprepare = d53e6ea8966_unprepare, }; static int ams495qa01_set_brightness(struct backlight_device *bd) { struct d53e6ea8966 *db = bl_get_data(bd); struct mipi_dbi *dbi = &db->dbi; int brightness = backlight_get_brightness(bd); ams495qa01_update_gamma(dbi, brightness); return 0; } static const struct backlight_ops ams495qa01_backlight_ops = { .update_status = ams495qa01_set_brightness, }; static int ams495qa01_backlight_register(struct d53e6ea8966 *db) { struct backlight_properties props = { .type = BACKLIGHT_RAW, .brightness = MAX_BRIGHTNESS, .max_brightness = MAX_BRIGHTNESS, }; struct device *dev = db->dev; int ret = 0; db->bl_dev = devm_backlight_device_register(dev, "panel", dev, db, &ams495qa01_backlight_ops, &props); if 
(IS_ERR(db->bl_dev)) { ret = PTR_ERR(db->bl_dev); dev_err(dev, "error registering backlight device (%d)\n", ret); } return ret; } static int d53e6ea8966_probe(struct spi_device *spi) { struct device *dev = &spi->dev; struct mipi_dsi_host *dsi_host; struct d53e6ea8966 *db; int ret; struct mipi_dsi_device_info info = { .type = "d53e6ea8966", .channel = 0, .node = NULL, }; db = devm_kzalloc(dev, sizeof(*db), GFP_KERNEL); if (!db) return -ENOMEM; spi_set_drvdata(spi, db); db->dev = dev; db->panel_info = of_device_get_match_data(dev); if (!db->panel_info) return -EINVAL; db->reg_vdd = devm_regulator_get(dev, "vdd"); if (IS_ERR(db->reg_vdd)) return dev_err_probe(dev, PTR_ERR(db->reg_vdd), "Failed to get vdd supply\n"); db->reg_elvdd = devm_regulator_get_optional(dev, "elvdd"); if (IS_ERR(db->reg_elvdd)) db->reg_elvdd = NULL; db->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(db->reset)) { ret = PTR_ERR(db->reset); return dev_err_probe(dev, ret, "no RESET GPIO\n"); } db->enable = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW); if (IS_ERR(db->enable)) { ret = PTR_ERR(db->enable); return dev_err_probe(dev, ret, "cannot get ENABLE GPIO\n"); } ret = mipi_dbi_spi_init(spi, &db->dbi, NULL); if (ret) return dev_err_probe(dev, ret, "MIPI DBI init failed\n"); dsi_host = drm_of_get_dsi_bus(dev); if (IS_ERR(dsi_host)) { ret = PTR_ERR(dsi_host); return dev_err_probe(dev, ret, "Error attaching DSI bus\n"); } db->dsi_dev = devm_mipi_dsi_device_register_full(dev, dsi_host, &info); if (IS_ERR(db->dsi_dev)) { dev_err(dev, "failed to register dsi device: %ld\n", PTR_ERR(db->dsi_dev)); return PTR_ERR(db->dsi_dev); } db->dsi_dev->lanes = 2; db->dsi_dev->format = MIPI_DSI_FMT_RGB888; db->dsi_dev->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET; drm_panel_init(&db->panel, dev, &d53e6ea8966_panel_funcs, DRM_MODE_CONNECTOR_DSI); if (db->panel_info->backlight_register) { ret = 
db->panel_info->backlight_register(db); if (ret < 0) return ret; db->panel.backlight = db->bl_dev; } drm_panel_add(&db->panel); ret = devm_mipi_dsi_attach(dev, db->dsi_dev); if (ret < 0) { dev_err(dev, "mipi_dsi_attach failed: %d\n", ret); drm_panel_remove(&db->panel); return ret; } return 0; } static void d53e6ea8966_remove(struct spi_device *spi) { struct d53e6ea8966 *db = spi_get_drvdata(spi); drm_panel_remove(&db->panel); } static const struct drm_display_mode ams495qa01_modes[] = { { /* 60hz */ .clock = 33500, .hdisplay = 960, .hsync_start = 960 + 10, .hsync_end = 960 + 10 + 2, .htotal = 960 + 10 + 2 + 10, .vdisplay = 544, .vsync_start = 544 + 10, .vsync_end = 544 + 10 + 2, .vtotal = 544 + 10 + 2 + 10, .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, }, { /* 50hz */ .clock = 27800, .hdisplay = 960, .hsync_start = 960 + 10, .hsync_end = 960 + 10 + 2, .htotal = 960 + 10 + 2 + 10, .vdisplay = 544, .vsync_start = 544 + 10, .vsync_end = 544 + 10 + 2, .vtotal = 544 + 10 + 2 + 10, .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, .type = DRM_MODE_TYPE_DRIVER, }, }; static const struct d53e6ea8966_panel_info ams495qa01_info = { .display_modes = ams495qa01_modes, .num_modes = ARRAY_SIZE(ams495qa01_modes), .width_mm = 117, .height_mm = 74, .bus_flags = DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE, .panel_init_seq = ams495qa01_panel_init, .backlight_register = ams495qa01_backlight_register, }; static const struct of_device_id d53e6ea8966_match[] = { { .compatible = "samsung,ams495qa01", .data = &ams495qa01_info }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, d53e6ea8966_match); static const struct spi_device_id d53e6ea8966_ids[] = { { "ams495qa01", 0 }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(spi, d53e6ea8966_ids); static struct spi_driver d53e6ea8966_driver = { .driver = { .name = "d53e6ea8966-panel", .of_match_table = d53e6ea8966_match, }, .id_table = d53e6ea8966_ids, .probe = 
d53e6ea8966_probe, .remove = d53e6ea8966_remove, }; module_spi_driver(d53e6ea8966_driver); MODULE_AUTHOR("Chris Morgan <[email protected]>"); MODULE_DESCRIPTION("Magnachip d53e6ea8966 panel driver"); MODULE_LICENSE("GPL");
linux-master
drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>

#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>

/* Driver state for the ASUS Z00T TM5P5 NT35596 1080p video-mode DSI panel. */
struct tm5p5_nt35596 {
	struct drm_panel panel;
	struct mipi_dsi_device *dsi;
	struct regulator_bulk_data supplies[2];	/* vdd + vddio */
	struct gpio_desc *reset_gpio;
	bool prepared;				/* guards double prepare/unprepare */
};

static inline struct tm5p5_nt35596 *to_tm5p5_nt35596(struct drm_panel *panel)
{
	return container_of(panel, struct tm5p5_nt35596, panel);
}

/* Hardware reset pulse; timings per vendor init sequence. */
static void tm5p5_nt35596_reset(struct tm5p5_nt35596 *ctx)
{
	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
	usleep_range(1000, 2000);
	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
	usleep_range(1000, 2000);
	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
	usleep_range(15000, 16000);
}

/*
 * Vendor init sequence (undocumented NT35596 registers), ending with
 * sleep-out (0x11), display-on (0x29) and control-display (0x53).
 * Do not reorder; the register pages selected via 0xff are stateful.
 */
static int tm5p5_nt35596_on(struct tm5p5_nt35596 *ctx)
{
	struct mipi_dsi_device *dsi = ctx->dsi;

	mipi_dsi_generic_write_seq(dsi, 0xff, 0x05);
	mipi_dsi_generic_write_seq(dsi, 0xfb, 0x01);
	mipi_dsi_generic_write_seq(dsi, 0xc5, 0x31);
	mipi_dsi_generic_write_seq(dsi, 0xff, 0x04);
	mipi_dsi_generic_write_seq(dsi, 0x01, 0x84);
	mipi_dsi_generic_write_seq(dsi, 0x05, 0x25);
	mipi_dsi_generic_write_seq(dsi, 0x06, 0x01);
	mipi_dsi_generic_write_seq(dsi, 0x07, 0x20);
	mipi_dsi_generic_write_seq(dsi, 0x08, 0x06);
	mipi_dsi_generic_write_seq(dsi, 0x09, 0x08);
	mipi_dsi_generic_write_seq(dsi, 0x0a, 0x10);
	mipi_dsi_generic_write_seq(dsi, 0x0b, 0x10);
	mipi_dsi_generic_write_seq(dsi, 0x0c, 0x10);
	mipi_dsi_generic_write_seq(dsi, 0x0d, 0x14);
	mipi_dsi_generic_write_seq(dsi, 0x0e, 0x14);
	mipi_dsi_generic_write_seq(dsi, 0x0f, 0x14);
	mipi_dsi_generic_write_seq(dsi, 0x10, 0x14);
	mipi_dsi_generic_write_seq(dsi, 0x11, 0x14);
	mipi_dsi_generic_write_seq(dsi, 0x12, 0x14);
	mipi_dsi_generic_write_seq(dsi, 0x17, 0xf3);
	mipi_dsi_generic_write_seq(dsi, 0x18, 0xc0);
	mipi_dsi_generic_write_seq(dsi, 0x19, 0xc0);
	mipi_dsi_generic_write_seq(dsi, 0x1a, 0xc0);
	mipi_dsi_generic_write_seq(dsi, 0x1b, 0xb3);
	mipi_dsi_generic_write_seq(dsi, 0x1c, 0xb3);
	mipi_dsi_generic_write_seq(dsi, 0x1d, 0xb3);
	mipi_dsi_generic_write_seq(dsi, 0x1e, 0xb3);
	mipi_dsi_generic_write_seq(dsi, 0x1f, 0xb3);
	mipi_dsi_generic_write_seq(dsi, 0x20, 0xb3);
	mipi_dsi_generic_write_seq(dsi, 0xfb, 0x01);
	mipi_dsi_generic_write_seq(dsi, 0xff, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xfb, 0x01);
	mipi_dsi_generic_write_seq(dsi, 0x35, 0x01);
	mipi_dsi_generic_write_seq(dsi, 0xd3, 0x06);
	mipi_dsi_generic_write_seq(dsi, 0xd4, 0x04);
	mipi_dsi_generic_write_seq(dsi, 0x5e, 0x0d);
	mipi_dsi_generic_write_seq(dsi, 0x11, 0x00);
	msleep(100);	/* sleep-out settle time */
	mipi_dsi_generic_write_seq(dsi, 0x29, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0x53, 0x24);

	return 0;
}

/* Display-off + sleep-in power-down sequence. */
static int tm5p5_nt35596_off(struct tm5p5_nt35596 *ctx)
{
	struct mipi_dsi_device *dsi = ctx->dsi;
	struct device *dev = &dsi->dev;
	int ret;

	ret = mipi_dsi_dcs_set_display_off(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to set display off: %d\n", ret);
		return ret;
	}
	msleep(60);

	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
		return ret;
	}

	/* Vendor-specific deep-standby command — purpose undocumented. */
	mipi_dsi_dcs_write_seq(dsi, 0x4f, 0x01);

	return 0;
}

/* Power up regulators, reset the panel and run the init sequence. */
static int tm5p5_nt35596_prepare(struct drm_panel *panel)
{
	struct tm5p5_nt35596 *ctx = to_tm5p5_nt35596(panel);
	struct device *dev = &ctx->dsi->dev;
	int ret;

	if (ctx->prepared)
		return 0;

	ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
	if (ret < 0) {
		dev_err(dev, "Failed to enable regulators: %d\n", ret);
		return ret;
	}

	tm5p5_nt35596_reset(ctx);

	ret = tm5p5_nt35596_on(ctx);
	if (ret < 0) {
		dev_err(dev, "Failed to initialize panel: %d\n", ret);
		/* Undo power-up so unprepare is not needed after failure. */
		gpiod_set_value_cansleep(ctx->reset_gpio, 0);
		regulator_bulk_disable(ARRAY_SIZE(ctx->supplies),
				       ctx->supplies);
		return ret;
	}

	ctx->prepared = true;
	return 0;
}

/* Shut the panel down and cut power; best-effort on DCS errors. */
static int tm5p5_nt35596_unprepare(struct drm_panel *panel)
{
	struct tm5p5_nt35596 *ctx = to_tm5p5_nt35596(panel);
	struct device *dev = &ctx->dsi->dev;
	int ret;

	if (!ctx->prepared)
		return 0;

	ret = tm5p5_nt35596_off(ctx);
	if (ret < 0)
		dev_err(dev, "Failed to un-initialize panel: %d\n", ret);

	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
	regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);

	ctx->prepared = false;
	return 0;
}

/* Single fixed 1080x1920@60 timing; clock derived from the totals. */
static const struct drm_display_mode tm5p5_nt35596_mode = {
	.clock = (1080 + 100 + 8 + 16) * (1920 + 4 + 2 + 4) * 60 / 1000,
	.hdisplay = 1080,
	.hsync_start = 1080 + 100,
	.hsync_end = 1080 + 100 + 8,
	.htotal = 1080 + 100 + 8 + 16,
	.vdisplay = 1920,
	.vsync_start = 1920 + 4,
	.vsync_end = 1920 + 4 + 2,
	.vtotal = 1920 + 4 + 2 + 4,
	.width_mm = 68,
	.height_mm = 121,
};

/* Report the one supported mode to the connector. Returns mode count. */
static int tm5p5_nt35596_get_modes(struct drm_panel *panel,
				   struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, &tm5p5_nt35596_mode);
	if (!mode)
		return -ENOMEM;

	drm_mode_set_name(mode);

	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	connector->display_info.width_mm = mode->width_mm;
	connector->display_info.height_mm = mode->height_mm;
	drm_mode_probed_add(connector, mode);

	return 1;
}

static const struct drm_panel_funcs tm5p5_nt35596_panel_funcs = {
	.prepare = tm5p5_nt35596_prepare,
	.unprepare = tm5p5_nt35596_unprepare,
	.get_modes = tm5p5_nt35596_get_modes,
};

/* Push the backlight level to the panel over DCS (HS mode required). */
static int tm5p5_nt35596_bl_update_status(struct backlight_device *bl)
{
	struct mipi_dsi_device *dsi = bl_get_data(bl);
	u16 brightness = backlight_get_brightness(bl);
	int ret;

	/* Brightness DCS commands must go out in HS mode, not LP. */
	dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;

	ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness);
	if (ret < 0)
		return ret;

	dsi->mode_flags |= MIPI_DSI_MODE_LPM;

	return 0;
}

/* Read the current brightness back from the panel (low byte only). */
static int tm5p5_nt35596_bl_get_brightness(struct backlight_device *bl)
{
	struct mipi_dsi_device *dsi = bl_get_data(bl);
	u16 brightness = bl->props.brightness;
	int ret;

	dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;

	ret = mipi_dsi_dcs_get_display_brightness(dsi, &brightness);
	if (ret < 0)
		return ret;

	dsi->mode_flags |= MIPI_DSI_MODE_LPM;

	return brightness & 0xff;
}

static const struct backlight_ops tm5p5_nt35596_bl_ops = {
	.update_status = tm5p5_nt35596_bl_update_status,
	.get_brightness = tm5p5_nt35596_bl_get_brightness,
};

/* Register a devm backlight device backed by the DCS brightness commands. */
static struct backlight_device *
tm5p5_nt35596_create_backlight(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	const struct backlight_properties props = {
		.type = BACKLIGHT_RAW,
		.brightness = 255,
		.max_brightness = 255,
	};

	return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
					      &tm5p5_nt35596_bl_ops, &props);
}

/* Acquire resources, configure the DSI link and register the panel. */
static int tm5p5_nt35596_probe(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	struct tm5p5_nt35596 *ctx;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->supplies[0].supply = "vdd";
	ctx->supplies[1].supply = "vddio";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
				      ctx->supplies);
	if (ret < 0) {
		dev_err(dev, "Failed to get regulators: %d\n", ret);
		return ret;
	}

	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(ctx->reset_gpio)) {
		ret = PTR_ERR(ctx->reset_gpio);
		dev_err(dev, "Failed to get reset-gpios: %d\n", ret);
		return ret;
	}

	ctx->dsi = dsi;
	mipi_dsi_set_drvdata(dsi, ctx);

	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
			  MIPI_DSI_MODE_VIDEO_BURST |
			  MIPI_DSI_MODE_VIDEO_HSE |
			  MIPI_DSI_MODE_NO_EOT_PACKET |
			  MIPI_DSI_CLOCK_NON_CONTINUOUS |
			  MIPI_DSI_MODE_LPM;

	drm_panel_init(&ctx->panel, dev, &tm5p5_nt35596_panel_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	ctx->panel.backlight = tm5p5_nt35596_create_backlight(dsi);
	if (IS_ERR(ctx->panel.backlight)) {
		ret = PTR_ERR(ctx->panel.backlight);
		dev_err(dev, "Failed to create backlight: %d\n", ret);
		return ret;
	}

	drm_panel_add(&ctx->panel);

	ret = mipi_dsi_attach(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
		return ret;
	}

	return 0;
}

/* Detach from the DSI host and unregister the panel. */
static void tm5p5_nt35596_remove(struct mipi_dsi_device *dsi)
{
	struct tm5p5_nt35596 *ctx = mipi_dsi_get_drvdata(dsi);
	int ret;

	ret = mipi_dsi_detach(dsi);
	if (ret < 0)
		dev_err(&dsi->dev,
			"Failed to detach from DSI host: %d\n", ret);

	drm_panel_remove(&ctx->panel);
}

static const struct of_device_id tm5p5_nt35596_of_match[] = {
	{ .compatible = "asus,z00t-tm5p5-n35596" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, tm5p5_nt35596_of_match);

static struct mipi_dsi_driver tm5p5_nt35596_driver = {
	.probe = tm5p5_nt35596_probe,
	.remove = tm5p5_nt35596_remove,
	.driver = {
		.name = "panel-tm5p5-nt35596",
		.of_match_table = tm5p5_nt35596_of_match,
	},
};
module_mipi_dsi_driver(tm5p5_nt35596_driver);

MODULE_AUTHOR("Konrad Dybcio <[email protected]>");
MODULE_DESCRIPTION("DRM driver for tm5p5 nt35596 1080p video mode dsi panel");
MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Asia Better Technology Ltd. Y030XX067A IPS LCD panel driver
 *
 * Copyright (C) 2020, Paul Cercueil <[email protected]>
 * Copyright (C) 2020, Christophe Branchereau <[email protected]>
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>

#include <drm/drm_modes.h>
#include <drm/drm_panel.h>

/* Bit/field helpers for the panel controller's 8-bit SPI registers 0x00-0x15. */
#define REG00_VBRT_CTRL(val)		(val)
#define REG01_COM_DC(val)		(val)
#define REG02_DA_CONTRAST(val)		(val)
#define REG02_VESA_SEL(val)		((val) << 5)
#define REG02_COMDC_SW			BIT(7)
#define REG03_VPOSITION(val)		(val)
#define REG03_BSMOUNT			BIT(5)
#define REG03_COMTST			BIT(6)
#define REG03_HPOSITION1		BIT(7)
#define REG04_HPOSITION1(val)		(val)
#define REG05_CLIP			BIT(0)
#define REG05_NVM_VREFRESH		BIT(1)
#define REG05_SLFR			BIT(2)
#define REG05_SLBRCHARGE(val)		((val) << 3)
#define REG05_PRECHARGE_LEVEL(val)	((val) << 6)
#define REG06_TEST5			BIT(0)
#define REG06_SLDWN			BIT(1)
#define REG06_SLRGT			BIT(2)
#define REG06_TEST2			BIT(3)
#define REG06_XPSAVE			BIT(4)
#define REG06_GAMMA_SEL(val)		((val) << 5)
#define REG06_NT			BIT(7)
#define REG07_TEST1			BIT(0)
#define REG07_HDVD_POL			BIT(1)
#define REG07_CK_POL			BIT(2)
#define REG07_TEST3			BIT(3)
#define REG07_TEST4			BIT(4)
#define REG07_480_LINEMASK		BIT(5)
#define REG07_AMPTST(val)		((val) << 6)
#define REG08_SLHRC(val)		(val)
#define REG08_CLOCK_DIV(val)		((val) << 2)
#define REG08_PANEL(val)		((val) << 5)
#define REG09_SUB_BRIGHT_R(val)		(val)
#define REG09_NW_NB			BIT(6)
#define REG09_IPCON			BIT(7)
#define REG0A_SUB_BRIGHT_B(val)		(val)
#define REG0A_PAIR			BIT(6)
#define REG0A_DE_SEL			BIT(7)
#define REG0B_MBK_POSITION(val)		(val)
#define REG0B_HD_FREERUN		BIT(4)
#define REG0B_VD_FREERUN		BIT(5)
#define REG0B_YUV2BIN(val)		((val) << 6)
#define REG0C_CONTRAST_R(val)		(val)
#define REG0C_DOUBLEREAD		BIT(7)
#define REG0D_CONTRAST_G(val)		(val)
#define REG0D_RGB_YUV			BIT(7)
#define REG0E_CONTRAST_B(val)		(val)
#define REG0E_PIXELCOLORDRIVE		BIT(7)
#define REG0F_ASPECT			BIT(0)
#define REG0F_OVERSCAN(val)		((val) << 1)
#define REG0F_FRAMEWIDTH(val)		((val) << 3)
#define REG10_BRIGHT(val)		(val)
#define REG11_SIG_GAIN(val)		(val)
#define REG11_SIGC_CNTL			BIT(6)
#define REG11_SIGC_POL			BIT(7)
#define REG12_COLOR(val)		(val)
#define REG12_PWCKSEL(val)		((val) << 6)
#define REG13_4096LEVEL_CNTL(val)	(val)
#define REG13_SL4096(val)		((val) << 4)
#define REG13_LIMITER_CONTROL		BIT(7)
#define REG14_PANEL_TEST(val)		(val)
#define REG15_NVM_LINK0			BIT(0)
#define REG15_NVM_LINK1			BIT(1)
#define REG15_NVM_LINK2			BIT(2)
#define REG15_NVM_LINK3			BIT(3)
#define REG15_NVM_LINK4			BIT(4)
#define REG15_NVM_LINK5			BIT(5)
#define REG15_NVM_LINK6			BIT(6)
#define REG15_NVM_LINK7			BIT(7)

/* Static description of one supported panel variant (OF match data). */
struct y030xx067a_info {
	const struct drm_display_mode *display_modes;
	unsigned int num_modes;
	u16 width_mm, height_mm;
	u32 bus_format, bus_flags;
};

/* Per-device driver state. */
struct y030xx067a {
	struct drm_panel panel;
	struct spi_device *spi;
	struct regmap *map;

	const struct y030xx067a_info *panel_info;

	struct regulator *supply;
	struct gpio_desc *reset_gpio;
};

static inline struct y030xx067a *to_y030xx067a(struct drm_panel *panel)
{
	return container_of(panel, struct y030xx067a, panel);
}

/* Full register init sequence, written in one shot after reset. */
static const struct reg_sequence y030xx067a_init_sequence[] = {
	{ 0x00, REG00_VBRT_CTRL(0x7f) },
	{ 0x01, REG01_COM_DC(0x3c) },
	{ 0x02, REG02_VESA_SEL(0x3) | REG02_DA_CONTRAST(0x1f) },
	{ 0x03, REG03_VPOSITION(0x0a) },
	{ 0x04, REG04_HPOSITION1(0xd2) },
	{ 0x05, REG05_CLIP | REG05_NVM_VREFRESH | REG05_SLBRCHARGE(0x2) },
	{ 0x06, REG06_NT },
	{ 0x07, 0 },
	{ 0x08, REG08_PANEL(0x1) | REG08_CLOCK_DIV(0x2) },
	{ 0x09, REG09_SUB_BRIGHT_R(0x20) },
	{ 0x0a, REG0A_SUB_BRIGHT_B(0x20) },
	{ 0x0b, REG0B_HD_FREERUN | REG0B_VD_FREERUN },
	{ 0x0c, REG0C_CONTRAST_R(0x00) },
	{ 0x0d, REG0D_CONTRAST_G(0x00) },
	{ 0x0e, REG0E_CONTRAST_B(0x10) },
	{ 0x0f, 0 },
	{ 0x10, REG10_BRIGHT(0x7f) },
	{ 0x11, REG11_SIGC_CNTL | REG11_SIG_GAIN(0x3f) },
	{ 0x12, REG12_COLOR(0x20) | REG12_PWCKSEL(0x1) },
	{ 0x13, REG13_4096LEVEL_CNTL(0x8) },
	{ 0x14, 0 },
	{ 0x15, 0 },
};

/* Power on, pulse reset, then program the init sequence via regmap. */
static int y030xx067a_prepare(struct drm_panel *panel)
{
	struct y030xx067a *priv = to_y030xx067a(panel);
	struct device *dev = &priv->spi->dev;
	int err;

	err = regulator_enable(priv->supply);
	if (err) {
		dev_err(dev, "Failed to enable power supply: %d\n", err);
		return err;
	}

	/* Reset the chip */
	gpiod_set_value_cansleep(priv->reset_gpio, 1);
	usleep_range(1000, 20000);
	gpiod_set_value_cansleep(priv->reset_gpio, 0);
	usleep_range(1000, 20000);

	err = regmap_multi_reg_write(priv->map, y030xx067a_init_sequence,
				     ARRAY_SIZE(y030xx067a_init_sequence));
	if (err) {
		dev_err(dev, "Failed to init registers: %d\n", err);
		goto err_disable_regulator;
	}

	return 0;

err_disable_regulator:
	regulator_disable(priv->supply);
	return err;
}

/* Hold the chip in reset and cut power. */
static int y030xx067a_unprepare(struct drm_panel *panel)
{
	struct y030xx067a *priv = to_y030xx067a(panel);

	gpiod_set_value_cansleep(priv->reset_gpio, 1);
	regulator_disable(priv->supply);

	return 0;
}

/* Leave power-save (XPSAVE); delay backlight until the image is stable. */
static int y030xx067a_enable(struct drm_panel *panel)
{
	struct y030xx067a *priv = to_y030xx067a(panel);

	regmap_set_bits(priv->map, 0x06, REG06_XPSAVE);

	if (panel->backlight) {
		/* Wait for the picture to be ready before enabling backlight */
		msleep(120);
	}

	return 0;
}

/* Re-enter power-save mode. */
static int y030xx067a_disable(struct drm_panel *panel)
{
	struct y030xx067a *priv = to_y030xx067a(panel);

	regmap_clear_bits(priv->map, 0x06, REG06_XPSAVE);

	return 0;
}

/*
 * Report all modes from the matched variant; the first mode is marked
 * preferred only when it is the sole mode. Returns the mode count.
 */
static int y030xx067a_get_modes(struct drm_panel *panel,
				struct drm_connector *connector)
{
	struct y030xx067a *priv = to_y030xx067a(panel);
	const struct y030xx067a_info *panel_info = priv->panel_info;
	struct drm_display_mode *mode;
	unsigned int i;

	for (i = 0; i < panel_info->num_modes; i++) {
		mode = drm_mode_duplicate(connector->dev,
					  &panel_info->display_modes[i]);
		if (!mode)
			return -ENOMEM;

		drm_mode_set_name(mode);

		mode->type = DRM_MODE_TYPE_DRIVER;
		if (panel_info->num_modes == 1)
			mode->type |= DRM_MODE_TYPE_PREFERRED;

		drm_mode_probed_add(connector, mode);
	}

	connector->display_info.bpc = 8;
	connector->display_info.width_mm = panel_info->width_mm;
	connector->display_info.height_mm = panel_info->height_mm;

	drm_display_info_set_bus_formats(&connector->display_info,
					 &panel_info->bus_format, 1);
	connector->display_info.bus_flags = panel_info->bus_flags;

	return panel_info->num_modes;
}

static const struct drm_panel_funcs y030xx067a_funcs = {
	.prepare	= y030xx067a_prepare,
	.unprepare	= y030xx067a_unprepare,
	.enable		= y030xx067a_enable,
	.disable	= y030xx067a_disable,
	.get_modes	= y030xx067a_get_modes,
};

static const struct regmap_config y030xx067a_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0x15,
	.cache_type = REGCACHE_FLAT,
};

/* Bind resources (regmap over SPI, regulator, reset GPIO) and register. */
static int y030xx067a_probe(struct spi_device *spi)
{
	struct device *dev = &spi->dev;
	struct y030xx067a *priv;
	int err;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->spi = spi;
	spi_set_drvdata(spi, priv);

	priv->map = devm_regmap_init_spi(spi, &y030xx067a_regmap_config);
	if (IS_ERR(priv->map)) {
		dev_err(dev, "Unable to init regmap\n");
		return PTR_ERR(priv->map);
	}

	priv->panel_info = of_device_get_match_data(dev);
	if (!priv->panel_info)
		return -EINVAL;

	priv->supply = devm_regulator_get(dev, "power");
	if (IS_ERR(priv->supply))
		return dev_err_probe(dev, PTR_ERR(priv->supply),
				     "Failed to get power supply\n");

	priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(priv->reset_gpio))
		return dev_err_probe(dev, PTR_ERR(priv->reset_gpio),
				     "Failed to get reset GPIO\n");

	drm_panel_init(&priv->panel, dev, &y030xx067a_funcs,
		       DRM_MODE_CONNECTOR_DPI);

	err = drm_panel_of_backlight(&priv->panel);
	if (err)
		return err;

	drm_panel_add(&priv->panel);

	return 0;
}

/* Unregister and power the panel down on driver removal. */
static void y030xx067a_remove(struct spi_device *spi)
{
	struct y030xx067a *priv = spi_get_drvdata(spi);

	drm_panel_remove(&priv->panel);
	drm_panel_disable(&priv->panel);
	drm_panel_unprepare(&priv->panel);
}

/* 320x480 delta-RGB timings: 60 Hz and 50 Hz variants. */
static const struct drm_display_mode y030xx067a_modes[] = {
	{ /* 60 Hz */
		.clock = 14400,
		.hdisplay = 320,
		.hsync_start = 320 + 10,
		.hsync_end = 320 + 10 + 37,
		.htotal = 320 + 10 + 37 + 33,
		.vdisplay = 480,
		.vsync_start = 480 + 84,
		.vsync_end = 480 + 84 + 20,
		.vtotal = 480 + 84 + 20 + 16,
		.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
	},
	{ /* 50 Hz */
		.clock = 12000,
		.hdisplay = 320,
		.hsync_start = 320 + 10,
		.hsync_end = 320 + 10 + 37,
		.htotal = 320 + 10 + 37 + 33,
		.vdisplay = 480,
		.vsync_start = 480 + 84,
		.vsync_end = 480 + 84 + 20,
		.vtotal = 480 + 84 + 20 + 16,
		.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
	},
};

static const struct y030xx067a_info y030xx067a_info = {
	.display_modes = y030xx067a_modes,
	.num_modes = ARRAY_SIZE(y030xx067a_modes),
	.width_mm = 69,
	.height_mm = 51,
	.bus_format = MEDIA_BUS_FMT_RGB888_3X8_DELTA,
	.bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE | DRM_BUS_FLAG_DE_LOW,
};

static const struct of_device_id y030xx067a_of_match[] = {
	{ .compatible = "abt,y030xx067a", .data = &y030xx067a_info },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, y030xx067a_of_match);

static struct spi_driver y030xx067a_driver = {
	.driver = {
		.name = "abt-y030xx067a",
		.of_match_table = y030xx067a_of_match,
	},
	.probe = y030xx067a_probe,
	.remove = y030xx067a_remove,
};
module_spi_driver(y030xx067a_driver);

MODULE_AUTHOR("Paul Cercueil <[email protected]>");
MODULE_AUTHOR("Christophe Branchereau <[email protected]>");
MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/panel/panel-abt-y030xx067a.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018-2019, Bridge Systems BV
 * Copyright (C) 2018-2019, Bootlin
 * Copyright (C) 2017, Free Electrons
 *
 * This file based on panel-ilitek-ili9881c.c
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>

#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>

#include <drm/drm_connector.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>

/* Driver state for the Ronbo RB070D30 1024x600 DSI panel. */
struct rb070d30_panel {
	struct drm_panel panel;
	struct mipi_dsi_device *dsi;
	struct regulator *supply;

	struct {
		struct gpio_desc *power;
		struct gpio_desc *reset;
		struct gpio_desc *updn;	/* scan direction, held low */
		struct gpio_desc *shlr;	/* shift direction, held low */
	} gpios;
};

static inline struct rb070d30_panel *panel_to_rb070d30_panel(struct drm_panel *panel)
{
	return container_of(panel, struct rb070d30_panel, panel);
}

/* Power-up sequence: supply, power GPIO, release reset — 20 ms steps. */
static int rb070d30_panel_prepare(struct drm_panel *panel)
{
	struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
	int ret;

	ret = regulator_enable(ctx->supply);
	if (ret < 0) {
		dev_err(&ctx->dsi->dev, "Failed to enable supply: %d\n", ret);
		return ret;
	}

	msleep(20);
	gpiod_set_value(ctx->gpios.power, 1);
	msleep(20);
	gpiod_set_value(ctx->gpios.reset, 1);
	msleep(20);
	return 0;
}

/* Reverse of prepare: assert reset, drop power GPIO, disable supply. */
static int rb070d30_panel_unprepare(struct drm_panel *panel)
{
	struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);

	gpiod_set_value(ctx->gpios.reset, 0);
	gpiod_set_value(ctx->gpios.power, 0);
	regulator_disable(ctx->supply);

	return 0;
}

/* Wake the panel via DCS exit-sleep. */
static int rb070d30_panel_enable(struct drm_panel *panel)
{
	struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);

	return mipi_dsi_dcs_exit_sleep_mode(ctx->dsi);
}

/* Put the panel to sleep via DCS enter-sleep. */
static int rb070d30_panel_disable(struct drm_panel *panel)
{
	struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);

	return mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
}

/* Default timings */
static const struct drm_display_mode default_mode = {
	.clock		= 51206,
	.hdisplay	= 1024,
	.hsync_start	= 1024 + 160,
	.hsync_end	= 1024 + 160 + 80,
	.htotal		= 1024 + 160 + 80 + 80,
	.vdisplay	= 600,
	.vsync_start	= 600 + 12,
	.vsync_end	= 600 + 12 + 10,
	.vtotal		= 600 + 12 + 10 + 13,

	.width_mm	= 154,
	.height_mm	= 85,
};

/* Report the single default mode and the RGB888 bus format. Returns 1. */
static int rb070d30_panel_get_modes(struct drm_panel *panel,
				    struct drm_connector *connector)
{
	struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
	struct drm_display_mode *mode;
	static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;

	mode = drm_mode_duplicate(connector->dev, &default_mode);
	if (!mode) {
		dev_err(&ctx->dsi->dev, "Failed to add mode " DRM_MODE_FMT "\n",
			DRM_MODE_ARG(&default_mode));
		return -EINVAL;
	}

	drm_mode_set_name(mode);

	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	drm_mode_probed_add(connector, mode);

	connector->display_info.bpc = 8;
	connector->display_info.width_mm = mode->width_mm;
	connector->display_info.height_mm = mode->height_mm;
	drm_display_info_set_bus_formats(&connector->display_info,
					 &bus_format, 1);

	return 1;
}

static const struct drm_panel_funcs rb070d30_panel_funcs = {
	.get_modes	= rb070d30_panel_get_modes,
	.prepare	= rb070d30_panel_prepare,
	.enable		= rb070d30_panel_enable,
	.disable	= rb070d30_panel_disable,
	.unprepare	= rb070d30_panel_unprepare,
};

/* Acquire regulator + four GPIOs, configure the DSI link and register. */
static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi)
{
	struct rb070d30_panel *ctx;
	int ret;

	ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->supply = devm_regulator_get(&dsi->dev, "vcc-lcd");
	if (IS_ERR(ctx->supply))
		return PTR_ERR(ctx->supply);

	mipi_dsi_set_drvdata(dsi, ctx);
	ctx->dsi = dsi;

	drm_panel_init(&ctx->panel, &dsi->dev, &rb070d30_panel_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	ctx->gpios.reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(ctx->gpios.reset)) {
		dev_err(&dsi->dev, "Couldn't get our reset GPIO\n");
		return PTR_ERR(ctx->gpios.reset);
	}

	ctx->gpios.power = devm_gpiod_get(&dsi->dev, "power", GPIOD_OUT_LOW);
	if (IS_ERR(ctx->gpios.power)) {
		dev_err(&dsi->dev, "Couldn't get our power GPIO\n");
		return PTR_ERR(ctx->gpios.power);
	}

	/*
	 * We don't change the state of that GPIO later on but we need
	 * to force it into a low state.
	 */
	ctx->gpios.updn = devm_gpiod_get(&dsi->dev, "updn", GPIOD_OUT_LOW);
	if (IS_ERR(ctx->gpios.updn)) {
		dev_err(&dsi->dev, "Couldn't get our updn GPIO\n");
		return PTR_ERR(ctx->gpios.updn);
	}

	/*
	 * We don't change the state of that GPIO later on but we need
	 * to force it into a low state.
	 */
	ctx->gpios.shlr = devm_gpiod_get(&dsi->dev, "shlr", GPIOD_OUT_LOW);
	if (IS_ERR(ctx->gpios.shlr)) {
		dev_err(&dsi->dev, "Couldn't get our shlr GPIO\n");
		return PTR_ERR(ctx->gpios.shlr);
	}

	ret = drm_panel_of_backlight(&ctx->panel);
	if (ret)
		return ret;

	drm_panel_add(&ctx->panel);

	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
			  MIPI_DSI_MODE_LPM;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->lanes = 4;

	ret = mipi_dsi_attach(dsi);
	if (ret < 0) {
		drm_panel_remove(&ctx->panel);
		return ret;
	}

	return 0;
}

/* Detach from the DSI host and unregister the panel. */
static void rb070d30_panel_dsi_remove(struct mipi_dsi_device *dsi)
{
	struct rb070d30_panel *ctx = mipi_dsi_get_drvdata(dsi);

	mipi_dsi_detach(dsi);
	drm_panel_remove(&ctx->panel);
}

static const struct of_device_id rb070d30_panel_of_match[] = {
	{ .compatible = "ronbo,rb070d30" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, rb070d30_panel_of_match);

static struct mipi_dsi_driver rb070d30_panel_driver = {
	.probe = rb070d30_panel_dsi_probe,
	.remove = rb070d30_panel_dsi_remove,
	.driver = {
		.name = "panel-ronbo-rb070d30",
		.of_match_table = rb070d30_panel_of_match,
	},
};
module_mipi_dsi_driver(rb070d30_panel_driver);

MODULE_AUTHOR("Boris Brezillon <[email protected]>");
MODULE_AUTHOR("Konstantin Sudakov <[email protected]>");
MODULE_DESCRIPTION("Ronbo RB070D30 Panel Driver");
MODULE_LICENSE("GPL");
linux-master
drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
// SPDX-License-Identifier: GPL-2.0+ /* * MIPI-DSI Novatek NT35560-based panel controller. * * Supported panels include: * Sony ACX424AKM - a 480x854 AMOLED DSI panel * Sony ACX424AKP - a 480x864 AMOLED DSI panel * * Copyright (C) Linaro Ltd. 2019-2021 * Author: Linus Walleij * Based on code and know-how from Marcus Lorentzon * Copyright (C) ST-Ericsson SA 2010 * Based on code and know-how from Johan Olson and Joakim Wesslen * Copyright (C) Sony Ericsson Mobile Communications 2010 */ #include <linux/backlight.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/of.h> #include <linux/regulator/consumer.h> #include <video/mipi_display.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_modes.h> #include <drm/drm_panel.h> #define NT35560_DCS_READ_ID1 0xDA #define NT35560_DCS_READ_ID2 0xDB #define NT35560_DCS_READ_ID3 0xDC #define NT35560_DCS_SET_MDDI 0xAE /* * Sony seems to use vendor ID 0x81 */ #define DISPLAY_SONY_ACX424AKP_ID1 0x8103 #define DISPLAY_SONY_ACX424AKP_ID2 0x811a #define DISPLAY_SONY_ACX424AKP_ID3 0x811b /* * The fourth ID looks like a bug, vendor IDs begin at 0x80 * and panel 00 ... seems like default values. */ #define DISPLAY_SONY_ACX424AKP_ID4 0x8000 struct nt35560_config { const struct drm_display_mode *vid_mode; const struct drm_display_mode *cmd_mode; }; struct nt35560 { const struct nt35560_config *conf; struct drm_panel panel; struct device *dev; struct regulator *supply; struct gpio_desc *reset_gpio; bool video_mode; }; static const struct drm_display_mode sony_acx424akp_vid_mode = { .clock = 27234, .hdisplay = 480, .hsync_start = 480 + 15, .hsync_end = 480 + 15 + 0, .htotal = 480 + 15 + 0 + 15, .vdisplay = 864, .vsync_start = 864 + 14, .vsync_end = 864 + 14 + 1, .vtotal = 864 + 14 + 1 + 11, .width_mm = 48, .height_mm = 84, .flags = DRM_MODE_FLAG_PVSYNC, }; /* * The timings are not very helpful as the display is used in * command mode using the maximum HS frequency. 
*/ static const struct drm_display_mode sony_acx424akp_cmd_mode = { .clock = 35478, .hdisplay = 480, .hsync_start = 480 + 154, .hsync_end = 480 + 154 + 16, .htotal = 480 + 154 + 16 + 32, .vdisplay = 864, .vsync_start = 864 + 1, .vsync_end = 864 + 1 + 1, .vtotal = 864 + 1 + 1 + 1, /* * Some desired refresh rate, experiments at the maximum "pixel" * clock speed (HS clock 420 MHz) yields around 117Hz. */ .width_mm = 48, .height_mm = 84, }; static const struct nt35560_config sony_acx424akp_data = { .vid_mode = &sony_acx424akp_vid_mode, .cmd_mode = &sony_acx424akp_cmd_mode, }; static const struct drm_display_mode sony_acx424akm_vid_mode = { .clock = 27234, .hdisplay = 480, .hsync_start = 480 + 15, .hsync_end = 480 + 15 + 0, .htotal = 480 + 15 + 0 + 15, .vdisplay = 854, .vsync_start = 854 + 14, .vsync_end = 854 + 14 + 1, .vtotal = 854 + 14 + 1 + 11, .width_mm = 46, .height_mm = 82, .flags = DRM_MODE_FLAG_PVSYNC, }; /* * The timings are not very helpful as the display is used in * command mode using the maximum HS frequency. 
*/ static const struct drm_display_mode sony_acx424akm_cmd_mode = { .clock = 35478, .hdisplay = 480, .hsync_start = 480 + 154, .hsync_end = 480 + 154 + 16, .htotal = 480 + 154 + 16 + 32, .vdisplay = 854, .vsync_start = 854 + 1, .vsync_end = 854 + 1 + 1, .vtotal = 854 + 1 + 1 + 1, .width_mm = 46, .height_mm = 82, }; static const struct nt35560_config sony_acx424akm_data = { .vid_mode = &sony_acx424akm_vid_mode, .cmd_mode = &sony_acx424akm_cmd_mode, }; static inline struct nt35560 *panel_to_nt35560(struct drm_panel *panel) { return container_of(panel, struct nt35560, panel); } #define FOSC 20 /* 20Mhz */ #define SCALE_FACTOR_NS_DIV_MHZ 1000 static int nt35560_set_brightness(struct backlight_device *bl) { struct nt35560 *nt = bl_get_data(bl); struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev); int period_ns = 1023; int duty_ns = bl->props.brightness; u8 pwm_ratio; u8 pwm_div; u8 par; int ret; if (backlight_is_blank(bl)) { /* Disable backlight */ par = 0x00; ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, &par, 1); if (ret) { dev_err(nt->dev, "failed to disable display backlight (%d)\n", ret); return ret; } return 0; } /* Calculate the PWM duty cycle in n/256's */ pwm_ratio = max(((duty_ns * 256) / period_ns) - 1, 1); pwm_div = max(1, ((FOSC * period_ns) / 256) / SCALE_FACTOR_NS_DIV_MHZ); /* Set up PWM dutycycle ONE byte (differs from the standard) */ dev_dbg(nt->dev, "calculated duty cycle %02x\n", pwm_ratio); ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, &pwm_ratio, 1); if (ret < 0) { dev_err(nt->dev, "failed to set display PWM ratio (%d)\n", ret); return ret; } /* * Sequence to write PWMDIV: * address data * 0xF3 0xAA CMD2 Unlock * 0x00 0x01 Enter CMD2 page 0 * 0X7D 0x01 No reload MTP of CMD2 P1 * 0x22 PWMDIV * 0x7F 0xAA CMD2 page 1 lock */ par = 0xaa; ret = mipi_dsi_dcs_write(dsi, 0xf3, &par, 1); if (ret < 0) { dev_err(nt->dev, "failed to unlock CMD 2 (%d)\n", ret); return ret; } par = 0x01; ret = mipi_dsi_dcs_write(dsi, 0x00, 
&par, 1); if (ret < 0) { dev_err(nt->dev, "failed to enter page 1 (%d)\n", ret); return ret; } par = 0x01; ret = mipi_dsi_dcs_write(dsi, 0x7d, &par, 1); if (ret < 0) { dev_err(nt->dev, "failed to disable MTP reload (%d)\n", ret); return ret; } ret = mipi_dsi_dcs_write(dsi, 0x22, &pwm_div, 1); if (ret < 0) { dev_err(nt->dev, "failed to set PWM divisor (%d)\n", ret); return ret; } par = 0xaa; ret = mipi_dsi_dcs_write(dsi, 0x7f, &par, 1); if (ret < 0) { dev_err(nt->dev, "failed to lock CMD 2 (%d)\n", ret); return ret; } /* Enable backlight */ par = 0x24; ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, &par, 1); if (ret < 0) { dev_err(nt->dev, "failed to enable display backlight (%d)\n", ret); return ret; } return 0; } static const struct backlight_ops nt35560_bl_ops = { .update_status = nt35560_set_brightness, }; static const struct backlight_properties nt35560_bl_props = { .type = BACKLIGHT_RAW, .brightness = 512, .max_brightness = 1023, }; static int nt35560_read_id(struct nt35560 *nt) { struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev); u8 vendor, version, panel; u16 val; int ret; ret = mipi_dsi_dcs_read(dsi, NT35560_DCS_READ_ID1, &vendor, 1); if (ret < 0) { dev_err(nt->dev, "could not vendor ID byte\n"); return ret; } ret = mipi_dsi_dcs_read(dsi, NT35560_DCS_READ_ID2, &version, 1); if (ret < 0) { dev_err(nt->dev, "could not read device version byte\n"); return ret; } ret = mipi_dsi_dcs_read(dsi, NT35560_DCS_READ_ID3, &panel, 1); if (ret < 0) { dev_err(nt->dev, "could not read panel ID byte\n"); return ret; } if (vendor == 0x00) { dev_err(nt->dev, "device vendor ID is zero\n"); return -ENODEV; } val = (vendor << 8) | panel; switch (val) { case DISPLAY_SONY_ACX424AKP_ID1: case DISPLAY_SONY_ACX424AKP_ID2: case DISPLAY_SONY_ACX424AKP_ID3: case DISPLAY_SONY_ACX424AKP_ID4: dev_info(nt->dev, "MTP vendor: %02x, version: %02x, panel: %02x\n", vendor, version, panel); break; default: dev_info(nt->dev, "unknown vendor: %02x, version: %02x, panel: 
%02x\n", vendor, version, panel); break; } return 0; } static int nt35560_power_on(struct nt35560 *nt) { int ret; ret = regulator_enable(nt->supply); if (ret) { dev_err(nt->dev, "failed to enable supply (%d)\n", ret); return ret; } /* Assert RESET */ gpiod_set_value_cansleep(nt->reset_gpio, 1); udelay(20); /* De-assert RESET */ gpiod_set_value_cansleep(nt->reset_gpio, 0); usleep_range(11000, 20000); return 0; } static void nt35560_power_off(struct nt35560 *nt) { /* Assert RESET */ gpiod_set_value_cansleep(nt->reset_gpio, 1); usleep_range(11000, 20000); regulator_disable(nt->supply); } static int nt35560_prepare(struct drm_panel *panel) { struct nt35560 *nt = panel_to_nt35560(panel); struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev); const u8 mddi = 3; int ret; ret = nt35560_power_on(nt); if (ret) return ret; ret = nt35560_read_id(nt); if (ret) { dev_err(nt->dev, "failed to read panel ID (%d)\n", ret); goto err_power_off; } /* Enabe tearing mode: send TE (tearing effect) at VBLANK */ ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK); if (ret) { dev_err(nt->dev, "failed to enable vblank TE (%d)\n", ret); goto err_power_off; } /* * Set MDDI * * This presumably deactivates the Qualcomm MDDI interface and * selects DSI, similar code is found in other drivers such as the * Sharp LS043T1LE01 which makes us suspect that this panel may be * using a Novatek NT35565 or similar display driver chip that shares * this command. Due to the lack of documentation we cannot know for * sure. 
*/
	ret = mipi_dsi_dcs_write(dsi, NT35560_DCS_SET_MDDI,
				 &mddi, sizeof(mddi));
	if (ret < 0) {
		dev_err(nt->dev, "failed to set MDDI (%d)\n", ret);
		goto err_power_off;
	}

	/* Exit sleep mode */
	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret) {
		dev_err(nt->dev, "failed to exit sleep mode (%d)\n", ret);
		goto err_power_off;
	}
	/* Delay before display-on; value presumably from vendor code -- not documented here */
	msleep(140);

	ret = mipi_dsi_dcs_set_display_on(dsi);
	if (ret) {
		dev_err(nt->dev, "failed to turn display on (%d)\n", ret);
		goto err_power_off;
	}
	if (nt->video_mode) {
		/* In video mode turn peripheral on */
		ret = mipi_dsi_turn_on_peripheral(dsi);
		if (ret) {
			dev_err(nt->dev, "failed to turn on peripheral\n");
			goto err_power_off;
		}
	}

	return 0;

err_power_off:
	/* Any failure after power-on rolls supply and reset state back */
	nt35560_power_off(nt);
	return ret;
}

/*
 * Panel .unprepare hook: display off, enter sleep mode, wait, then cut
 * power via nt35560_power_off(). Returns 0 or a negative DCS error.
 */
static int nt35560_unprepare(struct drm_panel *panel)
{
	struct nt35560 *nt = panel_to_nt35560(panel);
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
	int ret;

	ret = mipi_dsi_dcs_set_display_off(dsi);
	if (ret) {
		dev_err(nt->dev, "failed to turn display off (%d)\n", ret);
		return ret;
	}

	/* Enter sleep mode */
	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
	if (ret) {
		dev_err(nt->dev, "failed to enter sleep mode (%d)\n", ret);
		return ret;
	}
	/* Settle time before removing power -- presumably from vendor code */
	msleep(85);

	nt35560_power_off(nt);

	return 0;
}

/*
 * Panel .get_modes hook: duplicate either the video-mode or the
 * command-mode timing from the matched per-panel configuration.
 * Returns 1 (the number of modes added) or -EINVAL on failure.
 */
static int nt35560_get_modes(struct drm_panel *panel,
			     struct drm_connector *connector)
{
	struct nt35560 *nt = panel_to_nt35560(panel);
	const struct nt35560_config *conf = nt->conf;
	struct drm_display_mode *mode;

	/* Mode selection was fixed at probe time from the DT property */
	if (nt->video_mode)
		mode = drm_mode_duplicate(connector->dev,
					  conf->vid_mode);
	else
		mode = drm_mode_duplicate(connector->dev,
					  conf->cmd_mode);
	if (!mode) {
		dev_err(panel->dev, "bad mode or failed to add mode\n");
		return -EINVAL;
	}
	drm_mode_set_name(mode);
	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;

	/* Physical size comes from the duplicated mode itself */
	connector->display_info.width_mm = mode->width_mm;
	connector->display_info.height_mm = mode->height_mm;

	drm_mode_probed_add(connector, mode);

	return 1; /* Number of modes */
}

static const struct drm_panel_funcs nt35560_drm_funcs = {
	.unprepare = 
nt35560_unprepare,
	.prepare = nt35560_prepare,
	.get_modes = nt35560_get_modes,
};

/*
 * Bind the DSI device: read DT config, program DSI bus parameters,
 * claim the supply regulator and reset GPIO, register the backlight
 * and the panel, then attach to the DSI host.
 */
static int nt35560_probe(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	struct nt35560 *nt;
	int ret;

	nt = devm_kzalloc(dev, sizeof(struct nt35560), GFP_KERNEL);
	if (!nt)
		return -ENOMEM;
	/* DT override: force video mode rather than command mode */
	nt->video_mode = of_property_read_bool(dev->of_node,
					       "enforce-video-mode");
	mipi_dsi_set_drvdata(dsi, nt);
	nt->dev = dev;

	/* Per-compatible timing data; see the of_device_id table below */
	nt->conf = of_device_get_match_data(dev);
	if (!nt->conf) {
		dev_err(dev, "missing device configuration\n");
		return -ENODEV;
	}

	dsi->lanes = 2;
	dsi->format = MIPI_DSI_FMT_RGB888;
	/*
	 * FIXME: these come from the ST-Ericsson vendor driver for the
	 * HREF520 and seems to reflect limitations in the PLLs on that
	 * platform, if you have the datasheet, please cross-check the
	 * actual max rates.
	 */
	dsi->lp_rate = 19200000;
	dsi->hs_rate = 420160000;
	if (nt->video_mode)
		/* Burst mode using event for sync */
		dsi->mode_flags =
			MIPI_DSI_MODE_VIDEO |
			MIPI_DSI_MODE_VIDEO_BURST;
	else
		dsi->mode_flags =
			MIPI_DSI_CLOCK_NON_CONTINUOUS;

	nt->supply = devm_regulator_get(dev, "vddi");
	if (IS_ERR(nt->supply))
		return PTR_ERR(nt->supply);

	/* This asserts RESET by default */
	nt->reset_gpio = devm_gpiod_get_optional(dev, "reset",
						 GPIOD_OUT_HIGH);
	if (IS_ERR(nt->reset_gpio))
		return dev_err_probe(dev, PTR_ERR(nt->reset_gpio),
				     "failed to request GPIO\n");

	drm_panel_init(&nt->panel, dev, &nt35560_drm_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	/* Managed registration: no explicit unregister needed on remove */
	nt->panel.backlight = devm_backlight_device_register(dev,
					"nt35560", dev, nt,
					&nt35560_bl_ops,
					&nt35560_bl_props);
	if (IS_ERR(nt->panel.backlight))
		return dev_err_probe(dev, PTR_ERR(nt->panel.backlight),
				     "failed to register backlight device\n");

	drm_panel_add(&nt->panel);

	ret = mipi_dsi_attach(dsi);
	if (ret < 0) {
		/* Undo drm_panel_add() if attaching to the host fails */
		drm_panel_remove(&nt->panel);
		return ret;
	}

	return 0;
}

/* Unbind: detach from the DSI host and unregister the panel */
static void nt35560_remove(struct mipi_dsi_device *dsi)
{
	struct nt35560 *nt = mipi_dsi_get_drvdata(dsi);

	mipi_dsi_detach(dsi);
	drm_panel_remove(&nt->panel);
}

static const struct of_device_id 
nt35560_of_match[] = { { .compatible = "sony,acx424akp", .data = &sony_acx424akp_data, }, { .compatible = "sony,acx424akm", .data = &sony_acx424akm_data, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, nt35560_of_match); static struct mipi_dsi_driver nt35560_driver = { .probe = nt35560_probe, .remove = nt35560_remove, .driver = { .name = "panel-novatek-nt35560", .of_match_table = nt35560_of_match, }, }; module_mipi_dsi_driver(nt35560_driver); MODULE_AUTHOR("Linus Wallei <[email protected]>"); MODULE_DESCRIPTION("MIPI-DSI Novatek NT35560 Panel Driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/panel/panel-novatek-nt35560.c
// SPDX-License-Identifier: GPL-2.0-only /* * MIPI-DSI based S6E63J0X03 AMOLED lcd 1.63 inch panel driver. * * Copyright (c) 2014-2017 Samsung Electronics Co., Ltd * * Inki Dae <[email protected]> * Hoegeun Kwon <[email protected]> */ #include <linux/backlight.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/regulator/consumer.h> #include <video/mipi_display.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_modes.h> #include <drm/drm_panel.h> #define MCS_LEVEL2_KEY 0xf0 #define MCS_MTP_KEY 0xf1 #define MCS_MTP_SET3 0xd4 #define MAX_BRIGHTNESS 100 #define DEFAULT_BRIGHTNESS 80 #define NUM_GAMMA_STEPS 9 #define GAMMA_CMD_CNT 28 #define FIRST_COLUMN 20 struct s6e63j0x03 { struct device *dev; struct drm_panel panel; struct backlight_device *bl_dev; struct regulator_bulk_data supplies[2]; struct gpio_desc *reset_gpio; }; static const struct drm_display_mode default_mode = { .clock = 4649, .hdisplay = 320, .hsync_start = 320 + 1, .hsync_end = 320 + 1 + 1, .htotal = 320 + 1 + 1 + 1, .vdisplay = 320, .vsync_start = 320 + 150, .vsync_end = 320 + 150 + 1, .vtotal = 320 + 150 + 1 + 2, .flags = 0, }; static const unsigned char gamma_tbl[NUM_GAMMA_STEPS][GAMMA_CMD_CNT] = { { /* Gamma 10 */ MCS_MTP_SET3, 0x00, 0x00, 0x00, 0x7f, 0x7f, 0x7f, 0x52, 0x6b, 0x6f, 0x26, 0x28, 0x2d, 0x28, 0x26, 0x27, 0x33, 0x34, 0x32, 0x36, 0x36, 0x35, 0x00, 0xab, 0x00, 0xae, 0x00, 0xbf }, { /* gamma 30 */ MCS_MTP_SET3, 0x00, 0x00, 0x00, 0x70, 0x7f, 0x7f, 0x4e, 0x64, 0x69, 0x26, 0x27, 0x2a, 0x28, 0x29, 0x27, 0x31, 0x32, 0x31, 0x35, 0x34, 0x35, 0x00, 0xc4, 0x00, 0xca, 0x00, 0xdc }, { /* gamma 60 */ MCS_MTP_SET3, 0x00, 0x00, 0x00, 0x65, 0x7b, 0x7d, 0x5f, 0x67, 0x68, 0x2a, 0x28, 0x29, 0x28, 0x2a, 0x27, 0x31, 0x2f, 0x30, 0x34, 0x33, 0x34, 0x00, 0xd9, 0x00, 0xe4, 0x00, 0xf5 }, { /* gamma 90 */ MCS_MTP_SET3, 0x00, 0x00, 0x00, 0x4d, 0x6f, 0x71, 0x67, 0x6a, 0x6c, 0x29, 0x28, 0x28, 0x28, 0x29, 0x27, 0x30, 0x2e, 0x30, 0x32, 0x31, 0x31, 0x00, 0xea, 0x00, 0xf6, 
0x01, 0x09 }, { /* gamma 120 */ MCS_MTP_SET3, 0x00, 0x00, 0x00, 0x3d, 0x66, 0x68, 0x69, 0x69, 0x69, 0x28, 0x28, 0x27, 0x28, 0x28, 0x27, 0x30, 0x2e, 0x2f, 0x31, 0x31, 0x30, 0x00, 0xf9, 0x01, 0x05, 0x01, 0x1b }, { /* gamma 150 */ MCS_MTP_SET3, 0x00, 0x00, 0x00, 0x31, 0x51, 0x53, 0x66, 0x66, 0x67, 0x28, 0x29, 0x27, 0x28, 0x27, 0x27, 0x2e, 0x2d, 0x2e, 0x31, 0x31, 0x30, 0x01, 0x04, 0x01, 0x11, 0x01, 0x29 }, { /* gamma 200 */ MCS_MTP_SET3, 0x00, 0x00, 0x00, 0x2f, 0x4f, 0x51, 0x67, 0x65, 0x65, 0x29, 0x2a, 0x28, 0x27, 0x25, 0x26, 0x2d, 0x2c, 0x2c, 0x30, 0x30, 0x30, 0x01, 0x14, 0x01, 0x23, 0x01, 0x3b }, { /* gamma 240 */ MCS_MTP_SET3, 0x00, 0x00, 0x00, 0x2c, 0x4d, 0x50, 0x65, 0x63, 0x64, 0x2a, 0x2c, 0x29, 0x26, 0x24, 0x25, 0x2c, 0x2b, 0x2b, 0x30, 0x30, 0x30, 0x01, 0x1e, 0x01, 0x2f, 0x01, 0x47 }, { /* gamma 300 */ MCS_MTP_SET3, 0x00, 0x00, 0x00, 0x38, 0x61, 0x64, 0x65, 0x63, 0x64, 0x28, 0x2a, 0x27, 0x26, 0x23, 0x25, 0x2b, 0x2b, 0x2a, 0x30, 0x2f, 0x30, 0x01, 0x2d, 0x01, 0x3f, 0x01, 0x57 } }; static inline struct s6e63j0x03 *panel_to_s6e63j0x03(struct drm_panel *panel) { return container_of(panel, struct s6e63j0x03, panel); } static inline ssize_t s6e63j0x03_dcs_write_seq(struct s6e63j0x03 *ctx, const void *seq, size_t len) { struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); return mipi_dsi_dcs_write_buffer(dsi, seq, len); } #define s6e63j0x03_dcs_write_seq_static(ctx, seq...) 
\ ({ \ static const u8 d[] = { seq }; \ s6e63j0x03_dcs_write_seq(ctx, d, ARRAY_SIZE(d)); \ }) static inline int s6e63j0x03_enable_lv2_command(struct s6e63j0x03 *ctx) { return s6e63j0x03_dcs_write_seq_static(ctx, MCS_LEVEL2_KEY, 0x5a, 0x5a); } static inline int s6e63j0x03_apply_mtp_key(struct s6e63j0x03 *ctx, bool on) { if (on) return s6e63j0x03_dcs_write_seq_static(ctx, MCS_MTP_KEY, 0x5a, 0x5a); return s6e63j0x03_dcs_write_seq_static(ctx, MCS_MTP_KEY, 0xa5, 0xa5); } static int s6e63j0x03_power_on(struct s6e63j0x03 *ctx) { int ret; ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies); if (ret < 0) return ret; msleep(30); gpiod_set_value(ctx->reset_gpio, 1); usleep_range(1000, 2000); gpiod_set_value(ctx->reset_gpio, 0); usleep_range(5000, 6000); return 0; } static int s6e63j0x03_power_off(struct s6e63j0x03 *ctx) { return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies); } static unsigned int s6e63j0x03_get_brightness_index(unsigned int brightness) { unsigned int index; index = brightness / (MAX_BRIGHTNESS / NUM_GAMMA_STEPS); if (index >= NUM_GAMMA_STEPS) index = NUM_GAMMA_STEPS - 1; return index; } static int s6e63j0x03_update_gamma(struct s6e63j0x03 *ctx, unsigned int brightness) { struct backlight_device *bl_dev = ctx->bl_dev; unsigned int index = s6e63j0x03_get_brightness_index(brightness); int ret; ret = s6e63j0x03_apply_mtp_key(ctx, true); if (ret < 0) return ret; ret = s6e63j0x03_dcs_write_seq(ctx, gamma_tbl[index], GAMMA_CMD_CNT); if (ret < 0) return ret; ret = s6e63j0x03_apply_mtp_key(ctx, false); if (ret < 0) return ret; bl_dev->props.brightness = brightness; return 0; } static int s6e63j0x03_set_brightness(struct backlight_device *bl_dev) { struct s6e63j0x03 *ctx = bl_get_data(bl_dev); unsigned int brightness = bl_dev->props.brightness; return s6e63j0x03_update_gamma(ctx, brightness); } static const struct backlight_ops s6e63j0x03_bl_ops = { .update_status = s6e63j0x03_set_brightness, }; static int s6e63j0x03_disable(struct 
drm_panel *panel) { struct s6e63j0x03 *ctx = panel_to_s6e63j0x03(panel); struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); int ret; ret = mipi_dsi_dcs_set_display_off(dsi); if (ret < 0) return ret; ctx->bl_dev->props.power = FB_BLANK_NORMAL; ret = mipi_dsi_dcs_enter_sleep_mode(dsi); if (ret < 0) return ret; msleep(120); return 0; } static int s6e63j0x03_unprepare(struct drm_panel *panel) { struct s6e63j0x03 *ctx = panel_to_s6e63j0x03(panel); int ret; ret = s6e63j0x03_power_off(ctx); if (ret < 0) return ret; ctx->bl_dev->props.power = FB_BLANK_POWERDOWN; return 0; } static int s6e63j0x03_panel_init(struct s6e63j0x03 *ctx) { struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); int ret; ret = s6e63j0x03_enable_lv2_command(ctx); if (ret < 0) return ret; ret = s6e63j0x03_apply_mtp_key(ctx, true); if (ret < 0) return ret; /* set porch adjustment */ ret = s6e63j0x03_dcs_write_seq_static(ctx, 0xf2, 0x1c, 0x28); if (ret < 0) return ret; /* set frame freq */ ret = s6e63j0x03_dcs_write_seq_static(ctx, 0xb5, 0x00, 0x02, 0x00); if (ret < 0) return ret; /* set caset, paset */ ret = mipi_dsi_dcs_set_column_address(dsi, FIRST_COLUMN, default_mode.hdisplay - 1 + FIRST_COLUMN); if (ret < 0) return ret; ret = mipi_dsi_dcs_set_page_address(dsi, 0, default_mode.vdisplay - 1); if (ret < 0) return ret; /* set ltps timming 0, 1 */ ret = s6e63j0x03_dcs_write_seq_static(ctx, 0xf8, 0x08, 0x08, 0x08, 0x17, 0x00, 0x2a, 0x02, 0x26, 0x00, 0x00, 0x02, 0x00, 0x00); if (ret < 0) return ret; ret = s6e63j0x03_dcs_write_seq_static(ctx, 0xf7, 0x02); if (ret < 0) return ret; /* set param pos te_edge */ ret = s6e63j0x03_dcs_write_seq_static(ctx, 0xb0, 0x01); if (ret < 0) return ret; /* set te rising edge */ ret = s6e63j0x03_dcs_write_seq_static(ctx, 0xe2, 0x0f); if (ret < 0) return ret; /* set param pos default */ ret = s6e63j0x03_dcs_write_seq_static(ctx, 0xb0, 0x00); if (ret < 0) return ret; ret = mipi_dsi_dcs_exit_sleep_mode(dsi); if (ret < 0) return ret; ret = 
s6e63j0x03_apply_mtp_key(ctx, false); if (ret < 0) return ret; return 0; } static int s6e63j0x03_prepare(struct drm_panel *panel) { struct s6e63j0x03 *ctx = panel_to_s6e63j0x03(panel); int ret; ret = s6e63j0x03_power_on(ctx); if (ret < 0) return ret; ret = s6e63j0x03_panel_init(ctx); if (ret < 0) goto err; ctx->bl_dev->props.power = FB_BLANK_NORMAL; return 0; err: s6e63j0x03_power_off(ctx); return ret; } static int s6e63j0x03_enable(struct drm_panel *panel) { struct s6e63j0x03 *ctx = panel_to_s6e63j0x03(panel); struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); int ret; msleep(120); ret = s6e63j0x03_apply_mtp_key(ctx, true); if (ret < 0) return ret; /* set elvss_cond */ ret = s6e63j0x03_dcs_write_seq_static(ctx, 0xb1, 0x00, 0x09); if (ret < 0) return ret; /* set pos */ ret = s6e63j0x03_dcs_write_seq_static(ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x40); if (ret < 0) return ret; /* set default white brightness */ ret = mipi_dsi_dcs_set_display_brightness(dsi, 0x00ff); if (ret < 0) return ret; /* set white ctrl */ ret = s6e63j0x03_dcs_write_seq_static(ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20); if (ret < 0) return ret; /* set acl off */ ret = s6e63j0x03_dcs_write_seq_static(ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00); if (ret < 0) return ret; ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK); if (ret < 0) return ret; ret = s6e63j0x03_apply_mtp_key(ctx, false); if (ret < 0) return ret; ret = mipi_dsi_dcs_set_display_on(dsi); if (ret < 0) return ret; ctx->bl_dev->props.power = FB_BLANK_UNBLANK; return 0; } static int s6e63j0x03_get_modes(struct drm_panel *panel, struct drm_connector *connector) { struct drm_display_mode *mode; mode = drm_mode_duplicate(connector->dev, &default_mode); if (!mode) { dev_err(panel->dev, "failed to add mode %ux%u@%u\n", default_mode.hdisplay, default_mode.vdisplay, drm_mode_vrefresh(&default_mode)); return -ENOMEM; } drm_mode_set_name(mode); mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; 
drm_mode_probed_add(connector, mode);

	/* Hard-coded physical size of the 1.63" panel, in millimetres */
	connector->display_info.width_mm = 29;
	connector->display_info.height_mm = 29;

	return 1;
}

static const struct drm_panel_funcs s6e63j0x03_funcs = {
	.disable = s6e63j0x03_disable,
	.unprepare = s6e63j0x03_unprepare,
	.prepare = s6e63j0x03_prepare,
	.enable = s6e63j0x03_enable,
	.get_modes = s6e63j0x03_get_modes,
};

/*
 * Bind the DSI device: program DSI bus parameters, claim the vdd3/vci
 * regulators and reset GPIO, register the backlight and the panel,
 * then attach to the DSI host.
 */
static int s6e63j0x03_probe(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	struct s6e63j0x03 *ctx;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(struct s6e63j0x03), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mipi_dsi_set_drvdata(dsi, ctx);

	ctx->dev = dev;

	dsi->lanes = 1;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO_NO_HFP |
		MIPI_DSI_MODE_VIDEO_NO_HBP | MIPI_DSI_MODE_VIDEO_NO_HSA;

	ctx->supplies[0].supply = "vdd3";
	ctx->supplies[1].supply = "vci";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
				      ctx->supplies);
	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to get regulators\n");

	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(ctx->reset_gpio))
		return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
				     "cannot get reset-gpio\n");

	drm_panel_init(&ctx->panel, dev, &s6e63j0x03_funcs,
		       DRM_MODE_CONNECTOR_DSI);
	/* Host controller must be set up before this panel's prepare runs */
	ctx->panel.prepare_prev_first = true;

	/* Non-devm registration: error path and remove must unregister it */
	ctx->bl_dev = backlight_device_register("s6e63j0x03", dev, ctx,
						&s6e63j0x03_bl_ops, NULL);
	if (IS_ERR(ctx->bl_dev))
		return dev_err_probe(dev, PTR_ERR(ctx->bl_dev),
				     "failed to register backlight device\n");

	ctx->bl_dev->props.max_brightness = MAX_BRIGHTNESS;
	ctx->bl_dev->props.brightness = DEFAULT_BRIGHTNESS;
	ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;

	drm_panel_add(&ctx->panel);

	ret = mipi_dsi_attach(dsi);
	if (ret < 0)
		goto remove_panel;

	return ret;

remove_panel:
	/* Roll back panel registration and the non-devm backlight */
	drm_panel_remove(&ctx->panel);
	backlight_device_unregister(ctx->bl_dev);

	return ret;
}

/* Unbind: detach from the host, unregister panel and backlight */
static void s6e63j0x03_remove(struct mipi_dsi_device *dsi)
{
	struct s6e63j0x03 *ctx = mipi_dsi_get_drvdata(dsi);

	mipi_dsi_detach(dsi);
	drm_panel_remove(&ctx->panel);
	backlight_device_unregister(ctx->bl_dev);
}

static const struct of_device_id s6e63j0x03_of_match[] = {
	{ .compatible = "samsung,s6e63j0x03" },
	{ }
};
MODULE_DEVICE_TABLE(of, s6e63j0x03_of_match);

static struct mipi_dsi_driver s6e63j0x03_driver = {
	.probe = s6e63j0x03_probe,
	.remove = s6e63j0x03_remove,
	.driver = {
		.name = "panel_samsung_s6e63j0x03",
		.of_match_table = s6e63j0x03_of_match,
	},
};
module_mipi_dsi_driver(s6e63j0x03_driver);

MODULE_AUTHOR("Inki Dae <[email protected]>");
MODULE_AUTHOR("Hoegeun Kwon <[email protected]>");
MODULE_DESCRIPTION("MIPI-DSI based s6e63j0x03 AMOLED LCD Panel Driver");
MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
// SPDX-License-Identifier: GPL-2.0 /* * Panel driver for the TPO TPG110 400CH LTPS TFT LCD Single Chip * Digital Driver. * * This chip drives a TFT LCD, so it does not know what kind of * display is actually connected to it, so the width and height of that * display needs to be supplied from the machine configuration. * * Author: * Linus Walleij <[email protected]> */ #include <drm/drm_modes.h> #include <drm/drm_panel.h> #include <linux/bitops.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #define TPG110_TEST 0x00 #define TPG110_CHIPID 0x01 #define TPG110_CTRL1 0x02 #define TPG110_RES_MASK GENMASK(2, 0) #define TPG110_RES_800X480 0x07 #define TPG110_RES_640X480 0x06 #define TPG110_RES_480X272 0x05 #define TPG110_RES_480X640 0x04 #define TPG110_RES_480X272_D 0x01 /* Dual scan: outputs 800x480 */ #define TPG110_RES_400X240_D 0x00 /* Dual scan: outputs 800x480 */ #define TPG110_CTRL2 0x03 #define TPG110_CTRL2_PM BIT(0) #define TPG110_CTRL2_RES_PM_CTRL BIT(7) /** * struct tpg110_panel_mode - lookup struct for the supported modes */ struct tpg110_panel_mode { /** * @name: the name of this panel */ const char *name; /** * @magic: the magic value from the detection register */ u32 magic; /** * @mode: the DRM display mode for this panel */ struct drm_display_mode mode; /** * @bus_flags: the DRM bus flags for this panel e.g. 
inverted clock */ u32 bus_flags; }; /** * struct tpg110 - state container for the TPG110 panel */ struct tpg110 { /** * @dev: the container device */ struct device *dev; /** * @spi: the corresponding SPI device */ struct spi_device *spi; /** * @panel: the DRM panel instance for this device */ struct drm_panel panel; /** * @panel_mode: the panel mode as detected */ const struct tpg110_panel_mode *panel_mode; /** * @width: the width of this panel in mm */ u32 width; /** * @height: the height of this panel in mm */ u32 height; /** * @grestb: reset GPIO line */ struct gpio_desc *grestb; }; /* * TPG110 modes, these are the simple modes, the dualscan modes that * take 400x240 or 480x272 in and display as 800x480 are not listed. */ static const struct tpg110_panel_mode tpg110_modes[] = { { .name = "800x480 RGB", .magic = TPG110_RES_800X480, .mode = { .clock = 33200, .hdisplay = 800, .hsync_start = 800 + 40, .hsync_end = 800 + 40 + 1, .htotal = 800 + 40 + 1 + 216, .vdisplay = 480, .vsync_start = 480 + 10, .vsync_end = 480 + 10 + 1, .vtotal = 480 + 10 + 1 + 35, }, .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }, { .name = "640x480 RGB", .magic = TPG110_RES_640X480, .mode = { .clock = 25200, .hdisplay = 640, .hsync_start = 640 + 24, .hsync_end = 640 + 24 + 1, .htotal = 640 + 24 + 1 + 136, .vdisplay = 480, .vsync_start = 480 + 18, .vsync_end = 480 + 18 + 1, .vtotal = 480 + 18 + 1 + 27, }, .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }, { .name = "480x272 RGB", .magic = TPG110_RES_480X272, .mode = { .clock = 9000, .hdisplay = 480, .hsync_start = 480 + 2, .hsync_end = 480 + 2 + 1, .htotal = 480 + 2 + 1 + 43, .vdisplay = 272, .vsync_start = 272 + 2, .vsync_end = 272 + 2 + 1, .vtotal = 272 + 2 + 1 + 12, }, .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }, { .name = "480x640 RGB", .magic = TPG110_RES_480X640, .mode = { .clock = 20500, .hdisplay = 480, .hsync_start = 480 + 2, .hsync_end = 480 + 2 + 1, .htotal = 480 + 2 + 1 + 43, .vdisplay = 640, .vsync_start = 640 + 4, 
.vsync_end = 640 + 4 + 1, .vtotal = 640 + 4 + 1 + 8, }, .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }, { .name = "400x240 RGB", .magic = TPG110_RES_400X240_D, .mode = { .clock = 8300, .hdisplay = 400, .hsync_start = 400 + 20, .hsync_end = 400 + 20 + 1, .htotal = 400 + 20 + 1 + 108, .vdisplay = 240, .vsync_start = 240 + 2, .vsync_end = 240 + 2 + 1, .vtotal = 240 + 2 + 1 + 20, }, .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }, }; static inline struct tpg110 * to_tpg110(struct drm_panel *panel) { return container_of(panel, struct tpg110, panel); } static u8 tpg110_readwrite_reg(struct tpg110 *tpg, bool write, u8 address, u8 outval) { struct spi_message m; struct spi_transfer t[2]; u8 buf[2]; int ret; spi_message_init(&m); memset(t, 0, sizeof(t)); if (write) { /* * Clear address bit 0, 1 when writing, just to be sure * The actual bit indicating a write here is bit 1, bit * 0 is just surplus to pad it up to 8 bits. */ buf[0] = address << 2; buf[0] &= ~0x03; buf[1] = outval; t[0].bits_per_word = 8; t[0].tx_buf = &buf[0]; t[0].len = 1; t[1].tx_buf = &buf[1]; t[1].len = 1; t[1].bits_per_word = 8; } else { /* Set address bit 0 to 1 to read */ buf[0] = address << 1; buf[0] |= 0x01; /* * The last bit/clock is Hi-Z turnaround cycle, so we need * to send only 7 bits here. The 8th bit is the high impedance * turn-around cycle. 
*/ t[0].bits_per_word = 7; t[0].tx_buf = &buf[0]; t[0].len = 1; t[1].rx_buf = &buf[1]; t[1].len = 1; t[1].bits_per_word = 8; } spi_message_add_tail(&t[0], &m); spi_message_add_tail(&t[1], &m); ret = spi_sync(tpg->spi, &m); if (ret) { dev_err(tpg->dev, "SPI message error %d\n", ret); return ret; } if (write) return 0; /* Read */ return buf[1]; } static u8 tpg110_read_reg(struct tpg110 *tpg, u8 address) { return tpg110_readwrite_reg(tpg, false, address, 0); } static void tpg110_write_reg(struct tpg110 *tpg, u8 address, u8 outval) { tpg110_readwrite_reg(tpg, true, address, outval); } static int tpg110_startup(struct tpg110 *tpg) { u8 val; int i; /* De-assert the reset signal */ gpiod_set_value_cansleep(tpg->grestb, 0); usleep_range(1000, 2000); dev_dbg(tpg->dev, "de-asserted GRESTB\n"); /* Test display communication */ tpg110_write_reg(tpg, TPG110_TEST, 0x55); val = tpg110_read_reg(tpg, TPG110_TEST); if (val != 0x55) { dev_err(tpg->dev, "failed communication test\n"); return -ENODEV; } val = tpg110_read_reg(tpg, TPG110_CHIPID); dev_info(tpg->dev, "TPG110 chip ID: %d version: %d\n", val >> 4, val & 0x0f); /* Show display resolution */ val = tpg110_read_reg(tpg, TPG110_CTRL1); val &= TPG110_RES_MASK; switch (val) { case TPG110_RES_400X240_D: dev_info(tpg->dev, "IN 400x240 RGB -> OUT 800x480 RGB (dual scan)\n"); break; case TPG110_RES_480X272_D: dev_info(tpg->dev, "IN 480x272 RGB -> OUT 800x480 RGB (dual scan)\n"); break; case TPG110_RES_480X640: dev_info(tpg->dev, "480x640 RGB\n"); break; case TPG110_RES_480X272: dev_info(tpg->dev, "480x272 RGB\n"); break; case TPG110_RES_640X480: dev_info(tpg->dev, "640x480 RGB\n"); break; case TPG110_RES_800X480: dev_info(tpg->dev, "800x480 RGB\n"); break; default: dev_err(tpg->dev, "ILLEGAL RESOLUTION 0x%02x\n", val); break; } /* From the producer side, this is the same resolution */ if (val == TPG110_RES_480X272_D) val = TPG110_RES_480X272; for (i = 0; i < ARRAY_SIZE(tpg110_modes); i++) { const struct tpg110_panel_mode *pm; pm = 
&tpg110_modes[i]; if (pm->magic == val) { tpg->panel_mode = pm; break; } } if (i == ARRAY_SIZE(tpg110_modes)) { dev_err(tpg->dev, "unsupported mode (%02x) detected\n", val); return -ENODEV; } val = tpg110_read_reg(tpg, TPG110_CTRL2); dev_info(tpg->dev, "resolution and standby is controlled by %s\n", (val & TPG110_CTRL2_RES_PM_CTRL) ? "software" : "hardware"); /* Take control over resolution and standby */ val |= TPG110_CTRL2_RES_PM_CTRL; tpg110_write_reg(tpg, TPG110_CTRL2, val); return 0; } static int tpg110_disable(struct drm_panel *panel) { struct tpg110 *tpg = to_tpg110(panel); u8 val; /* Put chip into standby */ val = tpg110_read_reg(tpg, TPG110_CTRL2_PM); val &= ~TPG110_CTRL2_PM; tpg110_write_reg(tpg, TPG110_CTRL2_PM, val); return 0; } static int tpg110_enable(struct drm_panel *panel) { struct tpg110 *tpg = to_tpg110(panel); u8 val; /* Take chip out of standby */ val = tpg110_read_reg(tpg, TPG110_CTRL2_PM); val |= TPG110_CTRL2_PM; tpg110_write_reg(tpg, TPG110_CTRL2_PM, val); return 0; } /** * tpg110_get_modes() - return the appropriate mode * @panel: the panel to get the mode for * @connector: reference to the central DRM connector control structure * * This currently does not present a forest of modes, instead it * presents the mode that is configured for the system under use, * and which is detected by reading the registers of the display. 
*/ static int tpg110_get_modes(struct drm_panel *panel, struct drm_connector *connector) { struct tpg110 *tpg = to_tpg110(panel); struct drm_display_mode *mode; connector->display_info.width_mm = tpg->width; connector->display_info.height_mm = tpg->height; connector->display_info.bus_flags = tpg->panel_mode->bus_flags; mode = drm_mode_duplicate(connector->dev, &tpg->panel_mode->mode); drm_mode_set_name(mode); mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; mode->width_mm = tpg->width; mode->height_mm = tpg->height; drm_mode_probed_add(connector, mode); return 1; } static const struct drm_panel_funcs tpg110_drm_funcs = { .disable = tpg110_disable, .enable = tpg110_enable, .get_modes = tpg110_get_modes, }; static int tpg110_probe(struct spi_device *spi) { struct device *dev = &spi->dev; struct device_node *np = dev->of_node; struct tpg110 *tpg; int ret; tpg = devm_kzalloc(dev, sizeof(*tpg), GFP_KERNEL); if (!tpg) return -ENOMEM; tpg->dev = dev; /* We get the physical display dimensions from the DT */ ret = of_property_read_u32(np, "width-mm", &tpg->width); if (ret) dev_err(dev, "no panel width specified\n"); ret = of_property_read_u32(np, "height-mm", &tpg->height); if (ret) dev_err(dev, "no panel height specified\n"); /* This asserts the GRESTB signal, putting the display into reset */ tpg->grestb = devm_gpiod_get(dev, "grestb", GPIOD_OUT_HIGH); if (IS_ERR(tpg->grestb)) { dev_err(dev, "no GRESTB GPIO\n"); return -ENODEV; } spi->bits_per_word = 8; spi->mode |= SPI_3WIRE_HIZ; ret = spi_setup(spi); if (ret < 0) { dev_err(dev, "spi setup failed.\n"); return ret; } tpg->spi = spi; ret = tpg110_startup(tpg); if (ret) return ret; drm_panel_init(&tpg->panel, dev, &tpg110_drm_funcs, DRM_MODE_CONNECTOR_DPI); ret = drm_panel_of_backlight(&tpg->panel); if (ret) return ret; spi_set_drvdata(spi, tpg); drm_panel_add(&tpg->panel); return 0; } static void tpg110_remove(struct spi_device *spi) { struct tpg110 *tpg = spi_get_drvdata(spi); drm_panel_remove(&tpg->panel); } 
static const struct of_device_id tpg110_match[] = { { .compatible = "tpo,tpg110", }, {}, }; MODULE_DEVICE_TABLE(of, tpg110_match); static const struct spi_device_id tpg110_ids[] = { { "tpg110" }, { }, }; MODULE_DEVICE_TABLE(spi, tpg110_ids); static struct spi_driver tpg110_driver = { .probe = tpg110_probe, .remove = tpg110_remove, .id_table = tpg110_ids, .driver = { .name = "tpo-tpg110-panel", .of_match_table = tpg110_match, }, }; module_spi_driver(tpg110_driver); MODULE_AUTHOR("Linus Walleij <[email protected]>"); MODULE_DESCRIPTION("TPO TPG110 panel driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/panel/panel-tpo-tpg110.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2020 Theobroma Systems Design und Consulting GmbH */ #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/media-bus-format.h> #include <linux/module.h> #include <linux/of.h> #include <linux/regulator/consumer.h> #include <video/display_timing.h> #include <video/mipi_display.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_modes.h> #include <drm/drm_panel.h> struct ltk050h3146w_cmd { char cmd; char data; }; struct ltk050h3146w; struct ltk050h3146w_desc { const struct drm_display_mode *mode; int (*init)(struct ltk050h3146w *ctx); }; struct ltk050h3146w { struct device *dev; struct drm_panel panel; struct gpio_desc *reset_gpio; struct regulator *vci; struct regulator *iovcc; const struct ltk050h3146w_desc *panel_desc; bool prepared; }; static const struct ltk050h3146w_cmd page1_cmds[] = { { 0x22, 0x0A }, /* BGR SS GS */ { 0x31, 0x00 }, /* column inversion */ { 0x53, 0xA2 }, /* VCOM1 */ { 0x55, 0xA2 }, /* VCOM2 */ { 0x50, 0x81 }, /* VREG1OUT=5V */ { 0x51, 0x85 }, /* VREG2OUT=-5V */ { 0x62, 0x0D }, /* EQT Time setting */ /* * The vendor init selected page 1 here _again_ * Is this supposed to be page 2? 
*/ { 0xA0, 0x00 }, { 0xA1, 0x1A }, { 0xA2, 0x28 }, { 0xA3, 0x13 }, { 0xA4, 0x16 }, { 0xA5, 0x29 }, { 0xA6, 0x1D }, { 0xA7, 0x1E }, { 0xA8, 0x84 }, { 0xA9, 0x1C }, { 0xAA, 0x28 }, { 0xAB, 0x75 }, { 0xAC, 0x1A }, { 0xAD, 0x19 }, { 0xAE, 0x4D }, { 0xAF, 0x22 }, { 0xB0, 0x28 }, { 0xB1, 0x54 }, { 0xB2, 0x66 }, { 0xB3, 0x39 }, { 0xC0, 0x00 }, { 0xC1, 0x1A }, { 0xC2, 0x28 }, { 0xC3, 0x13 }, { 0xC4, 0x16 }, { 0xC5, 0x29 }, { 0xC6, 0x1D }, { 0xC7, 0x1E }, { 0xC8, 0x84 }, { 0xC9, 0x1C }, { 0xCA, 0x28 }, { 0xCB, 0x75 }, { 0xCC, 0x1A }, { 0xCD, 0x19 }, { 0xCE, 0x4D }, { 0xCF, 0x22 }, { 0xD0, 0x28 }, { 0xD1, 0x54 }, { 0xD2, 0x66 }, { 0xD3, 0x39 }, }; static const struct ltk050h3146w_cmd page3_cmds[] = { { 0x01, 0x00 }, { 0x02, 0x00 }, { 0x03, 0x73 }, { 0x04, 0x00 }, { 0x05, 0x00 }, { 0x06, 0x0a }, { 0x07, 0x00 }, { 0x08, 0x00 }, { 0x09, 0x01 }, { 0x0a, 0x00 }, { 0x0b, 0x00 }, { 0x0c, 0x01 }, { 0x0d, 0x00 }, { 0x0e, 0x00 }, { 0x0f, 0x1d }, { 0x10, 0x1d }, { 0x11, 0x00 }, { 0x12, 0x00 }, { 0x13, 0x00 }, { 0x14, 0x00 }, { 0x15, 0x00 }, { 0x16, 0x00 }, { 0x17, 0x00 }, { 0x18, 0x00 }, { 0x19, 0x00 }, { 0x1a, 0x00 }, { 0x1b, 0x00 }, { 0x1c, 0x00 }, { 0x1d, 0x00 }, { 0x1e, 0x40 }, { 0x1f, 0x80 }, { 0x20, 0x06 }, { 0x21, 0x02 }, { 0x22, 0x00 }, { 0x23, 0x00 }, { 0x24, 0x00 }, { 0x25, 0x00 }, { 0x26, 0x00 }, { 0x27, 0x00 }, { 0x28, 0x33 }, { 0x29, 0x03 }, { 0x2a, 0x00 }, { 0x2b, 0x00 }, { 0x2c, 0x00 }, { 0x2d, 0x00 }, { 0x2e, 0x00 }, { 0x2f, 0x00 }, { 0x30, 0x00 }, { 0x31, 0x00 }, { 0x32, 0x00 }, { 0x33, 0x00 }, { 0x34, 0x04 }, { 0x35, 0x00 }, { 0x36, 0x00 }, { 0x37, 0x00 }, { 0x38, 0x3C }, { 0x39, 0x35 }, { 0x3A, 0x01 }, { 0x3B, 0x40 }, { 0x3C, 0x00 }, { 0x3D, 0x01 }, { 0x3E, 0x00 }, { 0x3F, 0x00 }, { 0x40, 0x00 }, { 0x41, 0x88 }, { 0x42, 0x00 }, { 0x43, 0x00 }, { 0x44, 0x1F }, { 0x50, 0x01 }, { 0x51, 0x23 }, { 0x52, 0x45 }, { 0x53, 0x67 }, { 0x54, 0x89 }, { 0x55, 0xab }, { 0x56, 0x01 }, { 0x57, 0x23 }, { 0x58, 0x45 }, { 0x59, 0x67 }, { 0x5a, 0x89 }, { 0x5b, 0xab }, { 0x5c, 0xcd }, { 
0x5d, 0xef }, { 0x5e, 0x11 }, { 0x5f, 0x01 }, { 0x60, 0x00 }, { 0x61, 0x15 }, { 0x62, 0x14 }, { 0x63, 0x0E }, { 0x64, 0x0F }, { 0x65, 0x0C }, { 0x66, 0x0D }, { 0x67, 0x06 }, { 0x68, 0x02 }, { 0x69, 0x07 }, { 0x6a, 0x02 }, { 0x6b, 0x02 }, { 0x6c, 0x02 }, { 0x6d, 0x02 }, { 0x6e, 0x02 }, { 0x6f, 0x02 }, { 0x70, 0x02 }, { 0x71, 0x02 }, { 0x72, 0x02 }, { 0x73, 0x02 }, { 0x74, 0x02 }, { 0x75, 0x01 }, { 0x76, 0x00 }, { 0x77, 0x14 }, { 0x78, 0x15 }, { 0x79, 0x0E }, { 0x7a, 0x0F }, { 0x7b, 0x0C }, { 0x7c, 0x0D }, { 0x7d, 0x06 }, { 0x7e, 0x02 }, { 0x7f, 0x07 }, { 0x80, 0x02 }, { 0x81, 0x02 }, { 0x82, 0x02 }, { 0x83, 0x02 }, { 0x84, 0x02 }, { 0x85, 0x02 }, { 0x86, 0x02 }, { 0x87, 0x02 }, { 0x88, 0x02 }, { 0x89, 0x02 }, { 0x8A, 0x02 }, }; static const struct ltk050h3146w_cmd page4_cmds[] = { { 0x70, 0x00 }, { 0x71, 0x00 }, { 0x82, 0x0F }, /* VGH_MOD clamp level=15v */ { 0x84, 0x0F }, /* VGH clamp level 15V */ { 0x85, 0x0D }, /* VGL clamp level (-10V) */ { 0x32, 0xAC }, { 0x8C, 0x80 }, { 0x3C, 0xF5 }, { 0xB5, 0x07 }, /* GAMMA OP */ { 0x31, 0x45 }, /* SOURCE OP */ { 0x3A, 0x24 }, /* PS_EN OFF */ { 0x88, 0x33 }, /* LVD */ }; static inline struct ltk050h3146w *panel_to_ltk050h3146w(struct drm_panel *panel) { return container_of(panel, struct ltk050h3146w, panel); } static int ltk050h3146w_init_sequence(struct ltk050h3146w *ctx) { struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); int ret; /* * Init sequence was supplied by the panel vendor without much * documentation. 
*/ mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x93, 0x65, 0xf8); mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x01, 0x03, 0x02, 0x00, 0x64, 0x06, 0x01); mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0xb5); mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x00, 0xb5); mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x00, 0xbf, 0x00, 0x00, 0xbf, 0x00); mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xc4, 0x23, 0x07); mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x02, 0x01, 0x24, 0x00, 0x28, 0x0f, 0x28, 0x04, 0xcc, 0xcc, 0xcc); mipi_dsi_dcs_write_seq(dsi, 0xbc, 0x0f, 0x04); mipi_dsi_dcs_write_seq(dsi, 0xbe, 0x1e, 0xf2); mipi_dsi_dcs_write_seq(dsi, 0xc0, 0x26, 0x03); mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x00, 0x12); mipi_dsi_dcs_write_seq(dsi, 0xc3, 0x04, 0x02, 0x02, 0x76, 0x01, 0x80, 0x80); mipi_dsi_dcs_write_seq(dsi, 0xc4, 0x24, 0x80, 0xb4, 0x81, 0x12, 0x0f, 0x16, 0x00, 0x00); mipi_dsi_dcs_write_seq(dsi, 0xc8, 0x7f, 0x72, 0x67, 0x5d, 0x5d, 0x50, 0x56, 0x41, 0x59, 0x57, 0x55, 0x70, 0x5b, 0x5f, 0x4f, 0x47, 0x38, 0x23, 0x08, 0x7f, 0x72, 0x67, 0x5d, 0x5d, 0x50, 0x56, 0x41, 0x59, 0x57, 0x55, 0x70, 0x5b, 0x5f, 0x4f, 0x47, 0x38, 0x23, 0x08); mipi_dsi_dcs_write_seq(dsi, 0xd0, 0x1e, 0x1f, 0x57, 0x58, 0x48, 0x4a, 0x44, 0x46, 0x40, 0x1f, 0x42, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); mipi_dsi_dcs_write_seq(dsi, 0xd1, 0x1e, 0x1f, 0x57, 0x58, 0x49, 0x4b, 0x45, 0x47, 0x41, 0x1f, 0x43, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); mipi_dsi_dcs_write_seq(dsi, 0xd2, 0x1f, 0x1e, 0x17, 0x18, 0x07, 0x05, 0x0b, 0x09, 0x03, 0x1f, 0x01, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); mipi_dsi_dcs_write_seq(dsi, 0xd3, 0x1f, 0x1e, 0x17, 0x18, 0x06, 0x04, 0x0a, 0x08, 0x02, 0x1f, 0x00, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); mipi_dsi_dcs_write_seq(dsi, 0xd4, 0x00, 0x00, 0x00, 0x0c, 0x06, 0x20, 0x01, 0x02, 0x00, 0x60, 0x15, 0xb0, 0x30, 0x03, 0x04, 0x00, 0x60, 0x72, 0x0a, 0x00, 0x60, 0x08); mipi_dsi_dcs_write_seq(dsi, 0xd5, 0x00, 0x06, 0x06, 0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 
0x00, 0xbc, 0x50, 0x00, 0x05, 0x21, 0x00, 0x60); mipi_dsi_dcs_write_seq(dsi, 0xdd, 0x2c, 0xa3, 0x00); mipi_dsi_dcs_write_seq(dsi, 0xde, 0x02); mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x32, 0x1c); mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x3b, 0x70, 0x00, 0x04); mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x11); mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x21, 0x22, 0x23, 0x24, 0x36, 0x37); mipi_dsi_dcs_write_seq(dsi, 0xc2, 0x20, 0x38, 0x1e, 0x84); mipi_dsi_dcs_write_seq(dsi, 0xde, 0x00); ret = mipi_dsi_dcs_set_tear_on(dsi, 1); if (ret < 0) { dev_err(ctx->dev, "failed to set tear on: %d\n", ret); return ret; } msleep(60); return 0; } static const struct drm_display_mode ltk050h3146w_mode = { .hdisplay = 720, .hsync_start = 720 + 42, .hsync_end = 720 + 42 + 8, .htotal = 720 + 42 + 8 + 42, .vdisplay = 1280, .vsync_start = 1280 + 12, .vsync_end = 1280 + 12 + 4, .vtotal = 1280 + 12 + 4 + 18, .clock = 64018, .width_mm = 62, .height_mm = 110, }; static const struct ltk050h3146w_desc ltk050h3146w_data = { .mode = &ltk050h3146w_mode, .init = ltk050h3146w_init_sequence, }; static int ltk050h3146w_a2_select_page(struct ltk050h3146w *ctx, int page) { struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); u8 d[3] = { 0x98, 0x81, page }; return mipi_dsi_dcs_write(dsi, 0xff, d, ARRAY_SIZE(d)); } static int ltk050h3146w_a2_write_page(struct ltk050h3146w *ctx, int page, const struct ltk050h3146w_cmd *cmds, int num) { struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); int i, ret; ret = ltk050h3146w_a2_select_page(ctx, page); if (ret < 0) { dev_err(ctx->dev, "failed to select page %d: %d\n", page, ret); return ret; } for (i = 0; i < num; i++) { ret = mipi_dsi_generic_write(dsi, &cmds[i], sizeof(struct ltk050h3146w_cmd)); if (ret < 0) { dev_err(ctx->dev, "failed to write page %d init cmds: %d\n", page, ret); return ret; } } return 0; } static int ltk050h3146w_a2_init_sequence(struct ltk050h3146w *ctx) { struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); int ret; /* * Init sequence was supplied 
by the panel vendor without much * documentation. */ ret = ltk050h3146w_a2_write_page(ctx, 3, page3_cmds, ARRAY_SIZE(page3_cmds)); if (ret < 0) return ret; ret = ltk050h3146w_a2_write_page(ctx, 4, page4_cmds, ARRAY_SIZE(page4_cmds)); if (ret < 0) return ret; ret = ltk050h3146w_a2_write_page(ctx, 1, page1_cmds, ARRAY_SIZE(page1_cmds)); if (ret < 0) return ret; ret = ltk050h3146w_a2_select_page(ctx, 0); if (ret < 0) { dev_err(ctx->dev, "failed to select page 0: %d\n", ret); return ret; } /* vendor code called this without param, where there should be one */ ret = mipi_dsi_dcs_set_tear_on(dsi, 0); if (ret < 0) { dev_err(ctx->dev, "failed to set tear on: %d\n", ret); return ret; } msleep(60); return 0; } static const struct drm_display_mode ltk050h3146w_a2_mode = { .hdisplay = 720, .hsync_start = 720 + 42, .hsync_end = 720 + 42 + 10, .htotal = 720 + 42 + 10 + 60, .vdisplay = 1280, .vsync_start = 1280 + 18, .vsync_end = 1280 + 18 + 4, .vtotal = 1280 + 18 + 4 + 12, .clock = 65595, .width_mm = 62, .height_mm = 110, }; static const struct ltk050h3146w_desc ltk050h3146w_a2_data = { .mode = &ltk050h3146w_a2_mode, .init = ltk050h3146w_a2_init_sequence, }; static int ltk050h3146w_unprepare(struct drm_panel *panel) { struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel); struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); int ret; if (!ctx->prepared) return 0; ret = mipi_dsi_dcs_set_display_off(dsi); if (ret < 0) { dev_err(ctx->dev, "failed to set display off: %d\n", ret); return ret; } mipi_dsi_dcs_enter_sleep_mode(dsi); if (ret < 0) { dev_err(ctx->dev, "failed to enter sleep mode: %d\n", ret); return ret; } regulator_disable(ctx->iovcc); regulator_disable(ctx->vci); ctx->prepared = false; return 0; } static int ltk050h3146w_prepare(struct drm_panel *panel) { struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel); struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); int ret; if (ctx->prepared) return 0; dev_dbg(ctx->dev, "Resetting the panel\n"); ret = 
regulator_enable(ctx->vci); if (ret < 0) { dev_err(ctx->dev, "Failed to enable vci supply: %d\n", ret); return ret; } ret = regulator_enable(ctx->iovcc); if (ret < 0) { dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret); goto disable_vci; } gpiod_set_value_cansleep(ctx->reset_gpio, 1); usleep_range(5000, 6000); gpiod_set_value_cansleep(ctx->reset_gpio, 0); msleep(20); ret = ctx->panel_desc->init(ctx); if (ret < 0) { dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret); goto disable_iovcc; } ret = mipi_dsi_dcs_exit_sleep_mode(dsi); if (ret < 0) { dev_err(ctx->dev, "Failed to exit sleep mode: %d\n", ret); goto disable_iovcc; } /* T9: 120ms */ msleep(120); ret = mipi_dsi_dcs_set_display_on(dsi); if (ret < 0) { dev_err(ctx->dev, "Failed to set display on: %d\n", ret); goto disable_iovcc; } msleep(50); ctx->prepared = true; return 0; disable_iovcc: regulator_disable(ctx->iovcc); disable_vci: regulator_disable(ctx->vci); return ret; } static int ltk050h3146w_get_modes(struct drm_panel *panel, struct drm_connector *connector) { struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel); struct drm_display_mode *mode; mode = drm_mode_duplicate(connector->dev, ctx->panel_desc->mode); if (!mode) return -ENOMEM; drm_mode_set_name(mode); mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; connector->display_info.width_mm = mode->width_mm; connector->display_info.height_mm = mode->height_mm; drm_mode_probed_add(connector, mode); return 1; } static const struct drm_panel_funcs ltk050h3146w_funcs = { .unprepare = ltk050h3146w_unprepare, .prepare = ltk050h3146w_prepare, .get_modes = ltk050h3146w_get_modes, }; static int ltk050h3146w_probe(struct mipi_dsi_device *dsi) { struct device *dev = &dsi->dev; struct ltk050h3146w *ctx; int ret; ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->panel_desc = of_device_get_match_data(dev); if (!ctx->panel_desc) return -EINVAL; ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", 
GPIOD_OUT_LOW); if (IS_ERR(ctx->reset_gpio)) { dev_err(dev, "cannot get reset gpio\n"); return PTR_ERR(ctx->reset_gpio); } ctx->vci = devm_regulator_get(dev, "vci"); if (IS_ERR(ctx->vci)) { ret = PTR_ERR(ctx->vci); if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to request vci regulator: %d\n", ret); return ret; } ctx->iovcc = devm_regulator_get(dev, "iovcc"); if (IS_ERR(ctx->iovcc)) { ret = PTR_ERR(ctx->iovcc); if (ret != -EPROBE_DEFER) dev_err(dev, "Failed to request iovcc regulator: %d\n", ret); return ret; } mipi_dsi_set_drvdata(dsi, ctx); ctx->dev = dev; dsi->lanes = 4; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET; drm_panel_init(&ctx->panel, &dsi->dev, &ltk050h3146w_funcs, DRM_MODE_CONNECTOR_DSI); ret = drm_panel_of_backlight(&ctx->panel); if (ret) return ret; drm_panel_add(&ctx->panel); ret = mipi_dsi_attach(dsi); if (ret < 0) { dev_err(dev, "mipi_dsi_attach failed: %d\n", ret); drm_panel_remove(&ctx->panel); return ret; } return 0; } static void ltk050h3146w_shutdown(struct mipi_dsi_device *dsi) { struct ltk050h3146w *ctx = mipi_dsi_get_drvdata(dsi); int ret; ret = drm_panel_unprepare(&ctx->panel); if (ret < 0) dev_err(&dsi->dev, "Failed to unprepare panel: %d\n", ret); ret = drm_panel_disable(&ctx->panel); if (ret < 0) dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret); } static void ltk050h3146w_remove(struct mipi_dsi_device *dsi) { struct ltk050h3146w *ctx = mipi_dsi_get_drvdata(dsi); int ret; ltk050h3146w_shutdown(dsi); ret = mipi_dsi_detach(dsi); if (ret < 0) dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret); drm_panel_remove(&ctx->panel); } static const struct of_device_id ltk050h3146w_of_match[] = { { .compatible = "leadtek,ltk050h3146w", .data = &ltk050h3146w_data, }, { .compatible = "leadtek,ltk050h3146w-a2", .data = &ltk050h3146w_a2_data, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, ltk050h3146w_of_match); static struct 
mipi_dsi_driver ltk050h3146w_driver = { .driver = { .name = "panel-leadtek-ltk050h3146w", .of_match_table = ltk050h3146w_of_match, }, .probe = ltk050h3146w_probe, .remove = ltk050h3146w_remove, .shutdown = ltk050h3146w_shutdown, }; module_mipi_dsi_driver(ltk050h3146w_driver); MODULE_AUTHOR("Heiko Stuebner <[email protected]>"); MODULE_DESCRIPTION("DRM driver for Leadtek LTK050H3146W MIPI DSI panel"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
// SPDX-License-Identifier: GPL-2.0 /* * Panel driver for the Samsung S6D27A1 480x800 DPI RGB panel. * Found in the Samsung Galaxy Ace 2 GT-I8160 mobile phone. */ #include <drm/drm_mipi_dbi.h> #include <drm/drm_modes.h> #include <drm/drm_panel.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/media-bus-format.h> #include <linux/module.h> #include <linux/of.h> #include <linux/regulator/consumer.h> #include <linux/spi/spi.h> #include <video/mipi_display.h> #define S6D27A1_PASSWD_L2 0xF0 /* Password Command for Level 2 Control */ #define S6D27A1_RESCTL 0xB3 /* Resolution Select Control */ #define S6D27A1_PANELCTL2 0xB4 /* ASG Signal Control */ #define S6D27A1_READID1 0xDA /* Read panel ID 1 */ #define S6D27A1_READID2 0xDB /* Read panel ID 2 */ #define S6D27A1_READID3 0xDC /* Read panel ID 3 */ #define S6D27A1_DISPCTL 0xF2 /* Display Control */ #define S6D27A1_MANPWR 0xF3 /* Manual Control */ #define S6D27A1_PWRCTL1 0xF4 /* Power Control */ #define S6D27A1_SRCCTL 0xF6 /* Source Control */ #define S6D27A1_PANELCTL 0xF7 /* Panel Control*/ static const u8 s6d27a1_dbi_read_commands[] = { S6D27A1_READID1, S6D27A1_READID2, S6D27A1_READID3, 0, /* sentinel */ }; struct s6d27a1 { struct device *dev; struct mipi_dbi dbi; struct drm_panel panel; struct gpio_desc *reset; struct regulator_bulk_data regulators[2]; }; static const struct drm_display_mode s6d27a1_480_800_mode = { /* * The vendor driver states that the S6D27A1 panel * has a pixel clock frequency of 49920000 Hz / 2 = 24960000 Hz. 
*/ .clock = 24960, .hdisplay = 480, .hsync_start = 480 + 63, .hsync_end = 480 + 63 + 2, .htotal = 480 + 63 + 2 + 63, .vdisplay = 800, .vsync_start = 800 + 11, .vsync_end = 800 + 11 + 2, .vtotal = 800 + 11 + 2 + 10, .width_mm = 50, .height_mm = 84, .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, }; static inline struct s6d27a1 *to_s6d27a1(struct drm_panel *panel) { return container_of(panel, struct s6d27a1, panel); } static void s6d27a1_read_mtp_id(struct s6d27a1 *ctx) { struct mipi_dbi *dbi = &ctx->dbi; u8 id1, id2, id3; int ret; ret = mipi_dbi_command_read(dbi, S6D27A1_READID1, &id1); if (ret) { dev_err(ctx->dev, "unable to read MTP ID 1\n"); return; } ret = mipi_dbi_command_read(dbi, S6D27A1_READID2, &id2); if (ret) { dev_err(ctx->dev, "unable to read MTP ID 2\n"); return; } ret = mipi_dbi_command_read(dbi, S6D27A1_READID3, &id3); if (ret) { dev_err(ctx->dev, "unable to read MTP ID 3\n"); return; } dev_info(ctx->dev, "MTP ID: %02x %02x %02x\n", id1, id2, id3); } static int s6d27a1_power_on(struct s6d27a1 *ctx) { struct mipi_dbi *dbi = &ctx->dbi; int ret; /* Power up */ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->regulators), ctx->regulators); if (ret) { dev_err(ctx->dev, "failed to enable regulators: %d\n", ret); return ret; } msleep(20); /* Assert reset >=1 ms */ gpiod_set_value_cansleep(ctx->reset, 1); usleep_range(1000, 5000); /* De-assert reset */ gpiod_set_value_cansleep(ctx->reset, 0); /* Wait >= 10 ms */ msleep(20); /* * Exit sleep mode and initialize display - some hammering is * necessary. 
*/ mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE); mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE); msleep(120); /* Magic to unlock level 2 control of the display */ mipi_dbi_command(dbi, S6D27A1_PASSWD_L2, 0x5A, 0x5A); /* Configure resolution to 480RGBx800 */ mipi_dbi_command(dbi, S6D27A1_RESCTL, 0x22); mipi_dbi_command(dbi, S6D27A1_PANELCTL2, 0x00, 0x02, 0x03, 0x04, 0x05, 0x08, 0x00, 0x0c); mipi_dbi_command(dbi, S6D27A1_MANPWR, 0x01, 0x00, 0x00, 0x08, 0x08, 0x02, 0x00); mipi_dbi_command(dbi, S6D27A1_DISPCTL, 0x19, 0x00, 0x08, 0x0D, 0x03, 0x41, 0x3F); mipi_dbi_command(dbi, S6D27A1_PWRCTL1, 0x00, 0x00, 0x00, 0x00, 0x55, 0x44, 0x05, 0x88, 0x4B, 0x50); mipi_dbi_command(dbi, S6D27A1_SRCCTL, 0x03, 0x09, 0x8A, 0x00, 0x01, 0x16); mipi_dbi_command(dbi, S6D27A1_PANELCTL, 0x00, 0x05, 0x06, 0x07, 0x08, 0x01, 0x09, 0x0D, 0x0A, 0x0E, 0x0B, 0x0F, 0x0C, 0x10, 0x01, 0x11, 0x12, 0x13, 0x14, 0x05, 0x06, 0x07, 0x08, 0x01, 0x09, 0x0D, 0x0A, 0x0E, 0x0B, 0x0F, 0x0C, 0x10, 0x01, 0x11, 0x12, 0x13, 0x14); /* lock the level 2 control */ mipi_dbi_command(dbi, S6D27A1_PASSWD_L2, 0xA5, 0xA5); s6d27a1_read_mtp_id(ctx); return 0; } static int s6d27a1_power_off(struct s6d27a1 *ctx) { /* Go into RESET and disable regulators */ gpiod_set_value_cansleep(ctx->reset, 1); return regulator_bulk_disable(ARRAY_SIZE(ctx->regulators), ctx->regulators); } static int s6d27a1_unprepare(struct drm_panel *panel) { struct s6d27a1 *ctx = to_s6d27a1(panel); struct mipi_dbi *dbi = &ctx->dbi; mipi_dbi_command(dbi, MIPI_DCS_ENTER_SLEEP_MODE); msleep(120); return s6d27a1_power_off(to_s6d27a1(panel)); } static int s6d27a1_disable(struct drm_panel *panel) { struct s6d27a1 *ctx = to_s6d27a1(panel); struct mipi_dbi *dbi = &ctx->dbi; mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF); msleep(25); return 0; } static int s6d27a1_prepare(struct drm_panel *panel) { return s6d27a1_power_on(to_s6d27a1(panel)); } static int s6d27a1_enable(struct drm_panel *panel) { struct s6d27a1 *ctx = to_s6d27a1(panel); struct mipi_dbi *dbi = 
&ctx->dbi; mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON); return 0; } static int s6d27a1_get_modes(struct drm_panel *panel, struct drm_connector *connector) { struct s6d27a1 *ctx = to_s6d27a1(panel); struct drm_display_mode *mode; static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24; mode = drm_mode_duplicate(connector->dev, &s6d27a1_480_800_mode); if (!mode) { dev_err(ctx->dev, "failed to add mode\n"); return -ENOMEM; } connector->display_info.bpc = 8; connector->display_info.width_mm = mode->width_mm; connector->display_info.height_mm = mode->height_mm; connector->display_info.bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE; drm_display_info_set_bus_formats(&connector->display_info, &bus_format, 1); drm_mode_set_name(mode); mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; drm_mode_probed_add(connector, mode); return 1; } static const struct drm_panel_funcs s6d27a1_drm_funcs = { .disable = s6d27a1_disable, .unprepare = s6d27a1_unprepare, .prepare = s6d27a1_prepare, .enable = s6d27a1_enable, .get_modes = s6d27a1_get_modes, }; static int s6d27a1_probe(struct spi_device *spi) { struct device *dev = &spi->dev; struct s6d27a1 *ctx; int ret; ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->dev = dev; /* * VCI is the analog voltage supply * VCCIO is the digital I/O voltage supply */ ctx->regulators[0].supply = "vci"; ctx->regulators[1].supply = "vccio"; ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->regulators), ctx->regulators); if (ret) return dev_err_probe(dev, ret, "failed to get regulators\n"); ctx->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(ctx->reset)) { ret = PTR_ERR(ctx->reset); return dev_err_probe(dev, ret, "no RESET GPIO\n"); } ret = mipi_dbi_spi_init(spi, &ctx->dbi, NULL); if (ret) return dev_err_probe(dev, ret, "MIPI DBI init failed\n"); ctx->dbi.read_commands = s6d27a1_dbi_read_commands; drm_panel_init(&ctx->panel, dev, &s6d27a1_drm_funcs, DRM_MODE_CONNECTOR_DPI); ret = 
drm_panel_of_backlight(&ctx->panel); if (ret) return dev_err_probe(dev, ret, "failed to add backlight\n"); spi_set_drvdata(spi, ctx); drm_panel_add(&ctx->panel); return 0; } static void s6d27a1_remove(struct spi_device *spi) { struct s6d27a1 *ctx = spi_get_drvdata(spi); drm_panel_remove(&ctx->panel); } static const struct of_device_id s6d27a1_match[] = { { .compatible = "samsung,s6d27a1", }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, s6d27a1_match); static struct spi_driver s6d27a1_driver = { .probe = s6d27a1_probe, .remove = s6d27a1_remove, .driver = { .name = "s6d27a1-panel", .of_match_table = s6d27a1_match, }, }; module_spi_driver(s6d27a1_driver); MODULE_AUTHOR("Markuss Broks <[email protected]>"); MODULE_DESCRIPTION("Samsung S6D27A1 panel driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/panel/panel-samsung-s6d27a1.c
// SPDX-License-Identifier: GPL-2.0 /* * Toppoly TD028TTEC1 Panel Driver * * Copyright (C) 2019 Texas Instruments Incorporated * * Based on the omapdrm-specific panel-tpo-td028ttec1 driver * * Copyright (C) 2008 Nokia Corporation * Author: Tomi Valkeinen <[email protected]> * * Neo 1973 code (jbt6k74.c): * Copyright (C) 2006-2007 OpenMoko, Inc. * Author: Harald Welte <[email protected]> * * Ported and adapted from Neo 1973 U-Boot by: * H. Nikolaus Schaller <[email protected]> */ #include <linux/delay.h> #include <linux/module.h> #include <linux/spi/spi.h> #include <drm/drm_connector.h> #include <drm/drm_modes.h> #include <drm/drm_panel.h> #define JBT_COMMAND 0x000 #define JBT_DATA 0x100 #define JBT_REG_SLEEP_IN 0x10 #define JBT_REG_SLEEP_OUT 0x11 #define JBT_REG_DISPLAY_OFF 0x28 #define JBT_REG_DISPLAY_ON 0x29 #define JBT_REG_RGB_FORMAT 0x3a #define JBT_REG_QUAD_RATE 0x3b #define JBT_REG_POWER_ON_OFF 0xb0 #define JBT_REG_BOOSTER_OP 0xb1 #define JBT_REG_BOOSTER_MODE 0xb2 #define JBT_REG_BOOSTER_FREQ 0xb3 #define JBT_REG_OPAMP_SYSCLK 0xb4 #define JBT_REG_VSC_VOLTAGE 0xb5 #define JBT_REG_VCOM_VOLTAGE 0xb6 #define JBT_REG_EXT_DISPL 0xb7 #define JBT_REG_OUTPUT_CONTROL 0xb8 #define JBT_REG_DCCLK_DCEV 0xb9 #define JBT_REG_DISPLAY_MODE1 0xba #define JBT_REG_DISPLAY_MODE2 0xbb #define JBT_REG_DISPLAY_MODE 0xbc #define JBT_REG_ASW_SLEW 0xbd #define JBT_REG_DUMMY_DISPLAY 0xbe #define JBT_REG_DRIVE_SYSTEM 0xbf #define JBT_REG_SLEEP_OUT_FR_A 0xc0 #define JBT_REG_SLEEP_OUT_FR_B 0xc1 #define JBT_REG_SLEEP_OUT_FR_C 0xc2 #define JBT_REG_SLEEP_IN_LCCNT_D 0xc3 #define JBT_REG_SLEEP_IN_LCCNT_E 0xc4 #define JBT_REG_SLEEP_IN_LCCNT_F 0xc5 #define JBT_REG_SLEEP_IN_LCCNT_G 0xc6 #define JBT_REG_GAMMA1_FINE_1 0xc7 #define JBT_REG_GAMMA1_FINE_2 0xc8 #define JBT_REG_GAMMA1_INCLINATION 0xc9 #define JBT_REG_GAMMA1_BLUE_OFFSET 0xca #define JBT_REG_BLANK_CONTROL 0xcf #define JBT_REG_BLANK_TH_TV 0xd0 #define JBT_REG_CKV_ON_OFF 0xd1 #define JBT_REG_CKV_1_2 0xd2 #define JBT_REG_OEV_TIMING 0xd3 
#define JBT_REG_ASW_TIMING_1 0xd4 #define JBT_REG_ASW_TIMING_2 0xd5 #define JBT_REG_HCLOCK_VGA 0xec #define JBT_REG_HCLOCK_QVGA 0xed struct td028ttec1_panel { struct drm_panel panel; struct spi_device *spi; }; #define to_td028ttec1_device(p) container_of(p, struct td028ttec1_panel, panel) /* * noinline_for_stack so we don't get multiple copies of tx_buf * on the stack in case of gcc-plugin-structleak */ static int noinline_for_stack jbt_ret_write_0(struct td028ttec1_panel *lcd, u8 reg, int *err) { struct spi_device *spi = lcd->spi; u16 tx_buf = JBT_COMMAND | reg; int ret; if (err && *err) return *err; ret = spi_write(spi, (u8 *)&tx_buf, sizeof(tx_buf)); if (ret < 0) { dev_err(&spi->dev, "%s: SPI write failed: %d\n", __func__, ret); if (err) *err = ret; } return ret; } static int noinline_for_stack jbt_reg_write_1(struct td028ttec1_panel *lcd, u8 reg, u8 data, int *err) { struct spi_device *spi = lcd->spi; u16 tx_buf[2]; int ret; if (err && *err) return *err; tx_buf[0] = JBT_COMMAND | reg; tx_buf[1] = JBT_DATA | data; ret = spi_write(spi, (u8 *)tx_buf, sizeof(tx_buf)); if (ret < 0) { dev_err(&spi->dev, "%s: SPI write failed: %d\n", __func__, ret); if (err) *err = ret; } return ret; } static int noinline_for_stack jbt_reg_write_2(struct td028ttec1_panel *lcd, u8 reg, u16 data, int *err) { struct spi_device *spi = lcd->spi; u16 tx_buf[3]; int ret; if (err && *err) return *err; tx_buf[0] = JBT_COMMAND | reg; tx_buf[1] = JBT_DATA | (data >> 8); tx_buf[2] = JBT_DATA | (data & 0xff); ret = spi_write(spi, (u8 *)tx_buf, sizeof(tx_buf)); if (ret < 0) { dev_err(&spi->dev, "%s: SPI write failed: %d\n", __func__, ret); if (err) *err = ret; } return ret; } static int td028ttec1_prepare(struct drm_panel *panel) { struct td028ttec1_panel *lcd = to_td028ttec1_device(panel); unsigned int i; int ret = 0; /* Three times command zero */ for (i = 0; i < 3; ++i) { jbt_ret_write_0(lcd, 0x00, &ret); usleep_range(1000, 2000); } /* deep standby out */ jbt_reg_write_1(lcd, 
JBT_REG_POWER_ON_OFF, 0x17, &ret); /* RGB I/F on, RAM write off, QVGA through, SIGCON enable */ jbt_reg_write_1(lcd, JBT_REG_DISPLAY_MODE, 0x80, &ret); /* Quad mode off */ jbt_reg_write_1(lcd, JBT_REG_QUAD_RATE, 0x00, &ret); /* AVDD on, XVDD on */ jbt_reg_write_1(lcd, JBT_REG_POWER_ON_OFF, 0x16, &ret); /* Output control */ jbt_reg_write_2(lcd, JBT_REG_OUTPUT_CONTROL, 0xfff9, &ret); /* Sleep mode off */ jbt_ret_write_0(lcd, JBT_REG_SLEEP_OUT, &ret); /* at this point we have like 50% grey */ /* initialize register set */ jbt_reg_write_1(lcd, JBT_REG_DISPLAY_MODE1, 0x01, &ret); jbt_reg_write_1(lcd, JBT_REG_DISPLAY_MODE2, 0x00, &ret); jbt_reg_write_1(lcd, JBT_REG_RGB_FORMAT, 0x60, &ret); jbt_reg_write_1(lcd, JBT_REG_DRIVE_SYSTEM, 0x10, &ret); jbt_reg_write_1(lcd, JBT_REG_BOOSTER_OP, 0x56, &ret); jbt_reg_write_1(lcd, JBT_REG_BOOSTER_MODE, 0x33, &ret); jbt_reg_write_1(lcd, JBT_REG_BOOSTER_FREQ, 0x11, &ret); jbt_reg_write_1(lcd, JBT_REG_BOOSTER_FREQ, 0x11, &ret); jbt_reg_write_1(lcd, JBT_REG_OPAMP_SYSCLK, 0x02, &ret); jbt_reg_write_1(lcd, JBT_REG_VSC_VOLTAGE, 0x2b, &ret); jbt_reg_write_1(lcd, JBT_REG_VCOM_VOLTAGE, 0x40, &ret); jbt_reg_write_1(lcd, JBT_REG_EXT_DISPL, 0x03, &ret); jbt_reg_write_1(lcd, JBT_REG_DCCLK_DCEV, 0x04, &ret); /* * default of 0x02 in JBT_REG_ASW_SLEW responsible for 72Hz requirement * to avoid red / blue flicker */ jbt_reg_write_1(lcd, JBT_REG_ASW_SLEW, 0x04, &ret); jbt_reg_write_1(lcd, JBT_REG_DUMMY_DISPLAY, 0x00, &ret); jbt_reg_write_1(lcd, JBT_REG_SLEEP_OUT_FR_A, 0x11, &ret); jbt_reg_write_1(lcd, JBT_REG_SLEEP_OUT_FR_B, 0x11, &ret); jbt_reg_write_1(lcd, JBT_REG_SLEEP_OUT_FR_C, 0x11, &ret); jbt_reg_write_2(lcd, JBT_REG_SLEEP_IN_LCCNT_D, 0x2040, &ret); jbt_reg_write_2(lcd, JBT_REG_SLEEP_IN_LCCNT_E, 0x60c0, &ret); jbt_reg_write_2(lcd, JBT_REG_SLEEP_IN_LCCNT_F, 0x1020, &ret); jbt_reg_write_2(lcd, JBT_REG_SLEEP_IN_LCCNT_G, 0x60c0, &ret); jbt_reg_write_2(lcd, JBT_REG_GAMMA1_FINE_1, 0x5533, &ret); jbt_reg_write_1(lcd, JBT_REG_GAMMA1_FINE_2, 0x00, &ret); 
jbt_reg_write_1(lcd, JBT_REG_GAMMA1_INCLINATION, 0x00, &ret); jbt_reg_write_1(lcd, JBT_REG_GAMMA1_BLUE_OFFSET, 0x00, &ret); jbt_reg_write_2(lcd, JBT_REG_HCLOCK_VGA, 0x1f0, &ret); jbt_reg_write_1(lcd, JBT_REG_BLANK_CONTROL, 0x02, &ret); jbt_reg_write_2(lcd, JBT_REG_BLANK_TH_TV, 0x0804, &ret); jbt_reg_write_1(lcd, JBT_REG_CKV_ON_OFF, 0x01, &ret); jbt_reg_write_2(lcd, JBT_REG_CKV_1_2, 0x0000, &ret); jbt_reg_write_2(lcd, JBT_REG_OEV_TIMING, 0x0d0e, &ret); jbt_reg_write_2(lcd, JBT_REG_ASW_TIMING_1, 0x11a4, &ret); jbt_reg_write_1(lcd, JBT_REG_ASW_TIMING_2, 0x0e, &ret); return ret; } static int td028ttec1_enable(struct drm_panel *panel) { struct td028ttec1_panel *lcd = to_td028ttec1_device(panel); return jbt_ret_write_0(lcd, JBT_REG_DISPLAY_ON, NULL); } static int td028ttec1_disable(struct drm_panel *panel) { struct td028ttec1_panel *lcd = to_td028ttec1_device(panel); jbt_ret_write_0(lcd, JBT_REG_DISPLAY_OFF, NULL); return 0; } static int td028ttec1_unprepare(struct drm_panel *panel) { struct td028ttec1_panel *lcd = to_td028ttec1_device(panel); jbt_reg_write_2(lcd, JBT_REG_OUTPUT_CONTROL, 0x8002, NULL); jbt_ret_write_0(lcd, JBT_REG_SLEEP_IN, NULL); jbt_reg_write_1(lcd, JBT_REG_POWER_ON_OFF, 0x00, NULL); return 0; } static const struct drm_display_mode td028ttec1_mode = { .clock = 22153, .hdisplay = 480, .hsync_start = 480 + 24, .hsync_end = 480 + 24 + 8, .htotal = 480 + 24 + 8 + 8, .vdisplay = 640, .vsync_start = 640 + 4, .vsync_end = 640 + 4 + 2, .vtotal = 640 + 4 + 2 + 2, .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, .width_mm = 43, .height_mm = 58, }; static int td028ttec1_get_modes(struct drm_panel *panel, struct drm_connector *connector) { struct drm_display_mode *mode; mode = drm_mode_duplicate(connector->dev, &td028ttec1_mode); if (!mode) return -ENOMEM; drm_mode_set_name(mode); drm_mode_probed_add(connector, mode); connector->display_info.width_mm = td028ttec1_mode.width_mm; 
connector->display_info.height_mm = td028ttec1_mode.height_mm; /* * FIXME: According to the datasheet sync signals are sampled on the * rising edge of the clock, but the code running on the OpenMoko Neo * FreeRunner and Neo 1973 indicates sampling on the falling edge. This * should be tested on a real device. */ connector->display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE | DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE; return 1; } static const struct drm_panel_funcs td028ttec1_funcs = { .prepare = td028ttec1_prepare, .enable = td028ttec1_enable, .disable = td028ttec1_disable, .unprepare = td028ttec1_unprepare, .get_modes = td028ttec1_get_modes, }; static int td028ttec1_probe(struct spi_device *spi) { struct td028ttec1_panel *lcd; int ret; lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL); if (!lcd) return -ENOMEM; spi_set_drvdata(spi, lcd); lcd->spi = spi; spi->mode = SPI_MODE_3; spi->bits_per_word = 9; ret = spi_setup(spi); if (ret < 0) { dev_err(&spi->dev, "failed to setup SPI: %d\n", ret); return ret; } drm_panel_init(&lcd->panel, &lcd->spi->dev, &td028ttec1_funcs, DRM_MODE_CONNECTOR_DPI); ret = drm_panel_of_backlight(&lcd->panel); if (ret) return ret; drm_panel_add(&lcd->panel); return 0; } static void td028ttec1_remove(struct spi_device *spi) { struct td028ttec1_panel *lcd = spi_get_drvdata(spi); drm_panel_remove(&lcd->panel); drm_panel_disable(&lcd->panel); drm_panel_unprepare(&lcd->panel); } static const struct of_device_id td028ttec1_of_match[] = { { .compatible = "tpo,td028ttec1", }, /* DT backward compatibility. 
*/ { .compatible = "toppoly,td028ttec1", }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, td028ttec1_of_match); static const struct spi_device_id td028ttec1_ids[] = { { "td028ttec1", 0 }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(spi, td028ttec1_ids); static struct spi_driver td028ttec1_driver = { .probe = td028ttec1_probe, .remove = td028ttec1_remove, .id_table = td028ttec1_ids, .driver = { .name = "panel-tpo-td028ttec1", .of_match_table = td028ttec1_of_match, }, }; module_spi_driver(td028ttec1_driver); MODULE_AUTHOR("H. Nikolaus Schaller <[email protected]>"); MODULE_DESCRIPTION("Toppoly TD028TTEC1 panel driver"); MODULE_LICENSE("GPL");
linux-master
drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Orisetech OTA5601A TFT LCD panel driver
 *
 * Copyright (C) 2021, Christophe Branchereau <[email protected]>
 */

#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>

#include <drm/drm_modes.h>
#include <drm/drm_panel.h>

/* Single display on/off control register */
#define OTA5601A_CTL 0x01
#define OTA5601A_CTL_OFF 0x00
#define OTA5601A_CTL_ON BIT(0)

/*
 * Per-variant data attached to the SPI device ID: the supported modes,
 * physical size and the RGB bus format/flags to report to DRM.
 */
struct ota5601a_panel_info {
	const struct drm_display_mode *display_modes;
	unsigned int num_modes;
	u16 width_mm, height_mm;
	u32 bus_format, bus_flags;
};

/* Driver state: DRM panel, SPI regmap, power supply and reset line */
struct ota5601a {
	struct drm_panel drm_panel;
	struct regmap *map;
	struct regulator *supply;
	const struct ota5601a_panel_info *panel_info;

	struct gpio_desc *reset_gpio;
};

static inline struct ota5601a *to_ota5601a(struct drm_panel *panel)
{
	return container_of(panel, struct ota5601a, drm_panel);
}

/*
 * Vendor-supplied register init sequence, written in one go from
 * ota5601a_prepare(). Order matters: the 0xfd "Page Shift" writes select
 * the register page for the writes that follow.
 */
static const struct reg_sequence ota5601a_panel_regs[] = {
	{ 0xfd, 0x00 }, /* Page Shift */
	{ 0x02, 0x00 }, /* Reset */
	{ 0x18, 0x00 }, /* Interface Sel: RGB 24 Bits */
	{ 0x34, 0x20 }, /* Undocumented */

	{ 0x0c, 0x01 }, /* Contrast set by CMD1 == within page 0x00 */
	{ 0x0d, 0x48 }, /* R Brightness */
	{ 0x0e, 0x48 }, /* G Brightness */
	{ 0x0f, 0x48 }, /* B Brightness */
	{ 0x07, 0x40 }, /* R Contrast */
	{ 0x08, 0x33 }, /* G Contrast */
	{ 0x09, 0x3a }, /* B Contrast */

	{ 0x16, 0x01 }, /* NTSC Sel */
	{ 0x19, 0x8d }, /* VBLK */
	{ 0x1a, 0x28 }, /* HBLK */
	{ 0x1c, 0x00 }, /* Scan Shift Dir. */

	{ 0xfd, 0xc5 }, /* Page Shift */
	{ 0x82, 0x0c }, /* PWR_CTRL Pump */
	{ 0xa2, 0xb4 }, /* PWR_CTRL VGH/VGL */

	{ 0xfd, 0xc4 }, /* Page Shift - What follows is listed as "RGB 24bit Timing Set" */
	{ 0x82, 0x45 },

	{ 0xfd, 0xc1 },
	{ 0x91, 0x02 },

	{ 0xfd, 0xc0 },
	{ 0xa1, 0x01 },
	{ 0xa2, 0x1f },
	{ 0xa3, 0x0b },
	{ 0xa4, 0x38 },
	{ 0xa5, 0x00 },
	{ 0xa6, 0x0a },
	{ 0xa7, 0x38 },
	{ 0xa8, 0x00 },
	{ 0xa9, 0x0a },
	{ 0xaa, 0x37 },

	{ 0xfd, 0xce },
	{ 0x81, 0x18 },
	{ 0x82, 0x43 },
	{ 0x83, 0x43 },
	{ 0x91, 0x06 },
	{ 0x93, 0x38 },
	{ 0x94, 0x02 },
	{ 0x95, 0x06 },
	{ 0x97, 0x38 },
	{ 0x98, 0x02 },
	{ 0x99, 0x06 },
	{ 0x9b, 0x38 },
	{ 0x9c, 0x02 },

	{ 0xfd, 0x00 }, /* Page Shift */
};

static const struct regmap_config ota5601a_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
};

/*
 * Power up the panel: enable the supply, pulse the reset line, then
 * program the full vendor init sequence. On init failure the supply is
 * disabled again so unprepare is not needed.
 */
static int ota5601a_prepare(struct drm_panel *drm_panel)
{
	struct ota5601a *panel = to_ota5601a(drm_panel);
	int err;

	err = regulator_enable(panel->supply);
	if (err) {
		dev_err(drm_panel->dev, "Failed to enable power supply: %d\n", err);
		return err;
	}

	/* Reset to be held low for 10us min according to the doc, 10ms before sending commands */
	gpiod_set_value_cansleep(panel->reset_gpio, 1);
	usleep_range(10, 30);
	gpiod_set_value_cansleep(panel->reset_gpio, 0);
	usleep_range(10000, 20000);

	/* Init all registers. */
	err = regmap_multi_reg_write(panel->map, ota5601a_panel_regs,
				     ARRAY_SIZE(ota5601a_panel_regs));
	if (err) {
		dev_err(drm_panel->dev, "Failed to init registers: %d\n", err);
		goto err_disable_regulator;
	}

	msleep(120);

	return 0;

err_disable_regulator:
	regulator_disable(panel->supply);
	return err;
}

/* Assert reset and cut power; mirror image of ota5601a_prepare() */
static int ota5601a_unprepare(struct drm_panel *drm_panel)
{
	struct ota5601a *panel = to_ota5601a(drm_panel);

	gpiod_set_value_cansleep(panel->reset_gpio, 1);

	regulator_disable(panel->supply);

	return 0;
}

/* Turn the display output on via the CTL register */
static int ota5601a_enable(struct drm_panel *drm_panel)
{
	struct ota5601a *panel = to_ota5601a(drm_panel);
	int err;

	err = regmap_write(panel->map, OTA5601A_CTL, OTA5601A_CTL_ON);

	if (err) {
		dev_err(drm_panel->dev, "Unable to enable panel: %d\n", err);
		return err;
	}

	if (drm_panel->backlight) {
		/* Wait for the picture to be ready before enabling backlight */
		msleep(120);
	}

	return 0;
}

/* Turn the display output off via the CTL register */
static int ota5601a_disable(struct drm_panel *drm_panel)
{
	struct ota5601a *panel = to_ota5601a(drm_panel);
	int err;

	err = regmap_write(panel->map, OTA5601A_CTL, OTA5601A_CTL_OFF);

	if (err) {
		dev_err(drm_panel->dev, "Unable to disable panel: %d\n", err);
		return err;
	}

	return 0;
}

/*
 * Report the variant's mode list to DRM. The single-mode case is marked
 * preferred; with several modes (e.g. 50/60 Hz) none is preferred.
 * Returns the number of modes added, or -ENOMEM.
 */
static int ota5601a_get_modes(struct drm_panel *drm_panel,
			      struct drm_connector *connector)
{
	struct ota5601a *panel = to_ota5601a(drm_panel);
	const struct ota5601a_panel_info *panel_info = panel->panel_info;
	struct drm_display_mode *mode;
	unsigned int i;

	for (i = 0; i < panel_info->num_modes; i++) {
		mode = drm_mode_duplicate(connector->dev,
					  &panel_info->display_modes[i]);
		if (!mode)
			return -ENOMEM;

		drm_mode_set_name(mode);

		mode->type = DRM_MODE_TYPE_DRIVER;
		if (panel_info->num_modes == 1)
			mode->type |= DRM_MODE_TYPE_PREFERRED;

		drm_mode_probed_add(connector, mode);
	}

	connector->display_info.bpc = 8;
	connector->display_info.width_mm = panel_info->width_mm;
	connector->display_info.height_mm = panel_info->height_mm;

	drm_display_info_set_bus_formats(&connector->display_info,
					 &panel_info->bus_format, 1);
	connector->display_info.bus_flags = panel_info->bus_flags;

	return panel_info->num_modes;
}

static const struct drm_panel_funcs ota5601a_funcs = {
	.prepare = ota5601a_prepare,
	.unprepare = ota5601a_unprepare,
	.enable = ota5601a_enable,
	.disable = ota5601a_disable,
	.get_modes = ota5601a_get_modes,
};

/*
 * SPI probe: fetch the per-variant info from the device ID, acquire the
 * "power" regulator and "reset" GPIO (held asserted), configure the SPI
 * bus (8-bit, mode 3, 3-wire), build the regmap and register the panel.
 */
static int ota5601a_probe(struct spi_device *spi)
{
	const struct spi_device_id *id = spi_get_device_id(spi);
	struct device *dev = &spi->dev;
	struct ota5601a *panel;
	int err;

	panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
	if (!panel)
		return -ENOMEM;

	spi_set_drvdata(spi, panel);

	panel->panel_info = (const struct ota5601a_panel_info *)id->driver_data;
	if (!panel->panel_info)
		return -EINVAL;

	panel->supply = devm_regulator_get(dev, "power");
	if (IS_ERR(panel->supply)) {
		dev_err(dev, "Failed to get power supply\n");
		return PTR_ERR(panel->supply);
	}

	panel->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(panel->reset_gpio)) {
		dev_err(dev, "Failed to get reset GPIO\n");
		return PTR_ERR(panel->reset_gpio);
	}

	spi->bits_per_word = 8;
	spi->mode = SPI_MODE_3 | SPI_3WIRE;
	err = spi_setup(spi);
	if (err) {
		dev_err(dev, "Failed to setup SPI\n");
		return err;
	}

	panel->map = devm_regmap_init_spi(spi, &ota5601a_regmap_config);
	if (IS_ERR(panel->map)) {
		dev_err(dev, "Failed to init regmap\n");
		return PTR_ERR(panel->map);
	}

	drm_panel_init(&panel->drm_panel, dev, &ota5601a_funcs,
		       DRM_MODE_CONNECTOR_DPI);

	err = drm_panel_of_backlight(&panel->drm_panel);
	if (err) {
		/* -EPROBE_DEFER is expected while the backlight shows up */
		if (err != -EPROBE_DEFER)
			dev_err(dev, "Failed to get backlight handle\n");
		return err;
	}

	drm_panel_add(&panel->drm_panel);

	return 0;
}

/* Unregister from DRM, then make sure the panel is off and unpowered */
static void ota5601a_remove(struct spi_device *spi)
{
	struct ota5601a *panel = spi_get_drvdata(spi);

	drm_panel_remove(&panel->drm_panel);

	ota5601a_disable(&panel->drm_panel);
	ota5601a_unprepare(&panel->drm_panel);
}

/* GPT3 variant: 640x480, switchable between 60 Hz and 50 Hz timings */
static const struct drm_display_mode gpt3_display_modes[] = {
	{ /* 60 Hz */
		.clock = 27000,
		.hdisplay = 640,
		.hsync_start = 640 + 220,
		.hsync_end = 640 + 220 + 20,
		.htotal = 640 + 220 + 20 + 20,
		.vdisplay = 480,
		.vsync_start = 480 + 7,
		.vsync_end = 480 + 7 + 6,
		.vtotal = 480 + 7 + 6 + 7,
		.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
	},

	{ /* 50 Hz */
		.clock = 24000,
		.hdisplay = 640,
		.hsync_start = 640 + 280,
		.hsync_end = 640 + 280 + 20,
		.htotal = 640 + 280 + 20 + 20,
		.vdisplay = 480,
		.vsync_start = 480 + 7,
		.vsync_end = 480 + 7 + 6,
		.vtotal = 480 + 7 + 6 + 7,
		.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
	},
};

static const struct ota5601a_panel_info gpt3_info = {
	.display_modes = gpt3_display_modes,
	.num_modes = ARRAY_SIZE(gpt3_display_modes),
	.width_mm = 71,
	.height_mm = 51,
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
};

static const struct spi_device_id gpt3_id[] = {
	{ "gpt3", (kernel_ulong_t)&gpt3_info },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(spi, gpt3_id);

static const struct of_device_id ota5601a_of_match[] = {
	{ .compatible = "focaltech,gpt3" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ota5601a_of_match);

static struct spi_driver ota5601a_driver = {
	.driver = {
		.name = "ota5601a",
		.of_match_table = ota5601a_of_match,
	},
	.id_table = gpt3_id,
	.probe = ota5601a_probe,
	.remove = ota5601a_remove,
};

module_spi_driver(ota5601a_driver);

MODULE_AUTHOR("Christophe Branchereau <[email protected]>");
MODULE_LICENSE("GPL");
linux-master
drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
// SPDX-License-Identifier: GPL-2.0 /* * DSI interface to the Samsung S6E63M0 panel. * (C) 2019 Linus Walleij */ #include <linux/module.h> #include <linux/delay.h> #include <linux/mod_devicetable.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_print.h> #include "panel-samsung-s6e63m0.h" #define MCS_GLOBAL_PARAM 0xb0 #define S6E63M0_DSI_MAX_CHUNK 15 /* CMD + 15 bytes max */ static int s6e63m0_dsi_dcs_read(struct device *dev, void *trsp, const u8 cmd, u8 *data) { struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev); int ret; ret = mipi_dsi_dcs_read(dsi, cmd, data, 1); if (ret < 0) { dev_err(dev, "could not read DCS CMD %02x\n", cmd); return ret; } dev_dbg(dev, "DSI read CMD %02x = %02x\n", cmd, *data); return 0; } static int s6e63m0_dsi_dcs_write(struct device *dev, void *trsp, const u8 *data, size_t len) { struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev); const u8 *seqp = data; u8 cmd; u8 cmdwritten; int remain; int chunk; int ret; dev_dbg(dev, "DSI writing dcs seq: %*ph\n", (int)len, data); /* Pick out and skip past the DCS command */ cmd = *seqp; seqp++; cmdwritten = 0; remain = len - 1; chunk = remain; /* Send max S6E63M0_DSI_MAX_CHUNK bytes at a time */ if (chunk > S6E63M0_DSI_MAX_CHUNK) chunk = S6E63M0_DSI_MAX_CHUNK; ret = mipi_dsi_dcs_write(dsi, cmd, seqp, chunk); if (ret < 0) { dev_err(dev, "error sending DCS command seq cmd %02x\n", cmd); return ret; } cmdwritten += chunk; seqp += chunk; while (cmdwritten < remain) { chunk = remain - cmdwritten; if (chunk > S6E63M0_DSI_MAX_CHUNK) chunk = S6E63M0_DSI_MAX_CHUNK; ret = mipi_dsi_dcs_write(dsi, MCS_GLOBAL_PARAM, &cmdwritten, 1); if (ret < 0) { dev_err(dev, "error sending CMD %02x global param %02x\n", cmd, cmdwritten); return ret; } ret = mipi_dsi_dcs_write(dsi, cmd, seqp, chunk); if (ret < 0) { dev_err(dev, "error sending CMD %02x chunk\n", cmd); return ret; } cmdwritten += chunk; seqp += chunk; } dev_dbg(dev, "sent command %02x %02x bytes\n", cmd, cmdwritten); usleep_range(8000, 9000); return 0; } 
static int s6e63m0_dsi_probe(struct mipi_dsi_device *dsi) { struct device *dev = &dsi->dev; int ret; dsi->lanes = 2; dsi->format = MIPI_DSI_FMT_RGB888; dsi->hs_rate = 349440000; dsi->lp_rate = 9600000; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST; ret = s6e63m0_probe(dev, NULL, s6e63m0_dsi_dcs_read, s6e63m0_dsi_dcs_write, true); if (ret) return ret; ret = mipi_dsi_attach(dsi); if (ret < 0) s6e63m0_remove(dev); return ret; } static void s6e63m0_dsi_remove(struct mipi_dsi_device *dsi) { mipi_dsi_detach(dsi); s6e63m0_remove(&dsi->dev); } static const struct of_device_id s6e63m0_dsi_of_match[] = { { .compatible = "samsung,s6e63m0" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, s6e63m0_dsi_of_match); static struct mipi_dsi_driver s6e63m0_dsi_driver = { .probe = s6e63m0_dsi_probe, .remove = s6e63m0_dsi_remove, .driver = { .name = "panel-samsung-s6e63m0", .of_match_table = s6e63m0_dsi_of_match, }, }; module_mipi_dsi_driver(s6e63m0_dsi_driver); MODULE_AUTHOR("Linus Walleij <[email protected]>"); MODULE_DESCRIPTION("s6e63m0 LCD DSI Driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Panel driver for the Samsung LMS397KF04 480x800 DPI RGB panel.
 * According to the data sheet the display controller is called DB7430.
 * Found in the Samsung Galaxy Beam GT-I8350 mobile phone.
 * Linus Walleij <[email protected]>
 */
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>

#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>

#include <video/mipi_display.h>

/* DB7430 manufacturer command set (sent over MIPI DBI) */
#define DB7430_ACCESS_PROT_OFF 0xb0
#define DB7430_UNKNOWN_B4 0xb4
#define DB7430_USER_SELECT 0xb5
#define DB7430_UNKNOWN_B7 0xb7
#define DB7430_UNKNOWN_B8 0xb8
#define DB7430_PANEL_DRIVING 0xc0
#define DB7430_SOURCE_CONTROL 0xc1
#define DB7430_GATE_INTERFACE 0xc4
#define DB7430_DISPLAY_H_TIMING 0xc5
#define DB7430_RGB_SYNC_OPTION 0xc6
#define DB7430_GAMMA_SET_RED 0xc8
#define DB7430_GAMMA_SET_GREEN 0xc9
#define DB7430_GAMMA_SET_BLUE 0xca
#define DB7430_BIAS_CURRENT_CTRL 0xd1
#define DB7430_DDV_CTRL 0xd2
#define DB7430_GAMMA_CTRL_REF 0xd3
#define DB7430_UNKNOWN_D4 0xd4
#define DB7430_DCDC_CTRL 0xd5
#define DB7430_VCL_CTRL 0xd6
#define DB7430_UNKNOWN_F8 0xf8
#define DB7430_UNKNOWN_FC 0xfc

#define DATA_MASK 0x100

/**
 * struct db7430 - state container for a panel controlled by the DB7430
 * controller
 */
struct db7430 {
	/** @dev: the container device */
	struct device *dev;
	/** @dbi: the DBI bus abstraction handle */
	struct mipi_dbi dbi;
	/** @panel: the DRM panel instance for this device */
	struct drm_panel panel;
	/** @reset: reset GPIO line */
	struct gpio_desc *reset;
	/** @regulators: VCCIO and VIO supply regulators */
	struct regulator_bulk_data regulators[2];
};

static const struct drm_display_mode db7430_480_800_mode = {
	/*
	 * 31 ns period min (htotal*vtotal*vrefresh)/1000
	 * gives a Vrefresh of ~71 Hz.
	 */
	.clock = 32258,
	.hdisplay = 480,
	.hsync_start = 480 + 10,
	.hsync_end = 480 + 10 + 4,
	.htotal = 480 + 10 + 4 + 40,
	.vdisplay = 800,
	.vsync_start = 800 + 6,
	.vsync_end = 800 + 6 + 1,
	.vtotal = 800 + 6 + 1 + 7,
	.width_mm = 53,
	.height_mm = 87,
	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};

static inline struct db7430 *to_db7430(struct drm_panel *panel)
{
	return container_of(panel, struct db7430, panel);
}

/*
 * Power-up and controller setup: enable supplies, pulse reset, then send
 * the vendor register programming sequence (driving, timing, gamma and
 * power control). The command order and byte values come from the vendor
 * code and must not be reordered.
 */
static int db7430_power_on(struct db7430 *db)
{
	struct mipi_dbi *dbi = &db->dbi;
	int ret;

	/* Power up */
	ret = regulator_bulk_enable(ARRAY_SIZE(db->regulators),
				    db->regulators);
	if (ret) {
		dev_err(db->dev, "failed to enable regulators: %d\n", ret);
		return ret;
	}
	msleep(50);

	/* Assert reset >=1 ms */
	gpiod_set_value_cansleep(db->reset, 1);
	usleep_range(1000, 5000);
	/* De-assert reset */
	gpiod_set_value_cansleep(db->reset, 0);
	/* Wait >= 10 ms */
	msleep(10);
	dev_dbg(db->dev, "de-asserted RESET\n");

	/*
	 * This is set to 0x0a (RGB/BGR order + horizontal flip) in order
	 * to make the display behave normally. If this is not set the displays
	 * normal output behaviour is horizontally flipped and BGR ordered. Do
	 * it twice because the first message doesn't always "take".
	 */
	mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, 0x0a);
	mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, 0x0a);
	mipi_dbi_command(dbi, DB7430_ACCESS_PROT_OFF, 0x00);
	mipi_dbi_command(dbi, DB7430_PANEL_DRIVING, 0x28, 0x08);
	mipi_dbi_command(dbi, DB7430_SOURCE_CONTROL,
			 0x01, 0x30, 0x15, 0x05, 0x22);
	mipi_dbi_command(dbi, DB7430_GATE_INTERFACE,
			 0x10, 0x01, 0x00);
	mipi_dbi_command(dbi, DB7430_DISPLAY_H_TIMING,
			 0x06, 0x55, 0x03, 0x07, 0x0b,
			 0x33, 0x00, 0x01, 0x03);
	/*
	 * 0x00 in datasheet 0x01 in vendor code 0x00, it seems 0x01 means
	 * DE active high and 0x00 means DE active low.
	 */
	mipi_dbi_command(dbi, DB7430_RGB_SYNC_OPTION, 0x01);
	mipi_dbi_command(dbi, DB7430_GAMMA_SET_RED,
			 /* R positive gamma */ 0x00,
			 0x0A, 0x31, 0x3B, 0x4E, 0x58, 0x59, 0x5B, 0x58, 0x5E, 0x62,
			 0x60, 0x61, 0x5E, 0x62, 0x55, 0x55, 0x7F, 0x08,
			 /* R negative gamma */ 0x00,
			 0x0A, 0x31, 0x3B, 0x4E, 0x58, 0x59, 0x5B, 0x58, 0x5E, 0x62,
			 0x60, 0x61, 0x5E, 0x62, 0x55, 0x55, 0x7F, 0x08);
	mipi_dbi_command(dbi, DB7430_GAMMA_SET_GREEN,
			 /* G positive gamma */ 0x00,
			 0x25, 0x15, 0x28, 0x3D, 0x4A, 0x48, 0x4C, 0x4A, 0x52, 0x59,
			 0x59, 0x5B, 0x56, 0x60, 0x5D, 0x55, 0x7F, 0x0A,
			 /* G negative gamma */ 0x00,
			 0x25, 0x15, 0x28, 0x3D, 0x4A, 0x48, 0x4C, 0x4A, 0x52, 0x59,
			 0x59, 0x5B, 0x56, 0x60, 0x5D, 0x55, 0x7F, 0x0A);
	mipi_dbi_command(dbi, DB7430_GAMMA_SET_BLUE,
			 /* B positive gamma */ 0x00,
			 0x48, 0x10, 0x1F, 0x2F, 0x35, 0x38, 0x3D, 0x3C, 0x45, 0x4D,
			 0x4E, 0x52, 0x51, 0x60, 0x7F, 0x7E, 0x7F, 0x0C,
			 /* B negative gamma */ 0x00,
			 0x48, 0x10, 0x1F, 0x2F, 0x35, 0x38, 0x3D, 0x3C, 0x45, 0x4D,
			 0x4E, 0x52, 0x51, 0x60, 0x7F, 0x7E, 0x7F, 0x0C);
	mipi_dbi_command(dbi, DB7430_BIAS_CURRENT_CTRL, 0x33, 0x13);
	mipi_dbi_command(dbi, DB7430_DDV_CTRL, 0x11, 0x00, 0x00);
	mipi_dbi_command(dbi, DB7430_GAMMA_CTRL_REF, 0x50, 0x50);
	mipi_dbi_command(dbi, DB7430_DCDC_CTRL, 0x2f, 0x11, 0x1e, 0x46);
	mipi_dbi_command(dbi, DB7430_VCL_CTRL, 0x11, 0x0a);

	return 0;
}

static int db7430_power_off(struct db7430 *db)
{
	/* Go into RESET and disable regulators */
	gpiod_set_value_cansleep(db->reset, 1);
	return regulator_bulk_disable(ARRAY_SIZE(db->regulators),
				      db->regulators);
}

static int db7430_unprepare(struct drm_panel *panel)
{
	return db7430_power_off(to_db7430(panel));
}

/* Display off + sleep in; delays per DCS command requirements */
static int db7430_disable(struct drm_panel *panel)
{
	struct db7430 *db = to_db7430(panel);
	struct mipi_dbi *dbi = &db->dbi;

	mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF);
	msleep(25);
	mipi_dbi_command(dbi, MIPI_DCS_ENTER_SLEEP_MODE);
	msleep(120);

	return 0;
}

static int db7430_prepare(struct drm_panel *panel)
{
	return db7430_power_on(to_db7430(panel));
}

/* Sleep out, NVM load, CABC enable, display on */
static int db7430_enable(struct drm_panel *panel)
{
	struct db7430 *db = to_db7430(panel);
	struct mipi_dbi *dbi = &db->dbi;

	/* Exit sleep mode */
	mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
	msleep(20);

	/* NVM (non-volatile memory) load sequence */
	mipi_dbi_command(dbi, DB7430_UNKNOWN_D4, 0x52, 0x5e);
	mipi_dbi_command(dbi, DB7430_UNKNOWN_F8, 0x01, 0xf5, 0xf2, 0x71, 0x44);
	mipi_dbi_command(dbi, DB7430_UNKNOWN_FC, 0x00, 0x08);
	msleep(150);

	/* CABC turn on sequence (BC = backlight control) */
	mipi_dbi_command(dbi, DB7430_UNKNOWN_B4, 0x0f, 0x00, 0x50);
	mipi_dbi_command(dbi, DB7430_USER_SELECT, 0x80);
	mipi_dbi_command(dbi, DB7430_UNKNOWN_B7, 0x24);
	mipi_dbi_command(dbi, DB7430_UNKNOWN_B8, 0x01);

	/* Turn on display */
	mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);

	return 0;
}

/**
 * db7430_get_modes() - return the mode
 * @panel: the panel to get the mode for
 * @connector: reference to the central DRM connector control structure
 */
static int db7430_get_modes(struct drm_panel *panel,
			    struct drm_connector *connector)
{
	struct db7430 *db = to_db7430(panel);
	struct drm_display_mode *mode;
	static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;

	mode = drm_mode_duplicate(connector->dev, &db7430_480_800_mode);
	if (!mode) {
		dev_err(db->dev, "failed to add mode\n");
		return -ENOMEM;
	}

	connector->display_info.bpc = 8;
	connector->display_info.width_mm = mode->width_mm;
	connector->display_info.height_mm = mode->height_mm;
	connector->display_info.bus_flags =
		DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
	drm_display_info_set_bus_formats(&connector->display_info,
					 &bus_format, 1);

	drm_mode_set_name(mode);
	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;

	drm_mode_probed_add(connector, mode);

	return 1;
}

static const struct drm_panel_funcs db7430_drm_funcs = {
	.disable = db7430_disable,
	.unprepare = db7430_unprepare,
	.prepare = db7430_prepare,
	.enable = db7430_enable,
	.get_modes = db7430_get_modes,
};

/*
 * SPI probe: acquire the vci/vccio supplies and the reset GPIO (held
 * asserted), set up the MIPI DBI abstraction over SPI and register the
 * DRM panel with its backlight.
 */
static int db7430_probe(struct spi_device *spi)
{
	struct device *dev = &spi->dev;
	struct db7430 *db;
	int ret;

	db = devm_kzalloc(dev, sizeof(*db), GFP_KERNEL);
	if (!db)
		return -ENOMEM;
	db->dev = dev;

	/*
	 * VCI is the analog voltage supply
	 * VCCIO is the digital I/O voltage supply
	 */
	db->regulators[0].supply = "vci";
	db->regulators[1].supply = "vccio";
	ret = devm_regulator_bulk_get(dev,
				      ARRAY_SIZE(db->regulators),
				      db->regulators);
	if (ret)
		return dev_err_probe(dev, ret, "failed to get regulators\n");

	db->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(db->reset)) {
		ret = PTR_ERR(db->reset);
		return dev_err_probe(dev, ret, "no RESET GPIO\n");
	}

	ret = mipi_dbi_spi_init(spi, &db->dbi, NULL);
	if (ret)
		return dev_err_probe(dev, ret, "MIPI DBI init failed\n");

	drm_panel_init(&db->panel, dev, &db7430_drm_funcs,
		       DRM_MODE_CONNECTOR_DPI);

	/* FIXME: if no external backlight, use internal backlight */
	ret = drm_panel_of_backlight(&db->panel);
	if (ret)
		return dev_err_probe(dev, ret, "failed to add backlight\n");

	spi_set_drvdata(spi, db);

	drm_panel_add(&db->panel);
	dev_dbg(dev, "added panel\n");

	return 0;
}

static void db7430_remove(struct spi_device *spi)
{
	struct db7430 *db = spi_get_drvdata(spi);

	drm_panel_remove(&db->panel);
}

/*
 * The DB7430 display controller may be used in several display products,
 * so list the different variants here and add per-variant data if needed.
 */
static const struct of_device_id db7430_match[] = {
	{ .compatible = "samsung,lms397kf04", },
	{},
};
MODULE_DEVICE_TABLE(of, db7430_match);

static const struct spi_device_id db7430_ids[] = {
	{ "lms397kf04" },
	{ },
};
MODULE_DEVICE_TABLE(spi, db7430_ids);

static struct spi_driver db7430_driver = {
	.probe		= db7430_probe,
	.remove		= db7430_remove,
	.id_table	= db7430_ids,
	.driver		= {
		.name	= "db7430-panel",
		.of_match_table = db7430_match,
	},
};
module_spi_driver(db7430_driver);

MODULE_AUTHOR("Linus Walleij <[email protected]>");
MODULE_DESCRIPTION("Samsung DB7430 panel driver");
MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/panel/panel-samsung-db7430.c
// SPDX-License-Identifier: GPL-2.0 /* * Driver for panels based on Sitronix ST7703 controller, souch as: * * - Rocktech jh057n00900 5.5" MIPI-DSI panel * * Copyright (C) Purism SPC 2019 */ #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/media-bus-format.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/of.h> #include <linux/regulator/consumer.h> #include <video/display_timing.h> #include <video/mipi_display.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_modes.h> #include <drm/drm_panel.h> #define DRV_NAME "panel-sitronix-st7703" /* Manufacturer specific Commands send via DSI */ #define ST7703_CMD_ALL_PIXEL_OFF 0x22 #define ST7703_CMD_ALL_PIXEL_ON 0x23 #define ST7703_CMD_SETAPID 0xB1 #define ST7703_CMD_SETDISP 0xB2 #define ST7703_CMD_SETRGBIF 0xB3 #define ST7703_CMD_SETCYC 0xB4 #define ST7703_CMD_SETBGP 0xB5 #define ST7703_CMD_SETVCOM 0xB6 #define ST7703_CMD_SETOTP 0xB7 #define ST7703_CMD_SETPOWER_EXT 0xB8 #define ST7703_CMD_SETEXTC 0xB9 #define ST7703_CMD_SETMIPI 0xBA #define ST7703_CMD_SETVDC 0xBC #define ST7703_CMD_UNKNOWN_BF 0xBF #define ST7703_CMD_SETSCR 0xC0 #define ST7703_CMD_SETPOWER 0xC1 #define ST7703_CMD_SETECO 0xC6 #define ST7703_CMD_SETIO 0xC7 #define ST7703_CMD_SETCABC 0xC8 #define ST7703_CMD_SETPANEL 0xCC #define ST7703_CMD_SETGAMMA 0xE0 #define ST7703_CMD_SETEQ 0xE3 #define ST7703_CMD_SETGIP1 0xE9 #define ST7703_CMD_SETGIP2 0xEA #define ST7703_CMD_UNKNOWN_EF 0xEF struct st7703 { struct device *dev; struct drm_panel panel; struct gpio_desc *reset_gpio; struct regulator *vcc; struct regulator *iovcc; bool prepared; struct dentry *debugfs; const struct st7703_panel_desc *desc; }; struct st7703_panel_desc { const struct drm_display_mode *mode; unsigned int lanes; unsigned long mode_flags; enum mipi_dsi_pixel_format format; int (*init_sequence)(struct st7703 *ctx); }; static inline struct st7703 *panel_to_st7703(struct drm_panel *panel) { return container_of(panel, 
struct st7703, panel); } static int jh057n_init_sequence(struct st7703 *ctx) { struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); /* * Init sequence was supplied by the panel vendor. Most of the commands * resemble the ST7703 but the number of parameters often don't match * so it's likely a clone. */ mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETEXTC, 0xF1, 0x12, 0x83); mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETRGBIF, 0x10, 0x10, 0x05, 0x05, 0x03, 0xFF, 0x00, 0x00, 0x00, 0x00); mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETSCR, 0x73, 0x73, 0x50, 0x50, 0x00, 0x00, 0x08, 0x70, 0x00); mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETVDC, 0x4E); mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETPANEL, 0x0B); mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETCYC, 0x80); mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETDISP, 0xF0, 0x12, 0x30); mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETEQ, 0x07, 0x07, 0x0B, 0x0B, 0x03, 0x0B, 0x00, 0x00, 0x00, 0x00, 0xFF, 0x00, 0xC0, 0x10); mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETBGP, 0x08, 0x08); msleep(20); mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETVCOM, 0x3F, 0x3F); mipi_dsi_generic_write_seq(dsi, ST7703_CMD_UNKNOWN_BF, 0x02, 0x11, 0x00); mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETGIP1, 0x82, 0x10, 0x06, 0x05, 0x9E, 0x0A, 0xA5, 0x12, 0x31, 0x23, 0x37, 0x83, 0x04, 0xBC, 0x27, 0x38, 0x0C, 0x00, 0x03, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x03, 0x00, 0x00, 0x00, 0x75, 0x75, 0x31, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x13, 0x88, 0x64, 0x64, 0x20, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x02, 0x88, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00); mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETGIP2, 0x02, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x46, 0x02, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x64, 0x88, 0x13, 0x57, 0x13, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x75, 0x88, 0x23, 0x14, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x30, 0x0A, 0xA5, 0x00, 0x00, 0x00, 0x00); mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETGAMMA, 0x00, 0x09, 0x0E, 0x29, 0x2D, 0x3C, 0x41, 0x37, 0x07, 0x0B, 0x0D, 0x10, 0x11, 0x0F, 0x10, 0x11, 0x18, 0x00, 0x09, 0x0E, 0x29, 0x2D, 0x3C, 0x41, 0x37, 0x07, 0x0B, 0x0D, 0x10, 0x11, 0x0F, 0x10, 0x11, 0x18); return 0; } static const struct drm_display_mode jh057n00900_mode = { .hdisplay = 720, .hsync_start = 720 + 90, .hsync_end = 720 + 90 + 20, .htotal = 720 + 90 + 20 + 20, .vdisplay = 1440, .vsync_start = 1440 + 20, .vsync_end = 1440 + 20 + 4, .vtotal = 1440 + 20 + 4 + 12, .clock = 75276, .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, .width_mm = 65, .height_mm = 130, }; static const struct st7703_panel_desc jh057n00900_panel_desc = { .mode = &jh057n00900_mode, .lanes = 4, .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_VIDEO_SYNC_PULSE, .format = MIPI_DSI_FMT_RGB888, .init_sequence = jh057n_init_sequence, }; static int xbd599_init_sequence(struct st7703 *ctx) { struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); /* * Init sequence was supplied by the panel vendor. */ /* Magic sequence to unlock user commands below. */ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETEXTC, 0xF1, 0x12, 0x83); mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETMIPI, 0x33, /* VC_main = 0, Lane_Number = 3 (4 lanes) */ 0x81, /* DSI_LDO_SEL = 1.7V, RTERM = 90 Ohm */ 0x05, /* IHSRX = x6 (Low High Speed driving ability) */ 0xF9, /* TX_CLK_SEL = fDSICLK/16 */ 0x0E, /* HFP_OSC (min. HFP number in DSI mode) */ 0x0E, /* HBP_OSC (min. 
HBP number in DSI mode) */ /* The rest is undocumented in ST7703 datasheet */ 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x25, 0x00, 0x91, 0x0a, 0x00, 0x00, 0x02, 0x4F, 0x11, 0x00, 0x00, 0x37); mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETPOWER_EXT, 0x25, /* PCCS = 2, ECP_DC_DIV = 1/4 HSYNC */ 0x22, /* DT = 15ms XDK_ECP = x2 */ 0x20, /* PFM_DC_DIV = /1 */ 0x03 /* ECP_SYNC_EN = 1, VGX_SYNC_EN = 1 */); /* RGB I/F porch timing */ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETRGBIF, 0x10, /* VBP_RGB_GEN */ 0x10, /* VFP_RGB_GEN */ 0x05, /* DE_BP_RGB_GEN */ 0x05, /* DE_FP_RGB_GEN */ /* The rest is undocumented in ST7703 datasheet */ 0x03, 0xFF, 0x00, 0x00, 0x00, 0x00); /* Source driving settings. */ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETSCR, 0x73, /* N_POPON */ 0x73, /* N_NOPON */ 0x50, /* I_POPON */ 0x50, /* I_NOPON */ 0x00, /* SCR[31,24] */ 0xC0, /* SCR[23,16] */ 0x08, /* SCR[15,8] */ 0x70, /* SCR[7,0] */ 0x00 /* Undocumented */); /* NVDDD_SEL = -1.8V, VDDD_SEL = out of range (possibly 1.9V?) */ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETVDC, 0x4E); /* * SS_PANEL = 1 (reverse scan), GS_PANEL = 0 (normal scan) * REV_PANEL = 1 (normally black panel), BGR_PANEL = 1 (BGR) */ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETPANEL, 0x0B); /* Zig-Zag Type C column inversion. */ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETCYC, 0x80); /* Set display resolution. 
*/ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETDISP, 0xF0, /* NL = 240 */ 0x12, /* RES_V_LSB = 0, BLK_CON = VSSD, * RESO_SEL = 720RGB */ 0xF0 /* WHITE_GND_EN = 1 (GND), * WHITE_FRAME_SEL = 7 frames, * ISC = 0 frames */); mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETEQ, 0x00, /* PNOEQ */ 0x00, /* NNOEQ */ 0x0B, /* PEQGND */ 0x0B, /* NEQGND */ 0x10, /* PEQVCI */ 0x10, /* NEQVCI */ 0x00, /* PEQVCI1 */ 0x00, /* NEQVCI1 */ 0x00, /* reserved */ 0x00, /* reserved */ 0xFF, /* reserved */ 0x00, /* reserved */ 0xC0, /* ESD_DET_DATA_WHITE = 1, ESD_WHITE_EN = 1 */ 0x10 /* SLPIN_OPTION = 1 (no need vsync after sleep-in) * VEDIO_NO_CHECK_EN = 0 * ESD_WHITE_GND_EN = 0 * ESD_DET_TIME_SEL = 0 frames */); mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETECO, 0x01, 0x00, 0xFF, 0xFF, 0x00); mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETPOWER, 0x74, /* VBTHS, VBTLS: VGH = 17V, VBL = -11V */ 0x00, /* FBOFF_VGH = 0, FBOFF_VGL = 0 */ 0x32, /* VRP */ 0x32, /* VRN */ 0x77, /* reserved */ 0xF1, /* APS = 1 (small), * VGL_DET_EN = 1, VGH_DET_EN = 1, * VGL_TURBO = 1, VGH_TURBO = 1 */ 0xFF, /* VGH1_L_DIV, VGL1_L_DIV (1.5MHz) */ 0xFF, /* VGH1_R_DIV, VGL1_R_DIV (1.5MHz) */ 0xCC, /* VGH2_L_DIV, VGL2_L_DIV (2.6MHz) */ 0xCC, /* VGH2_R_DIV, VGL2_R_DIV (2.6MHz) */ 0x77, /* VGH3_L_DIV, VGL3_L_DIV (4.5MHz) */ 0x77 /* VGH3_R_DIV, VGL3_R_DIV (4.5MHz) */); /* Reference voltage. */ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETBGP, 0x07, /* VREF_SEL = 4.2V */ 0x07 /* NVREF_SEL = 4.2V */); msleep(20); mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETVCOM, 0x2C, /* VCOMDC_F = -0.67V */ 0x2C /* VCOMDC_B = -0.67V */); /* Undocumented command. */ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_UNKNOWN_BF, 0x02, 0x11, 0x00); /* This command is to set forward GIP timing. 
*/ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETGIP1, 0x82, 0x10, 0x06, 0x05, 0xA2, 0x0A, 0xA5, 0x12, 0x31, 0x23, 0x37, 0x83, 0x04, 0xBC, 0x27, 0x38, 0x0C, 0x00, 0x03, 0x00, 0x00, 0x00, 0x0C, 0x00, 0x03, 0x00, 0x00, 0x00, 0x75, 0x75, 0x31, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x13, 0x88, 0x64, 0x64, 0x20, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x02, 0x88, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00); /* This command is to set backward GIP timing. */ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETGIP2, 0x02, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x46, 0x02, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x64, 0x88, 0x13, 0x57, 0x13, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x75, 0x88, 0x23, 0x14, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x0A, 0xA5, 0x00, 0x00, 0x00, 0x00); /* Adjust the gamma characteristics of the panel. */ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETGAMMA, 0x00, 0x09, 0x0D, 0x23, 0x27, 0x3C, 0x41, 0x35, 0x07, 0x0D, 0x0E, 0x12, 0x13, 0x10, 0x12, 0x12, 0x18, 0x00, 0x09, 0x0D, 0x23, 0x27, 0x3C, 0x41, 0x35, 0x07, 0x0D, 0x0E, 0x12, 0x13, 0x10, 0x12, 0x12, 0x18); return 0; } static const struct drm_display_mode xbd599_mode = { .hdisplay = 720, .hsync_start = 720 + 40, .hsync_end = 720 + 40 + 40, .htotal = 720 + 40 + 40 + 40, .vdisplay = 1440, .vsync_start = 1440 + 18, .vsync_end = 1440 + 18 + 10, .vtotal = 1440 + 18 + 10 + 17, .clock = 69000, .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, .width_mm = 68, .height_mm = 136, }; static const struct st7703_panel_desc xbd599_desc = { .mode = &xbd599_mode, .lanes = 4, .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE, .format = MIPI_DSI_FMT_RGB888, .init_sequence = xbd599_init_sequence, }; static int rg353v2_init_sequence(struct st7703 *ctx) { struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); /* * Init sequence was supplied by the panel vendor. 
*/ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETEXTC, 0xf1, 0x12, 0x83); mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETAPID, 0x00, 0x00, 0x00, 0xda, 0x80); mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETDISP, 0x00, 0x13, 0x70); mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETRGBIF, 0x10, 0x10, 0x28, 0x28, 0x03, 0xff, 0x00, 0x00, 0x00, 0x00); mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETCYC, 0x80); mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETBGP, 0x0a, 0x0a); mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETVCOM, 0x92, 0x92); mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETPOWER_EXT, 0x25, 0x22, 0xf0, 0x63); mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETMIPI, 0x33, 0x81, 0x05, 0xf9, 0x0e, 0x0e, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x25, 0x00, 0x90, 0x0a, 0x00, 0x00, 0x01, 0x4f, 0x01, 0x00, 0x00, 0x37); mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETVDC, 0x47); mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_UNKNOWN_BF, 0x02, 0x11, 0x00); mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETSCR, 0x73, 0x73, 0x50, 0x50, 0x00, 0x00, 0x12, 0x50, 0x00); mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETPOWER, 0x53, 0xc0, 0x32, 0x32, 0x77, 0xe1, 0xdd, 0xdd, 0x77, 0x77, 0x33, 0x33); mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETECO, 0x82, 0x00, 0xbf, 0xff, 0x00, 0xff); mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETIO, 0xb8, 0x00, 0x0a, 0x00, 0x00, 0x00); mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETCABC, 0x10, 0x40, 0x1e, 0x02); mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETPANEL, 0x0b); mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETGAMMA, 0x00, 0x07, 0x0d, 0x37, 0x35, 0x3f, 0x41, 0x44, 0x06, 0x0c, 0x0d, 0x0f, 0x11, 0x10, 0x12, 0x14, 0x1a, 0x00, 0x07, 0x0d, 0x37, 0x35, 0x3f, 0x41, 0x44, 0x06, 0x0c, 0x0d, 0x0f, 0x11, 0x10, 0x12, 0x14, 0x1a); mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETEQ, 0x07, 0x07, 0x0b, 0x0b, 0x0b, 0x0b, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xc0, 0x10); mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETGIP1, 0xc8, 0x10, 0x02, 0x00, 0x00, 0xb0, 0xb1, 0x11, 0x31, 0x23, 0x28, 0x80, 0xb0, 0xb1, 0x27, 0x08, 0x00, 0x04, 0x02, 0x00, 0x00, 0x00, 
0x00, 0x04, 0x02, 0x00, 0x00, 0x00, 0x88, 0x88, 0xba, 0x60, 0x24, 0x08, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0xba, 0x71, 0x35, 0x18, 0x88, 0x88, 0x88, 0x88, 0x88, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00); mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETGIP2, 0x97, 0x0a, 0x82, 0x02, 0x03, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x88, 0xba, 0x17, 0x53, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x80, 0x88, 0xba, 0x06, 0x42, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x23, 0x00, 0x00, 0x02, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00); mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_UNKNOWN_EF, 0xff, 0xff, 0x01); return 0; } static const struct drm_display_mode rg353v2_mode = { .hdisplay = 640, .hsync_start = 640 + 40, .hsync_end = 640 + 40 + 2, .htotal = 640 + 40 + 2 + 80, .vdisplay = 480, .vsync_start = 480 + 18, .vsync_end = 480 + 18 + 2, .vtotal = 480 + 18 + 2 + 28, .clock = 24150, .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, .width_mm = 70, .height_mm = 57, }; static const struct st7703_panel_desc rg353v2_desc = { .mode = &rg353v2_mode, .lanes = 4, .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_MODE_LPM, .format = MIPI_DSI_FMT_RGB888, .init_sequence = rg353v2_init_sequence, }; static int st7703_enable(struct drm_panel *panel) { struct st7703 *ctx = panel_to_st7703(panel); struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); int ret; ret = ctx->desc->init_sequence(ctx); if (ret < 0) { dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret); return ret; } msleep(20); ret = mipi_dsi_dcs_exit_sleep_mode(dsi); if (ret < 0) { dev_err(ctx->dev, "Failed to exit sleep mode: %d\n", ret); return ret; } /* Panel is operational 120 msec after reset */ msleep(60); ret = mipi_dsi_dcs_set_display_on(dsi); if (ret) return ret; dev_dbg(ctx->dev, "Panel init sequence done\n"); return 0; } static 
/*
 * drm_panel_funcs.disable: blank the panel and put the controller to
 * sleep. DCS failures are only logged, never propagated — the panel is
 * powered down later in unprepare() regardless.
 */
int st7703_disable(struct drm_panel *panel)
{
	struct st7703 *ctx = panel_to_st7703(panel);
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
	int ret;

	ret = mipi_dsi_dcs_set_display_off(dsi);
	if (ret < 0)
		dev_err(ctx->dev, "Failed to turn off the display: %d\n", ret);

	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
	if (ret < 0)
		dev_err(ctx->dev, "Failed to enter sleep mode: %d\n", ret);

	return 0;
}

/*
 * drm_panel_funcs.unprepare: assert reset and cut both supplies.
 * Tear-down order is the reverse of prepare(): reset first, then
 * iovcc, then vcc.
 */
static int st7703_unprepare(struct drm_panel *panel)
{
	struct st7703 *ctx = panel_to_st7703(panel);

	/* Idempotent: a second unprepare is a no-op */
	if (!ctx->prepared)
		return 0;

	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
	regulator_disable(ctx->iovcc);
	regulator_disable(ctx->vcc);
	ctx->prepared = false;

	return 0;
}

/*
 * drm_panel_funcs.prepare: enable vcc then iovcc and pulse the reset
 * line. The 20us-40us reset pulse and the 20ms post-reset delay
 * presumably come from the panel datasheet — TODO confirm against the
 * ST7703 timing spec.
 */
static int st7703_prepare(struct drm_panel *panel)
{
	struct st7703 *ctx = panel_to_st7703(panel);
	int ret;

	if (ctx->prepared)
		return 0;

	dev_dbg(ctx->dev, "Resetting the panel\n");
	ret = regulator_enable(ctx->vcc);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to enable vcc supply: %d\n", ret);
		return ret;
	}
	ret = regulator_enable(ctx->iovcc);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret);
		goto disable_vcc;
	}

	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
	usleep_range(20, 40);
	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
	msleep(20);

	ctx->prepared = true;

	return 0;

disable_vcc:
	/* Unwind the vcc enable on iovcc failure */
	regulator_disable(ctx->vcc);
	return ret;
}

/* All supported panels feed the DSI host 24-bit RGB */
static const u32 mantix_bus_formats[] = {
	MEDIA_BUS_FMT_RGB888_1X24,
};

/*
 * drm_panel_funcs.get_modes: report the single fixed mode from the
 * per-panel descriptor, including the physical size, and advertise the
 * RGB888 bus format. Returns 1 (number of modes) or -ENOMEM.
 */
static int st7703_get_modes(struct drm_panel *panel,
			    struct drm_connector *connector)
{
	struct st7703 *ctx = panel_to_st7703(panel);
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, ctx->desc->mode);
	if (!mode) {
		dev_err(ctx->dev, "Failed to add mode %ux%u@%u\n",
			ctx->desc->mode->hdisplay, ctx->desc->mode->vdisplay,
			drm_mode_vrefresh(ctx->desc->mode));
		return -ENOMEM;
	}

	drm_mode_set_name(mode);

	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	connector->display_info.width_mm = mode->width_mm;
	connector->display_info.height_mm = mode->height_mm;
	drm_mode_probed_add(connector, mode);

	drm_display_info_set_bus_formats(&connector->display_info,
					 mantix_bus_formats,
					 ARRAY_SIZE(mantix_bus_formats));

	return 1;
}

static const struct drm_panel_funcs st7703_drm_funcs = {
	.disable = st7703_disable,
	.unprepare = st7703_unprepare,
	.prepare = st7703_prepare,
	.enable = st7703_enable,
	.get_modes = st7703_get_modes,
};

/*
 * debugfs write handler: drive the panel's "all pixels on" self-test
 * pattern for @val seconds, then run a full disable/unprepare/prepare/
 * enable cycle to restore normal video.
 */
static int allpixelson_set(void *data, u64 val)
{
	struct st7703 *ctx = data;
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);

	dev_dbg(ctx->dev, "Setting all pixels on\n");
	mipi_dsi_generic_write_seq(dsi, ST7703_CMD_ALL_PIXEL_ON);
	msleep(val * 1000);
	/* Reset the panel to get video back */
	drm_panel_disable(&ctx->panel);
	drm_panel_unprepare(&ctx->panel);
	drm_panel_prepare(&ctx->panel);
	drm_panel_enable(&ctx->panel);

	return 0;
}

/* Write-only attribute: "echo <seconds> > allpixelson" */
DEFINE_SIMPLE_ATTRIBUTE(allpixelson_fops, NULL,
			allpixelson_set, "%llu\n");

static void st7703_debugfs_init(struct st7703 *ctx)
{
	ctx->debugfs = debugfs_create_dir(DRV_NAME, NULL);

	debugfs_create_file("allpixelson", 0600, ctx->debugfs, ctx,
			    &allpixelson_fops);
}

static void st7703_debugfs_remove(struct st7703 *ctx)
{
	debugfs_remove_recursive(ctx->debugfs);
	ctx->debugfs = NULL;
}

/*
 * Probe: gather reset GPIO, match data and supplies, register the
 * panel and attach to the DSI host. All resources are devm-managed;
 * only the added panel needs explicit removal on the attach error
 * path.
 */
static int st7703_probe(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	struct st7703 *ctx;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(ctx->reset_gpio))
		return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
				     "Failed to get reset gpio\n");

	mipi_dsi_set_drvdata(dsi, ctx);

	ctx->dev = dev;
	ctx->desc = of_device_get_match_data(dev);

	/* DSI link parameters come from the matched panel descriptor */
	dsi->mode_flags = ctx->desc->mode_flags;
	dsi->format = ctx->desc->format;
	dsi->lanes = ctx->desc->lanes;

	ctx->vcc = devm_regulator_get(dev, "vcc");
	if (IS_ERR(ctx->vcc))
		return dev_err_probe(dev, PTR_ERR(ctx->vcc),
				     "Failed to request vcc regulator\n");

	ctx->iovcc = devm_regulator_get(dev, "iovcc");
	if (IS_ERR(ctx->iovcc))
		return dev_err_probe(dev,
				     PTR_ERR(ctx->iovcc),
				     "Failed to request iovcc regulator\n");

	drm_panel_init(&ctx->panel, dev, &st7703_drm_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	ret = drm_panel_of_backlight(&ctx->panel);
	if (ret)
		return ret;

	drm_panel_add(&ctx->panel);

	ret = mipi_dsi_attach(dsi);
	if (ret < 0) {
		dev_err(dev, "mipi_dsi_attach failed (%d). Is host ready?\n", ret);
		drm_panel_remove(&ctx->panel);
		return ret;
	}

	dev_info(dev, "%ux%u@%u %ubpp dsi %udl - ready\n",
		 ctx->desc->mode->hdisplay, ctx->desc->mode->vdisplay,
		 drm_mode_vrefresh(ctx->desc->mode),
		 mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes);

	st7703_debugfs_init(ctx);

	return 0;
}

/* Power the panel down cleanly on system shutdown/reboot */
static void st7703_shutdown(struct mipi_dsi_device *dsi)
{
	struct st7703 *ctx = mipi_dsi_get_drvdata(dsi);
	int ret;

	ret = drm_panel_unprepare(&ctx->panel);
	if (ret < 0)
		dev_err(&dsi->dev, "Failed to unprepare panel: %d\n", ret);

	ret = drm_panel_disable(&ctx->panel);
	if (ret < 0)
		dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);
}

static void st7703_remove(struct mipi_dsi_device *dsi)
{
	struct st7703 *ctx = mipi_dsi_get_drvdata(dsi);
	int ret;

	st7703_shutdown(dsi);

	ret = mipi_dsi_detach(dsi);
	if (ret < 0)
		dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);

	drm_panel_remove(&ctx->panel);

	st7703_debugfs_remove(ctx);
}

static const struct of_device_id st7703_of_match[] = {
	{ .compatible = "anbernic,rg353v-panel-v2", .data = &rg353v2_desc },
	{ .compatible = "rocktech,jh057n00900", .data = &jh057n00900_panel_desc },
	{ .compatible = "xingbangda,xbd599", .data = &xbd599_desc },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, st7703_of_match);

static struct mipi_dsi_driver st7703_driver = {
	.probe = st7703_probe,
	.remove = st7703_remove,
	.shutdown = st7703_shutdown,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = st7703_of_match,
	},
};
module_mipi_dsi_driver(st7703_driver);

MODULE_AUTHOR("Guido Günther <[email protected]>");
MODULE_DESCRIPTION("DRM driver for Sitronix ST7703 based MIPI DSI panels");
MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/panel/panel-sitronix-st7703.c
// SPDX-License-Identifier: GPL-2.0-only /* * Ilitek ILI9322 TFT LCD drm_panel driver. * * This panel can be configured to support: * - 8-bit serial RGB interface * - 24-bit parallel RGB interface * - 8-bit ITU-R BT.601 interface * - 8-bit ITU-R BT.656 interface * - Up to 320RGBx240 dots resolution TFT LCD displays * - Scaling, brightness and contrast * * The scaling means that the display accepts a 640x480 or 720x480 * input and rescales it to fit to the 320x240 display. So what we * present to the system is something else than what comes out on the * actual display. * * Copyright (C) 2017 Linus Walleij <[email protected]> * Derived from drivers/drm/gpu/panel/panel-samsung-ld9040.c */ #include <linux/bitops.h> #include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/spi/spi.h> #include <video/mipi_display.h> #include <video/of_videomode.h> #include <video/videomode.h> #include <drm/drm_modes.h> #include <drm/drm_panel.h> #define ILI9322_CHIP_ID 0x00 #define ILI9322_CHIP_ID_MAGIC 0x96 /* * Voltage on the communication interface, from 0.7 (0x00) * to 1.32 (0x1f) times the VREG1OUT voltage in 2% increments. * 1.00 (0x0f) is the default. */ #define ILI9322_VCOM_AMP 0x01 /* * High voltage on the communication signals, from 0.37 (0x00) to * 1.0 (0x3f) times the VREGOUT1 voltage in 1% increments. * 0.83 (0x2e) is the default. */ #define ILI9322_VCOM_HIGH 0x02 /* * VREG1 voltage regulator from 3.6V (0x00) to 6.0V (0x18) in 0.1V * increments. 5.4V (0x12) is the default. This is the reference * voltage for the VCOM levels and the greyscale level. 
*/ #define ILI9322_VREG1_VOLTAGE 0x03 /* Describes the incoming signal */ #define ILI9322_ENTRY 0x06 /* 0 = right-to-left, 1 = left-to-right (default), horizontal flip */ #define ILI9322_ENTRY_HDIR BIT(0) /* 0 = down-to-up, 1 = up-to-down (default), vertical flip */ #define ILI9322_ENTRY_VDIR BIT(1) /* NTSC, PAL or autodetect */ #define ILI9322_ENTRY_NTSC (0 << 2) #define ILI9322_ENTRY_PAL (1 << 2) #define ILI9322_ENTRY_AUTODETECT (3 << 2) /* Input format */ #define ILI9322_ENTRY_SERIAL_RGB_THROUGH (0 << 4) #define ILI9322_ENTRY_SERIAL_RGB_ALIGNED (1 << 4) #define ILI9322_ENTRY_SERIAL_RGB_DUMMY_320X240 (2 << 4) #define ILI9322_ENTRY_SERIAL_RGB_DUMMY_360X240 (3 << 4) #define ILI9322_ENTRY_DISABLE_1 (4 << 4) #define ILI9322_ENTRY_PARALLEL_RGB_THROUGH (5 << 4) #define ILI9322_ENTRY_PARALLEL_RGB_ALIGNED (6 << 4) #define ILI9322_ENTRY_YUV_640Y_320CBCR_25_54_MHZ (7 << 4) #define ILI9322_ENTRY_YUV_720Y_360CBCR_27_MHZ (8 << 4) #define ILI9322_ENTRY_DISABLE_2 (9 << 4) #define ILI9322_ENTRY_ITU_R_BT_656_720X360 (10 << 4) #define ILI9322_ENTRY_ITU_R_BT_656_640X320 (11 << 4) /* Power control */ #define ILI9322_POW_CTRL 0x07 #define ILI9322_POW_CTRL_STB BIT(0) /* 0 = standby, 1 = normal */ #define ILI9322_POW_CTRL_VGL BIT(1) /* 0 = off, 1 = on */ #define ILI9322_POW_CTRL_VGH BIT(2) /* 0 = off, 1 = on */ #define ILI9322_POW_CTRL_DDVDH BIT(3) /* 0 = off, 1 = on */ #define ILI9322_POW_CTRL_VCOM BIT(4) /* 0 = off, 1 = on */ #define ILI9322_POW_CTRL_VCL BIT(5) /* 0 = off, 1 = on */ #define ILI9322_POW_CTRL_AUTO BIT(6) /* 0 = interactive, 1 = auto */ #define ILI9322_POW_CTRL_STANDBY (ILI9322_POW_CTRL_VGL | \ ILI9322_POW_CTRL_VGH | \ ILI9322_POW_CTRL_DDVDH | \ ILI9322_POW_CTRL_VCL | \ ILI9322_POW_CTRL_AUTO | \ BIT(7)) #define ILI9322_POW_CTRL_DEFAULT (ILI9322_POW_CTRL_STANDBY | \ ILI9322_POW_CTRL_STB) /* Vertical back porch bits 0..5 */ #define ILI9322_VBP 0x08 /* Horizontal back porch, 8 bits */ #define ILI9322_HBP 0x09 /* * Polarity settings: * 1 = positive polarity * 0 = negative 
polarity */ #define ILI9322_POL 0x0a #define ILI9322_POL_DCLK BIT(0) /* 1 default */ #define ILI9322_POL_HSYNC BIT(1) /* 0 default */ #define ILI9322_POL_VSYNC BIT(2) /* 0 default */ #define ILI9322_POL_DE BIT(3) /* 1 default */ /* * 0 means YCBCR are ordered Cb0,Y0,Cr0,Y1,Cb2,Y2,Cr2,Y3 (default) * in RGB mode this means RGB comes in RGBRGB * 1 means YCBCR are ordered Cr0,Y0,Cb0,Y1,Cr2,Y2,Cb2,Y3 * in RGB mode this means RGB comes in BGRBGR */ #define ILI9322_POL_YCBCR_MODE BIT(4) /* Formula A for YCbCR->RGB = 0, Formula B = 1 */ #define ILI9322_POL_FORMULA BIT(5) /* Reverse polarity: 0 = 0..255, 1 = 255..0 */ #define ILI9322_POL_REV BIT(6) #define ILI9322_IF_CTRL 0x0b #define ILI9322_IF_CTRL_HSYNC_VSYNC 0x00 #define ILI9322_IF_CTRL_HSYNC_VSYNC_DE BIT(2) #define ILI9322_IF_CTRL_DE_ONLY BIT(3) #define ILI9322_IF_CTRL_SYNC_DISABLED (BIT(2) | BIT(3)) #define ILI9322_IF_CTRL_LINE_INVERSION BIT(0) /* Not set means frame inv */ #define ILI9322_GLOBAL_RESET 0x04 #define ILI9322_GLOBAL_RESET_ASSERT 0x00 /* bit 0 = 0 -> reset */ /* * 4+4 bits of negative and positive gamma correction * Upper nybble, bits 4-7 are negative gamma * Lower nybble, bits 0-3 are positive gamma */ #define ILI9322_GAMMA_1 0x10 #define ILI9322_GAMMA_2 0x11 #define ILI9322_GAMMA_3 0x12 #define ILI9322_GAMMA_4 0x13 #define ILI9322_GAMMA_5 0x14 #define ILI9322_GAMMA_6 0x15 #define ILI9322_GAMMA_7 0x16 #define ILI9322_GAMMA_8 0x17 /* * enum ili9322_input - the format of the incoming signal to the panel * * The panel can be connected to various input streams and four of them can * be selected by electronic straps on the display. However it is possible * to select another mode or override the electronic default with this * setting. 
*/ enum ili9322_input { ILI9322_INPUT_SRGB_THROUGH = 0x0, ILI9322_INPUT_SRGB_ALIGNED = 0x1, ILI9322_INPUT_SRGB_DUMMY_320X240 = 0x2, ILI9322_INPUT_SRGB_DUMMY_360X240 = 0x3, ILI9322_INPUT_DISABLED_1 = 0x4, ILI9322_INPUT_PRGB_THROUGH = 0x5, ILI9322_INPUT_PRGB_ALIGNED = 0x6, ILI9322_INPUT_YUV_640X320_YCBCR = 0x7, ILI9322_INPUT_YUV_720X360_YCBCR = 0x8, ILI9322_INPUT_DISABLED_2 = 0x9, ILI9322_INPUT_ITU_R_BT656_720X360_YCBCR = 0xa, ILI9322_INPUT_ITU_R_BT656_640X320_YCBCR = 0xb, ILI9322_INPUT_UNKNOWN = 0xc, }; static const char * const ili9322_inputs[] = { "8 bit serial RGB through", "8 bit serial RGB aligned", "8 bit serial RGB dummy 320x240", "8 bit serial RGB dummy 360x240", "disabled 1", "24 bit parallel RGB through", "24 bit parallel RGB aligned", "24 bit YUV 640Y 320CbCr", "24 bit YUV 720Y 360CbCr", "disabled 2", "8 bit ITU-R BT.656 720Y 360CbCr", "8 bit ITU-R BT.656 640Y 320CbCr", }; /** * struct ili9322_config - the system specific ILI9322 configuration * @width_mm: physical panel width [mm] * @height_mm: physical panel height [mm] * @flip_horizontal: flip the image horizontally (right-to-left scan) * (only in RGB and YUV modes) * @flip_vertical: flip the image vertically (down-to-up scan) * (only in RGB and YUV modes) * @input: the input/entry type used in this system, if this is set to * ILI9322_INPUT_UNKNOWN the driver will try to figure it out by probing * the hardware * @vreg1out_mv: the output in microvolts for the VREGOUT1 regulator used * to drive the physical display. Valid ranges are 3600 thru 6000 in 100 * microvolt increments. If not specified, hardware defaults will be * used (4.5V). * @vcom_high_percent: the percentage of VREGOUT1 used for the peak * voltage on the communications link. Valid ranges are 37 thru 100 * percent. If not specified, hardware defaults will be used (91%). * @vcom_amplitude_percent: the percentage of VREGOUT1 used for the * peak-to-peak amplitude of the communcation signals to the physical * display. 
Valid ranges are 70 thru 132 percent in increments if two * percent. Odd percentages will be truncated. If not specified, hardware * defaults will be used (114%). * @dclk_active_high: data/pixel clock active high, data will be clocked * in on the rising edge of the DCLK (this is usually the case). * @syncmode: The synchronization mode, what sync signals are emitted. * See the enum for details. * @de_active_high: DE (data entry) is active high * @hsync_active_high: HSYNC is active high * @vsync_active_high: VSYNC is active high * @gamma_corr_pos: a set of 8 nybbles describing positive * gamma correction for voltages V1 thru V8. Valid range 0..15 * @gamma_corr_neg: a set of 8 nybbles describing negative * gamma correction for voltages V1 thru V8. Valid range 0..15 * * These adjust what grayscale voltage will be output for input data V1 = 0, * V2 = 16, V3 = 48, V4 = 96, V5 = 160, V6 = 208, V7 = 240 and V8 = 255. * The curve is shaped like this: * * ^ * | V8 * | V7 * | V6 * | V5 * | V4 * | V3 * | V2 * | V1 * +-----------------------------------------------------------> * 0 16 48 96 160 208 240 255 * * The negative and postive gamma values adjust the V1 thru V8 up/down * according to the datasheet specifications. This is a property of the * physical display connected to the display controller and may vary. * If defined, both arrays must be supplied in full. If the properties * are not supplied, hardware defaults will be used. 
*/
/* Board-specific configuration, matched via the OF compatible string */
struct ili9322_config {
	u32 width_mm;
	u32 height_mm;
	bool flip_horizontal;
	bool flip_vertical;
	enum ili9322_input input;
	u32 vreg1out_mv;
	u32 vcom_high_percent;
	u32 vcom_amplitude_percent;
	bool dclk_active_high;
	bool de_active_high;
	bool hsync_active_high;
	bool vsync_active_high;
	u8 syncmode;
	u8 gamma_corr_pos[8];
	u8 gamma_corr_neg[8];
};

/* Per-device driver state */
struct ili9322 {
	struct device *dev;
	const struct ili9322_config *conf;
	struct drm_panel panel;
	struct regmap *regmap;
	/* vcc, iovcc, vci — requested in probe */
	struct regulator_bulk_data supplies[3];
	struct gpio_desc *reset_gpio;
	enum ili9322_input input;
	struct videomode vm;
	/* Cooked register values derived from *conf; U8_MAX = use HW default */
	u8 gamma[8];
	u8 vreg1out;
	u8 vcom_high;
	u8 vcom_amplitude;
};

static inline struct ili9322 *panel_to_ili9322(struct drm_panel *panel)
{
	return container_of(panel, struct ili9322, panel);
}

/*
 * regmap SPI write: the chip encodes read/write in bit 7 of the first
 * (register) byte — cleared for a write.
 */
static int ili9322_regmap_spi_write(void *context, const void *data,
				    size_t count)
{
	struct device *dev = context;
	struct spi_device *spi = to_spi_device(dev);
	u8 buf[2];

	/* Clear bit 7 to write */
	memcpy(buf, data, 2);
	buf[0] &= ~0x80;

	dev_dbg(dev, "WRITE: %02x %02x\n", buf[0], buf[1]);
	return spi_write_then_read(spi, buf, 2, NULL, 0);
}

/* regmap SPI read: bit 7 set in the register byte selects a read */
static int ili9322_regmap_spi_read(void *context, const void *reg,
				   size_t reg_size, void *val,
				   size_t val_size)
{
	struct device *dev = context;
	struct spi_device *spi = to_spi_device(dev);
	u8 buf[1];

	/* Set bit 7 to 1 to read */
	memcpy(buf, reg, 1);
	dev_dbg(dev, "READ: %02x reg size = %zu, val size = %zu\n",
		buf[0], reg_size, val_size);
	buf[0] |= 0x80;

	return spi_write_then_read(spi, buf, 1, val, 1);
}

static struct regmap_bus ili9322_regmap_bus = {
	.write = ili9322_regmap_spi_write,
	.read = ili9322_regmap_spi_read,
	.reg_format_endian_default = REGMAP_ENDIAN_BIG,
	.val_format_endian_default = REGMAP_ENDIAN_BIG,
};

/* No volatile registers: everything may be served from the cache */
static bool ili9322_volatile_reg(struct device *dev, unsigned int reg)
{
	return false;
}

static bool ili9322_writeable_reg(struct device *dev, unsigned int reg)
{
	/* Just register 0 is read-only */
	if (reg == 0x00)
		return false;
	return true;
}

static const struct
regmap_config ili9322_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0x44,
	.cache_type = REGCACHE_RBTREE,
	.volatile_reg = ili9322_volatile_reg,
	.writeable_reg = ili9322_writeable_reg,
};

/*
 * Program the full register setup: reset, voltages (only where the DT
 * overrode the hardware default, i.e. the cached value != U8_MAX),
 * gamma, signal polarity, sync mode and input/entry mode.
 */
static int ili9322_init(struct drm_panel *panel, struct ili9322 *ili)
{
	u8 reg;
	int ret;
	int i;

	/* Reset display */
	ret = regmap_write(ili->regmap, ILI9322_GLOBAL_RESET,
			   ILI9322_GLOBAL_RESET_ASSERT);
	if (ret) {
		dev_err(ili->dev, "can't issue GRESET (%d)\n", ret);
		return ret;
	}

	/* Set up the main voltage regulator */
	if (ili->vreg1out != U8_MAX) {
		ret = regmap_write(ili->regmap, ILI9322_VREG1_VOLTAGE,
				   ili->vreg1out);
		if (ret) {
			dev_err(ili->dev, "can't set up VREG1OUT (%d)\n", ret);
			return ret;
		}
	}

	if (ili->vcom_amplitude != U8_MAX) {
		ret = regmap_write(ili->regmap, ILI9322_VCOM_AMP,
				   ili->vcom_amplitude);
		if (ret) {
			dev_err(ili->dev,
				"can't set up VCOM amplitude (%d)\n", ret);
			return ret;
		}
	}

	if (ili->vcom_high != U8_MAX) {
		ret = regmap_write(ili->regmap, ILI9322_VCOM_HIGH,
				   ili->vcom_high);
		if (ret) {
			dev_err(ili->dev, "can't set up VCOM high (%d)\n",
				ret);
			return ret;
		}
	}

	/* Set up gamma correction */
	for (i = 0; i < ARRAY_SIZE(ili->gamma); i++) {
		ret = regmap_write(ili->regmap, ILI9322_GAMMA_1 + i,
				   ili->gamma[i]);
		if (ret) {
			dev_err(ili->dev,
				"can't write gamma V%d to 0x%02x (%d)\n",
				i + 1, ILI9322_GAMMA_1 + i, ret);
			return ret;
		}
	}

	/*
	 * Polarity and inverted color order for RGB input.
	 * None of this applies in the BT.656 mode.
	 */
	reg = 0;
	if (ili->conf->dclk_active_high)
		reg = ILI9322_POL_DCLK;
	if (ili->conf->de_active_high)
		reg |= ILI9322_POL_DE;
	if (ili->conf->hsync_active_high)
		reg |= ILI9322_POL_HSYNC;
	if (ili->conf->vsync_active_high)
		reg |= ILI9322_POL_VSYNC;
	ret = regmap_write(ili->regmap, ILI9322_POL, reg);
	if (ret) {
		dev_err(ili->dev, "can't write POL register (%d)\n", ret);
		return ret;
	}

	/*
	 * Set up interface control.
	 * This is not used in the BT.656 mode (no H/Vsync or DE signals).
	 */
	reg = ili->conf->syncmode;
	reg |= ILI9322_IF_CTRL_LINE_INVERSION;
	ret = regmap_write(ili->regmap, ILI9322_IF_CTRL, reg);
	if (ret) {
		dev_err(ili->dev, "can't write IF CTRL register (%d)\n", ret);
		return ret;
	}

	/* Set up the input mode */
	reg = (ili->input << 4);
	/* These are inverted, setting to 1 is the default, clearing flips */
	if (!ili->conf->flip_horizontal)
		reg |= ILI9322_ENTRY_HDIR;
	if (!ili->conf->flip_vertical)
		reg |= ILI9322_ENTRY_VDIR;
	reg |= ILI9322_ENTRY_AUTODETECT;
	ret = regmap_write(ili->regmap, ILI9322_ENTRY, reg);
	if (ret) {
		dev_err(ili->dev, "can't write ENTRY reg (%d)\n", ret);
		return ret;
	}
	dev_info(ili->dev, "display is in %s mode, syncmode %02x\n",
		 ili9322_inputs[ili->input], ili->conf->syncmode);

	dev_info(ili->dev, "initialized display\n");

	return 0;
}

/*
 * This power-on sequence is from the datasheet, page 57.
 */
static int ili9322_power_on(struct ili9322 *ili)
{
	int ret;

	/* Assert RESET */
	gpiod_set_value(ili->reset_gpio, 1);

	ret = regulator_bulk_enable(ARRAY_SIZE(ili->supplies), ili->supplies);
	if (ret < 0) {
		dev_err(ili->dev, "unable to enable regulators\n");
		return ret;
	}
	msleep(20);

	/* De-assert RESET */
	gpiod_set_value(ili->reset_gpio, 0);

	msleep(10);

	return 0;
}

static int ili9322_power_off(struct ili9322 *ili)
{
	return regulator_bulk_disable(ARRAY_SIZE(ili->supplies),
				      ili->supplies);
}

/* drm_panel_funcs.disable: drop the chip into standby mode */
static int ili9322_disable(struct drm_panel *panel)
{
	struct ili9322 *ili = panel_to_ili9322(panel);
	int ret;

	ret = regmap_write(ili->regmap, ILI9322_POW_CTRL,
			   ILI9322_POW_CTRL_STANDBY);
	if (ret) {
		dev_err(ili->dev, "unable to go to standby mode\n");
		return ret;
	}

	return 0;
}

static int ili9322_unprepare(struct drm_panel *panel)
{
	struct ili9322 *ili = panel_to_ili9322(panel);

	return ili9322_power_off(ili);
}

/*
 * drm_panel_funcs.prepare: power up, then run the register init;
 * power back off if the init fails.
 */
static int ili9322_prepare(struct drm_panel *panel)
{
	struct ili9322 *ili = panel_to_ili9322(panel);
	int ret;

	ret = ili9322_power_on(ili);
	if (ret < 0)
		return ret;

	ret = ili9322_init(panel, ili);
	if (ret < 0)
		ili9322_unprepare(panel);

	return ret;
}

/* drm_panel_funcs.enable: leave standby via the power control register */
static int ili9322_enable(struct drm_panel *panel)
{
	struct ili9322 *ili = panel_to_ili9322(panel);
	int ret;

	ret = regmap_write(ili->regmap, ILI9322_POW_CTRL,
			   ILI9322_POW_CTRL_DEFAULT);
	if (ret) {
		dev_err(ili->dev, "unable to enable panel\n");
		return ret;
	}

	return 0;
}

/* Serial RGB modes */
static const struct drm_display_mode srgb_320x240_mode = {
	.clock = 24535,
	.hdisplay = 320,
	.hsync_start = 320 + 359,
	.hsync_end = 320 + 359 + 1,
	.htotal = 320 + 359 + 1 + 241,
	.vdisplay = 240,
	.vsync_start = 240 + 4,
	.vsync_end = 240 + 4 + 1,
	.vtotal = 262,
	.flags = 0,
};

static const struct drm_display_mode srgb_360x240_mode = {
	.clock = 27000,
	.hdisplay = 360,
	.hsync_start = 360 + 35,
	.hsync_end = 360 + 35 + 1,
	.htotal = 360 + 35 + 1 + 241,
	.vdisplay = 240,
	.vsync_start = 240 + 21,
	.vsync_end = 240 + 21 + 1,
	.vtotal = 262,
	.flags = 0,
};

/* This is the only mode listed for parallel RGB in the datasheet */
static const struct drm_display_mode prgb_320x240_mode = {
	.clock = 64000,
	.hdisplay = 320,
	.hsync_start = 320 + 38,
	.hsync_end = 320 + 38 + 1,
	.htotal = 320 + 38 + 1 + 50,
	.vdisplay = 240,
	.vsync_start = 240 + 4,
	.vsync_end = 240 + 4 + 1,
	.vtotal = 262,
	.flags = 0,
};

/* YUV modes */
static const struct drm_display_mode yuv_640x320_mode = {
	.clock = 24540,
	.hdisplay = 640,
	.hsync_start = 640 + 252,
	.hsync_end = 640 + 252 + 1,
	.htotal = 640 + 252 + 1 + 28,
	.vdisplay = 320,
	.vsync_start = 320 + 4,
	.vsync_end = 320 + 4 + 1,
	.vtotal = 320 + 4 + 1 + 18,
	.flags = 0,
};

static const struct drm_display_mode yuv_720x360_mode = {
	.clock = 27000,
	.hdisplay = 720,
	.hsync_start = 720 + 252,
	.hsync_end = 720 + 252 + 1,
	.htotal = 720 + 252 + 1 + 24,
	.vdisplay = 360,
	.vsync_start = 360 + 4,
	.vsync_end = 360 + 4 + 1,
	.vtotal = 360 + 4 + 1 + 18,
	.flags = 0,
};

/* BT.656 VGA mode, 640x480 */
static const struct drm_display_mode itu_r_bt_656_640_mode = {
	.clock = 24540,
	.hdisplay = 640,
	.hsync_start = 640 + 3,
	.hsync_end = 640 + 3 + 1,
	.htotal = 640 + 3 + 1 + 272,
	.vdisplay = 480,
	.vsync_start = 480 + 4,
	.vsync_end = 480 + 4 + 1,
	.vtotal = 500,
	.flags = 0,
};

/* BT.656 D1 mode 720x480 */
static const struct drm_display_mode itu_r_bt_656_720_mode = {
	.clock = 27000,
	.hdisplay = 720,
	.hsync_start = 720 + 3,
	.hsync_end = 720 + 3 + 1,
	.htotal = 720 + 3 + 1 + 272,
	.vdisplay = 480,
	.vsync_start = 480 + 4,
	.vsync_end = 480 + 4 + 1,
	.vtotal = 500,
	.flags = 0,
};

/*
 * drm_panel_funcs.get_modes: pick the fixed mode matching the active
 * input/entry type (the "through"/"aligned" serial RGB inputs have no
 * mode table entry and yield -EINVAL).
 */
static int ili9322_get_modes(struct drm_panel *panel,
			     struct drm_connector *connector)
{
	struct ili9322 *ili = panel_to_ili9322(panel);
	struct drm_device *drm = connector->dev;
	struct drm_display_mode *mode;
	struct drm_display_info *info;

	info = &connector->display_info;
	info->width_mm = ili->conf->width_mm;
	info->height_mm = ili->conf->height_mm;
	if (ili->conf->dclk_active_high)
		info->bus_flags |= DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
	else
		info->bus_flags |= DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;

	if (ili->conf->de_active_high)
		info->bus_flags |= DRM_BUS_FLAG_DE_HIGH;
	else
		info->bus_flags |= DRM_BUS_FLAG_DE_LOW;

	switch (ili->input) {
	case ILI9322_INPUT_SRGB_DUMMY_320X240:
		mode = drm_mode_duplicate(drm, &srgb_320x240_mode);
		break;
	case ILI9322_INPUT_SRGB_DUMMY_360X240:
		mode = drm_mode_duplicate(drm, &srgb_360x240_mode);
		break;
	case ILI9322_INPUT_PRGB_THROUGH:
	case ILI9322_INPUT_PRGB_ALIGNED:
		mode = drm_mode_duplicate(drm, &prgb_320x240_mode);
		break;
	case ILI9322_INPUT_YUV_640X320_YCBCR:
		mode = drm_mode_duplicate(drm, &yuv_640x320_mode);
		break;
	case ILI9322_INPUT_YUV_720X360_YCBCR:
		mode = drm_mode_duplicate(drm, &yuv_720x360_mode);
		break;
	case ILI9322_INPUT_ITU_R_BT656_720X360_YCBCR:
		mode = drm_mode_duplicate(drm, &itu_r_bt_656_720_mode);
		break;
	case ILI9322_INPUT_ITU_R_BT656_640X320_YCBCR:
		mode = drm_mode_duplicate(drm, &itu_r_bt_656_640_mode);
		break;
	default:
		mode = NULL;
		break;
	}
	if (!mode) {
		dev_err(panel->dev, "bad mode or failed to add mode\n");
		return -EINVAL;
	}
	drm_mode_set_name(mode);
	/*
	 * This is the preferred mode because most people are going
	 * to want to use the display with VGA type
graphics. */ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; /* Set up the polarity */ if (ili->conf->hsync_active_high) mode->flags |= DRM_MODE_FLAG_PHSYNC; else mode->flags |= DRM_MODE_FLAG_NHSYNC; if (ili->conf->vsync_active_high) mode->flags |= DRM_MODE_FLAG_PVSYNC; else mode->flags |= DRM_MODE_FLAG_NVSYNC; mode->width_mm = ili->conf->width_mm; mode->height_mm = ili->conf->height_mm; drm_mode_probed_add(connector, mode); return 1; /* Number of modes */ } static const struct drm_panel_funcs ili9322_drm_funcs = { .disable = ili9322_disable, .unprepare = ili9322_unprepare, .prepare = ili9322_prepare, .enable = ili9322_enable, .get_modes = ili9322_get_modes, }; static int ili9322_probe(struct spi_device *spi) { struct device *dev = &spi->dev; struct ili9322 *ili; const struct regmap_config *regmap_config; u8 gamma; u32 val; int ret; int i; ili = devm_kzalloc(dev, sizeof(struct ili9322), GFP_KERNEL); if (!ili) return -ENOMEM; spi_set_drvdata(spi, ili); ili->dev = dev; /* * Every new incarnation of this display must have a unique * data entry for the system in this driver. 
*/ ili->conf = of_device_get_match_data(dev); if (!ili->conf) { dev_err(dev, "missing device configuration\n"); return -ENODEV; } val = ili->conf->vreg1out_mv; if (!val) { /* Default HW value, do not touch (should be 4.5V) */ ili->vreg1out = U8_MAX; } else { if (val < 3600) { dev_err(dev, "too low VREG1OUT\n"); return -EINVAL; } if (val > 6000) { dev_err(dev, "too high VREG1OUT\n"); return -EINVAL; } if ((val % 100) != 0) { dev_err(dev, "VREG1OUT is no even 100 microvolt\n"); return -EINVAL; } val -= 3600; val /= 100; dev_dbg(dev, "VREG1OUT = 0x%02x\n", val); ili->vreg1out = val; } val = ili->conf->vcom_high_percent; if (!val) { /* Default HW value, do not touch (should be 91%) */ ili->vcom_high = U8_MAX; } else { if (val < 37) { dev_err(dev, "too low VCOM high\n"); return -EINVAL; } if (val > 100) { dev_err(dev, "too high VCOM high\n"); return -EINVAL; } val -= 37; dev_dbg(dev, "VCOM high = 0x%02x\n", val); ili->vcom_high = val; } val = ili->conf->vcom_amplitude_percent; if (!val) { /* Default HW value, do not touch (should be 114%) */ ili->vcom_high = U8_MAX; } else { if (val < 70) { dev_err(dev, "too low VCOM amplitude\n"); return -EINVAL; } if (val > 132) { dev_err(dev, "too high VCOM amplitude\n"); return -EINVAL; } val -= 70; val >>= 1; /* Increments of 2% */ dev_dbg(dev, "VCOM amplitude = 0x%02x\n", val); ili->vcom_amplitude = val; } for (i = 0; i < ARRAY_SIZE(ili->gamma); i++) { val = ili->conf->gamma_corr_neg[i]; if (val > 15) { dev_err(dev, "negative gamma %u > 15, capping\n", val); val = 15; } gamma = val << 4; val = ili->conf->gamma_corr_pos[i]; if (val > 15) { dev_err(dev, "positive gamma %u > 15, capping\n", val); val = 15; } gamma |= val; ili->gamma[i] = gamma; dev_dbg(dev, "gamma V%d: 0x%02x\n", i + 1, gamma); } ili->supplies[0].supply = "vcc"; /* 2.7-3.6 V */ ili->supplies[1].supply = "iovcc"; /* 1.65-3.6V */ ili->supplies[2].supply = "vci"; /* 2.7-3.6V */ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ili->supplies), ili->supplies); if (ret < 0) 
return ret; ret = regulator_set_voltage(ili->supplies[0].consumer, 2700000, 3600000); if (ret) return ret; ret = regulator_set_voltage(ili->supplies[1].consumer, 1650000, 3600000); if (ret) return ret; ret = regulator_set_voltage(ili->supplies[2].consumer, 2700000, 3600000); if (ret) return ret; ili->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(ili->reset_gpio)) { dev_err(dev, "failed to get RESET GPIO\n"); return PTR_ERR(ili->reset_gpio); } spi->bits_per_word = 8; ret = spi_setup(spi); if (ret < 0) { dev_err(dev, "spi setup failed.\n"); return ret; } regmap_config = &ili9322_regmap_config; ili->regmap = devm_regmap_init(dev, &ili9322_regmap_bus, dev, regmap_config); if (IS_ERR(ili->regmap)) { dev_err(dev, "failed to allocate register map\n"); return PTR_ERR(ili->regmap); } ret = regmap_read(ili->regmap, ILI9322_CHIP_ID, &val); if (ret) { dev_err(dev, "can't get chip ID (%d)\n", ret); return ret; } if (val != ILI9322_CHIP_ID_MAGIC) { dev_err(dev, "chip ID 0x%0x2, expected 0x%02x\n", val, ILI9322_CHIP_ID_MAGIC); return -ENODEV; } /* Probe the system to find the display setting */ if (ili->conf->input == ILI9322_INPUT_UNKNOWN) { ret = regmap_read(ili->regmap, ILI9322_ENTRY, &val); if (ret) { dev_err(dev, "can't get entry setting (%d)\n", ret); return ret; } /* Input enum corresponds to HW setting */ ili->input = (val >> 4) & 0x0f; if (ili->input >= ILI9322_INPUT_UNKNOWN) ili->input = ILI9322_INPUT_UNKNOWN; } else { ili->input = ili->conf->input; } drm_panel_init(&ili->panel, dev, &ili9322_drm_funcs, DRM_MODE_CONNECTOR_DPI); drm_panel_add(&ili->panel); return 0; } static void ili9322_remove(struct spi_device *spi) { struct ili9322 *ili = spi_get_drvdata(spi); ili9322_power_off(ili); drm_panel_remove(&ili->panel); } /* * The D-Link DIR-685 panel is marked LM918A01-1A SY-B4-091116-E0199 */ static const struct ili9322_config ili9322_dir_685 = { .width_mm = 65, .height_mm = 50, .input = ILI9322_INPUT_ITU_R_BT656_640X320_YCBCR, 
.vreg1out_mv = 4600,
	.vcom_high_percent = 91,
	.vcom_amplitude_percent = 114,
	.syncmode = ILI9322_IF_CTRL_SYNC_DISABLED,
	.dclk_active_high = true,
	.gamma_corr_neg = { 0xa, 0x5, 0x7, 0x7, 0x7, 0x5, 0x1, 0x6 },
	.gamma_corr_pos = { 0x7, 0x7, 0x3, 0x2, 0x3, 0x5, 0x7, 0x2 },
};

static const struct of_device_id ili9322_of_match[] = {
	{
		.compatible = "dlink,dir-685-panel",
		.data = &ili9322_dir_685,
	},
	{
		/* Generic match: no board data, relies on HW defaults */
		.compatible = "ilitek,ili9322",
		.data = NULL,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, ili9322_of_match);

static struct spi_driver ili9322_driver = {
	.probe = ili9322_probe,
	.remove = ili9322_remove,
	.driver = {
		.name = "panel-ilitek-ili9322",
		.of_match_table = ili9322_of_match,
	},
};
module_spi_driver(ili9322_driver);

MODULE_AUTHOR("Linus Walleij <[email protected]>");
MODULE_DESCRIPTION("ILI9322 LCD panel driver");
MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/panel/panel-ilitek-ili9322.c
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.

#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>

#include <drm/drm_mipi_dsi.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>

#include <drm/display/drm_dsc.h>
#include <drm/display/drm_dsc_helper.h>

#include <video/mipi_display.h>

/*
 * Driver state for the Visionox R66451 AMOLED DSI panel (1080x2340,
 * DSC compressed, command mode with tear-effect sync).
 */
struct visionox_r66451 {
	struct drm_panel panel;
	struct mipi_dsi_device *dsi;
	struct gpio_desc *reset_gpio;
	/* [0] = "vddio", [1] = "vdd"; enabled in prepare(), disabled in unprepare() */
	struct regulator_bulk_data supplies[2];
	/* guard flags so prepare/enable are not re-run by the DRM core */
	bool prepared, enabled;
};

static inline struct visionox_r66451 *to_visionox_r66451(struct drm_panel *panel)
{
	return container_of(panel, struct visionox_r66451, panel);
}

/*
 * Pulse the reset line low-high-low with ~10 ms settle time at each step.
 * The GPIO is declared active-low in DT convention here: value 0 = deasserted.
 */
static void visionox_r66451_reset(struct visionox_r66451 *ctx)
{
	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
	usleep_range(10000, 10100);
	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
	usleep_range(10000, 10100);
	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
	usleep_range(10000, 10100);
}

/*
 * Send the vendor initialization sequence in LP mode.  The register
 * addresses (0xb0 page-select, 0xc2, 0xd7, ...) and payloads are
 * undocumented manufacturer commands taken from the vendor init table;
 * their order and exact bytes must not be changed.
 */
static int visionox_r66451_on(struct visionox_r66451 *ctx)
{
	struct mipi_dsi_device *dsi = ctx->dsi;

	dsi->mode_flags |= MIPI_DSI_MODE_LPM;

	mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00);
	mipi_dsi_dcs_write_seq(dsi, 0xc2,
			       0x09, 0x24, 0x0c, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00,
			       0x09, 0x3c);
	mipi_dsi_dcs_write_seq(dsi, 0xd7,
			       0x00, 0xb9, 0x3c, 0x00, 0x40, 0x04, 0x00, 0xa0, 0x0a,
			       0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19,
			       0x3c, 0x00, 0x40, 0x04, 0x00, 0xa0, 0x0a);
	mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x80);
	mipi_dsi_dcs_write_seq(dsi, 0xde,
			       0x40, 0x00, 0x18, 0x00, 0x18, 0x00, 0x18, 0x00, 0x18,
			       0x10, 0x00, 0x18, 0x00, 0x18, 0x00, 0x18, 0x02, 0x00, 0x00);
	mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x04);
	mipi_dsi_dcs_write_seq(dsi, 0xe8, 0x00, 0x02);
	mipi_dsi_dcs_write_seq(dsi, 0xe4, 0x00, 0x08);
	mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00);
	mipi_dsi_dcs_write_seq(dsi, 0xc4,
			       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			       0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x32);
	mipi_dsi_dcs_write_seq(dsi, 0xcf,
			       0x64, 0x0b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
			       0x00, 0x0b, 0x77, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
			       0x02, 0x02, 0x02, 0x02, 0x02, 0x03);
	mipi_dsi_dcs_write_seq(dsi, 0xd3,
			       0x45, 0x00, 0x00, 0x01, 0x13, 0x15, 0x00, 0x15, 0x07,
			       0x0f, 0x77, 0x77, 0x77, 0x37, 0xb2, 0x11, 0x00, 0xa0,
			       0x3c, 0x9c);
	mipi_dsi_dcs_write_seq(dsi, 0xd7,
			       0x00, 0xb9, 0x34, 0x00, 0x40, 0x04, 0x00, 0xa0, 0x0a,
			       0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19,
			       0x34, 0x00, 0x40, 0x04, 0x00, 0xa0, 0x0a);
	mipi_dsi_dcs_write_seq(dsi, 0xd8,
			       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			       0x3a, 0x00, 0x3a, 0x00, 0x3a, 0x00, 0x3a, 0x00, 0x3a,
			       0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			       0x00, 0x0a, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,
			       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a,
			       0x00, 0x32, 0x00, 0x0a, 0x00, 0x22);
	mipi_dsi_dcs_write_seq(dsi, 0xdf,
			       0x50, 0x42, 0x58, 0x81, 0x2d, 0x00, 0x00, 0x00, 0x00,
			       0x00, 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			       0x00, 0x00, 0x01, 0x0f, 0xff, 0xd4, 0x0e, 0x00, 0x00,
			       0x00, 0x00, 0x00, 0x00, 0x0f, 0x53, 0xf1, 0x00, 0x00,
			       0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
	mipi_dsi_dcs_write_seq(dsi, 0xf7, 0x01);
	mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x80);
	mipi_dsi_dcs_write_seq(dsi, 0xe4,
			       0x34, 0xb4, 0x00, 0x00, 0x00, 0x39, 0x04, 0x09, 0x34);
	mipi_dsi_dcs_write_seq(dsi, 0xe6, 0x00);
	mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x04);
	mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x50, 0x40);
	mipi_dsi_dcs_write_seq(dsi, 0xf3, 0x50, 0x00, 0x00, 0x00, 0x00);
	mipi_dsi_dcs_write_seq(dsi, 0xf2, 0x11);
	mipi_dsi_dcs_write_seq(dsi, 0xf3, 0x01, 0x00, 0x00, 0x00, 0x01);
	mipi_dsi_dcs_write_seq(dsi, 0xf4, 0x00, 0x02);
	mipi_dsi_dcs_write_seq(dsi, 0xf2, 0x19);
	mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x50, 0x42);

	/* Standard DCS setup: TE on vblank, full 1080x2340 window. */
	mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
	mipi_dsi_dcs_set_column_address(dsi, 0, 1080 - 1);
	mipi_dsi_dcs_set_page_address(dsi, 0, 2340 - 1);

	dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;

	return 0;
}

/* Power-down counterpart of _on(): only drops the LP-mode transfer flag. */
static int visionox_r66451_off(struct visionox_r66451 *ctx)
{
	ctx->dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
	return 0;
}

/*
 * prepare(): enable regulators, reset the panel, run the vendor init
 * sequence, then enable DSC compression on the link.  On init failure the
 * panel is held in reset and the supplies are released again.
 */
static int visionox_r66451_prepare(struct drm_panel *panel)
{
	struct visionox_r66451 *ctx = to_visionox_r66451(panel);
	struct mipi_dsi_device *dsi = ctx->dsi;
	struct device *dev = &dsi->dev;
	int ret;

	if (ctx->prepared)
		return 0;

	ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies),
				    ctx->supplies);
	if (ret < 0)
		return ret;

	visionox_r66451_reset(ctx);

	ret = visionox_r66451_on(ctx);
	if (ret < 0) {
		dev_err(dev, "Failed to initialize panel: %d\n", ret);
		gpiod_set_value_cansleep(ctx->reset_gpio, 1);
		regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
		return ret;
	}

	mipi_dsi_compression_mode(ctx->dsi, true);

	ctx->prepared = true;
	return 0;
}

/* unprepare(): best-effort panel off, assert reset, drop regulators. */
static int visionox_r66451_unprepare(struct drm_panel *panel)
{
	struct visionox_r66451 *ctx = to_visionox_r66451(panel);
	struct device *dev = &ctx->dsi->dev;
	int ret;

	if (!ctx->prepared)
		return 0;

	ret = visionox_r66451_off(ctx);
	if (ret < 0)
		dev_err(dev, "Failed to un-initialize panel: %d\n", ret);

	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
	regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);

	ctx->prepared = false;
	return 0;
}

/*
 * Fixed 1080x2340 mode.  NOTE(review): width_mm/height_mm are 0, so the
 * physical panel size is not reported to userspace — presumably unknown
 * at submission time; confirm against panel datasheet before relying on DPI.
 */
static const struct drm_display_mode visionox_r66451_mode = {
	.clock = 345830,
	.hdisplay = 1080,
	.hsync_start = 1175,
	.hsync_end = 1176,
	.htotal = 1216,
	.vdisplay = 2340,
	.vsync_start = 2365,
	.vsync_end = 2366,
	.vtotal = 2370,
	.width_mm = 0,
	.height_mm = 0,
	.type = DRM_MODE_TYPE_DRIVER,
};

/*
 * enable(): push the DSC picture parameter set, exit sleep (120 ms per
 * DCS spec), then turn the display on.  Fails if no DSC config was
 * attached to the DSI device in probe().
 */
static int visionox_r66451_enable(struct drm_panel *panel)
{
	struct visionox_r66451 *ctx = to_visionox_r66451(panel);
	struct mipi_dsi_device *dsi = ctx->dsi;
	struct drm_dsc_picture_parameter_set pps;
	int ret;

	if (ctx->enabled)
		return 0;

	if (!dsi->dsc) {
		dev_err(&dsi->dev, "DSC not attached to DSI\n");
		return -ENODEV;
	}

	drm_dsc_pps_payload_pack(&pps, dsi->dsc);
	ret = mipi_dsi_picture_parameter_set(dsi, &pps);
	if (ret) {
		dev_err(&dsi->dev, "Failed to set PPS\n");
		return ret;
	}

	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(&dsi->dev, "Failed to exit sleep mode: %d\n", ret);
		return ret;
	}
	msleep(120);

	ret = mipi_dsi_dcs_set_display_on(dsi);
	if (ret < 0) {
		dev_err(&dsi->dev, "Failed on set display on: %d\n", ret);
		return ret;
	}
	msleep(20);

	ctx->enabled = true;

	return 0;
}

/* disable(): display off, then enter sleep (120 ms mandated settle). */
static int visionox_r66451_disable(struct drm_panel *panel)
{
	struct visionox_r66451 *ctx = to_visionox_r66451(panel);
	struct mipi_dsi_device *dsi = ctx->dsi;
	struct device *dev = &dsi->dev;
	int ret;

	ctx->enabled = false;

	ret = mipi_dsi_dcs_set_display_off(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to set display off: %d\n", ret);
		return ret;
	}
	msleep(20);

	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
		return ret;
	}
	msleep(120);

	return 0;
}

/* Report the single fixed mode to the connector. */
static int visionox_r66451_get_modes(struct drm_panel *panel,
				     struct drm_connector *connector)
{
	drm_connector_helper_get_modes_fixed(connector, &visionox_r66451_mode);
	return 1;
}

static const struct drm_panel_funcs visionox_r66451_funcs = {
	.prepare = visionox_r66451_prepare,
	.unprepare = visionox_r66451_unprepare,
	.get_modes = visionox_r66451_get_modes,
	.enable = visionox_r66451_enable,
	.disable = visionox_r66451_disable,
};

/* Backlight level is forwarded over DSI via DCS set_display_brightness. */
static int visionox_r66451_bl_update_status(struct backlight_device *bl)
{
	struct mipi_dsi_device *dsi = bl_get_data(bl);
	u16 brightness = backlight_get_brightness(bl);

	return mipi_dsi_dcs_set_display_brightness(dsi, brightness);
}

static const struct backlight_ops visionox_r66451_bl_ops = {
	.update_status = visionox_r66451_bl_update_status,
};

/*
 * Register a raw backlight device (12-bit range, default 255) backed by
 * the DSI brightness command above.
 */
static struct backlight_device *
visionox_r66451_create_backlight(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	const struct backlight_properties props = {
		.type = BACKLIGHT_RAW,
		.brightness = 255,
		.max_brightness = 4095,
	};

	return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
					      &visionox_r66451_bl_ops,
					      &props);
}

/*
 * probe(): allocate driver state and a DSC config (v1.2, 8 bpc, 8 bpp in
 * 1/16th-bpp fixed point, 540x20 slices x2), fetch supplies and the reset
 * GPIO, register panel + backlight, then attach to the DSI host.
 */
static int visionox_r66451_probe(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	struct visionox_r66451 *ctx;
	struct drm_dsc_config *dsc;
	int ret = 0;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	dsc = devm_kzalloc(dev, sizeof(*dsc), GFP_KERNEL);
	if (!dsc)
		return -ENOMEM;

	/* Set DSC params */
	dsc->dsc_version_major = 0x1;
	dsc->dsc_version_minor = 0x2;
	dsc->slice_height = 20;
	dsc->slice_width = 540;
	dsc->slice_count = 2;
	dsc->bits_per_component = 8;
	dsc->bits_per_pixel = 8 << 4; /* 4 fractional bits: 8.0 bpp */
	dsc->block_pred_enable = true;

	dsi->dsc = dsc;

	ctx->supplies[0].supply = "vddio";
	ctx->supplies[1].supply = "vdd";

	ret = devm_regulator_bulk_get(&dsi->dev, ARRAY_SIZE(ctx->supplies),
				      ctx->supplies);
	if (ret < 0)
		return ret;

	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(ctx->reset_gpio))
		return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
				     "Failed to get reset-gpios\n");

	ctx->dsi = dsi;
	mipi_dsi_set_drvdata(dsi, ctx);

	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;

	drm_panel_init(&ctx->panel, dev, &visionox_r66451_funcs,
		       DRM_MODE_CONNECTOR_DSI);
	ctx->panel.backlight = visionox_r66451_create_backlight(dsi);
	if (IS_ERR(ctx->panel.backlight))
		return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
				     "Failed to create backlight\n");

	drm_panel_add(&ctx->panel);

	ret = mipi_dsi_attach(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
		/* undo drm_panel_add() so no stale panel is left registered */
		drm_panel_remove(&ctx->panel);
	}

	return ret;
}

/* remove(): detach from the host and unregister the panel. */
static void visionox_r66451_remove(struct mipi_dsi_device *dsi)
{
	struct visionox_r66451 *ctx = mipi_dsi_get_drvdata(dsi);
	int ret;

	ret = mipi_dsi_detach(dsi);
	if (ret < 0)
		dev_err(&dsi->dev, "Failed to detach DSI host: %d\n", ret);

	drm_panel_remove(&ctx->panel);
}

static const struct of_device_id visionox_r66451_of_match[] = {
	{.compatible = "visionox,r66451"},
	{ /*sentinel*/ }
};
MODULE_DEVICE_TABLE(of, visionox_r66451_of_match);

static struct mipi_dsi_driver visionox_r66451_driver = {
	.probe = visionox_r66451_probe,
	.remove = visionox_r66451_remove,
	.driver = {
		.name = "panel-visionox-r66451",
		.of_match_table = visionox_r66451_of_match,
	},
};
module_mipi_dsi_driver(visionox_r66451_driver);

MODULE_AUTHOR("Jessica Zhang <[email protected]>");
MODULE_DESCRIPTION("Panel driver for the Visionox R66451 AMOLED DSI panel");
MODULE_LICENSE("GPL");
linux-master
drivers/gpu/drm/panel/panel-visionox-r66451.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021 Raffaele Tranquillini <[email protected]>
 *
 * Generated using linux-mdss-dsi-panel-driver-generator from Lineage OS device tree:
 * https://github.com/LineageOS/android_kernel_xiaomi_msm8996/blob/lineage-18.1/arch/arm/boot/dts/qcom/a1-msm8996-mtp.dtsi
 */

#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>

#include <video/mipi_display.h>

#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>

/* Driver state for the JDI FHD R63452 1080x1920 command-mode DSI panel. */
struct jdi_fhd_r63452 {
	struct drm_panel panel;
	struct mipi_dsi_device *dsi;
	struct gpio_desc *reset_gpio;
	/* guards prepare/unprepare against re-entry from the DRM core */
	bool prepared;
};

static inline struct jdi_fhd_r63452 *to_jdi_fhd_r63452(struct drm_panel *panel)
{
	return container_of(panel, struct jdi_fhd_r63452, panel);
}

/* Deassert / assert / deassert reset with the generator's delays. */
static void jdi_fhd_r63452_reset(struct jdi_fhd_r63452 *ctx)
{
	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
	usleep_range(10000, 11000);
	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
	usleep_range(1000, 2000);
	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
	usleep_range(10000, 11000);
}

/*
 * Vendor power-on/init sequence (auto-generated from the downstream DT).
 * 0xb0/0xd6/0xec/0xc8 are undocumented manufacturer registers; keep the
 * command order and payloads exactly as generated.
 */
static int jdi_fhd_r63452_on(struct jdi_fhd_r63452 *ctx)
{
	struct mipi_dsi_device *dsi = ctx->dsi;
	struct device *dev = &dsi->dev;
	int ret;

	dsi->mode_flags |= MIPI_DSI_MODE_LPM;

	mipi_dsi_generic_write_seq(dsi, 0xb0, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xd6, 0x01);
	mipi_dsi_generic_write_seq(dsi, 0xec,
				   0x64, 0xdc, 0xec, 0x3b, 0x52, 0x00, 0x0b, 0x0b,
				   0x13, 0x15, 0x68, 0x0b, 0xb5);
	mipi_dsi_generic_write_seq(dsi, 0xb0, 0x03);

	ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
	if (ret < 0) {
		dev_err(dev, "Failed to set tear on: %d\n", ret);
		return ret;
	}

	mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x00);

	/* 0x77 = 24 bits/pixel for both DPI and DBI interfaces */
	ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x77);
	if (ret < 0) {
		dev_err(dev, "Failed to set pixel format: %d\n", ret);
		return ret;
	}

	/* Full frame window: columns 0..1079, pages 0..1919 */
	ret = mipi_dsi_dcs_set_column_address(dsi, 0x0000, 0x0437);
	if (ret < 0) {
		dev_err(dev, "Failed to set column address: %d\n", ret);
		return ret;
	}

	ret = mipi_dsi_dcs_set_page_address(dsi, 0x0000, 0x077f);
	if (ret < 0) {
		dev_err(dev, "Failed to set page address: %d\n", ret);
		return ret;
	}

	ret = mipi_dsi_dcs_set_tear_scanline(dsi, 0x0000);
	if (ret < 0) {
		dev_err(dev, "Failed to set tear scanline: %d\n", ret);
		return ret;
	}

	ret = mipi_dsi_dcs_set_display_brightness(dsi, 0x00ff);
	if (ret < 0) {
		dev_err(dev, "Failed to set display brightness: %d\n", ret);
		return ret;
	}

	mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24);
	mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
	mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x00);
	mipi_dsi_dcs_write_seq(dsi, 0x84, 0x00);

	/*
	 * NOTE(review): display-on is issued before exit_sleep_mode, which is
	 * unusual for DCS panels but matches the generated vendor sequence —
	 * do not "fix" without hardware verification.
	 */
	ret = mipi_dsi_dcs_set_display_on(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to set display on: %d\n", ret);
		return ret;
	}
	msleep(20);

	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
		return ret;
	}
	msleep(80);

	mipi_dsi_generic_write_seq(dsi, 0xb0, 0x04);
	mipi_dsi_dcs_write_seq(dsi, 0x84, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xc8, 0x11);
	mipi_dsi_generic_write_seq(dsi, 0xb0, 0x03);

	return 0;
}

/*
 * Vendor power-off sequence: same manufacturer registers as _on() but the
 * 0xec payload differs in its last byte (0x95 vs 0xb5), then display off
 * and sleep-in with the DCS-mandated 120 ms settle.
 */
static int jdi_fhd_r63452_off(struct jdi_fhd_r63452 *ctx)
{
	struct mipi_dsi_device *dsi = ctx->dsi;
	struct device *dev = &dsi->dev;
	int ret;

	dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;

	mipi_dsi_generic_write_seq(dsi, 0xb0, 0x00);
	mipi_dsi_generic_write_seq(dsi, 0xd6, 0x01);
	mipi_dsi_generic_write_seq(dsi, 0xec,
				   0x64, 0xdc, 0xec, 0x3b, 0x52, 0x00, 0x0b, 0x0b,
				   0x13, 0x15, 0x68, 0x0b, 0x95);
	mipi_dsi_generic_write_seq(dsi, 0xb0, 0x03);

	ret = mipi_dsi_dcs_set_display_off(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to set display off: %d\n", ret);
		return ret;
	}
	usleep_range(2000, 3000);

	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
		return ret;
	}
	msleep(120);

	return 0;
}

/* prepare(): reset then run the init sequence; hold reset on failure. */
static int jdi_fhd_r63452_prepare(struct drm_panel *panel)
{
	struct jdi_fhd_r63452 *ctx = to_jdi_fhd_r63452(panel);
	struct device *dev = &ctx->dsi->dev;
	int ret;

	if (ctx->prepared)
		return 0;

	jdi_fhd_r63452_reset(ctx);

	ret = jdi_fhd_r63452_on(ctx);
	if (ret < 0) {
		dev_err(dev, "Failed to initialize panel: %d\n", ret);
		gpiod_set_value_cansleep(ctx->reset_gpio, 1);
		return ret;
	}

	ctx->prepared = true;
	return 0;
}

/* unprepare(): best-effort off sequence, then assert reset. */
static int jdi_fhd_r63452_unprepare(struct drm_panel *panel)
{
	struct jdi_fhd_r63452 *ctx = to_jdi_fhd_r63452(panel);
	struct device *dev = &ctx->dsi->dev;
	int ret;

	if (!ctx->prepared)
		return 0;

	ret = jdi_fhd_r63452_off(ctx);
	if (ret < 0)
		dev_err(dev, "Failed to un-initialize panel: %d\n", ret);

	gpiod_set_value_cansleep(ctx->reset_gpio, 1);

	ctx->prepared = false;
	return 0;
}

/* 1080x1920@60 with the blanking taken from the downstream DT. */
static const struct drm_display_mode jdi_fhd_r63452_mode = {
	.clock = (1080 + 120 + 16 + 40) * (1920 + 4 + 2 + 4) * 60 / 1000,
	.hdisplay = 1080,
	.hsync_start = 1080 + 120,
	.hsync_end = 1080 + 120 + 16,
	.htotal = 1080 + 120 + 16 + 40,
	.vdisplay = 1920,
	.vsync_start = 1920 + 4,
	.vsync_end = 1920 + 4 + 2,
	.vtotal = 1920 + 4 + 2 + 4,
	.width_mm = 64,
	.height_mm = 114,
};

/* Duplicate the fixed mode onto the connector and report the panel size. */
static int jdi_fhd_r63452_get_modes(struct drm_panel *panel,
				    struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, &jdi_fhd_r63452_mode);
	if (!mode)
		return -ENOMEM;

	drm_mode_set_name(mode);

	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	connector->display_info.width_mm = mode->width_mm;
	connector->display_info.height_mm = mode->height_mm;
	drm_mode_probed_add(connector, mode);

	return 1;
}

static const struct drm_panel_funcs jdi_fhd_r63452_panel_funcs = {
	.prepare = jdi_fhd_r63452_prepare,
	.unprepare = jdi_fhd_r63452_unprepare,
	.get_modes = jdi_fhd_r63452_get_modes,
};

/*
 * probe(): fetch the reset GPIO, configure the DSI link (4-lane RGB888,
 * burst video), register panel + DT backlight, attach to the host.
 */
static int jdi_fhd_r63452_probe(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	struct jdi_fhd_r63452 *ctx;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(ctx->reset_gpio))
		return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
				     "Failed to get reset-gpios\n");

	ctx->dsi = dsi;
	mipi_dsi_set_drvdata(dsi, ctx);

	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
			  MIPI_DSI_CLOCK_NON_CONTINUOUS;

	drm_panel_init(&ctx->panel, dev, &jdi_fhd_r63452_panel_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	ret = drm_panel_of_backlight(&ctx->panel);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to get backlight\n");

	drm_panel_add(&ctx->panel);

	ret = mipi_dsi_attach(dsi);
	if (ret < 0) {
		dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
		return ret;
	}

	return 0;
}

/* remove(): detach from the host and unregister the panel. */
static void jdi_fhd_r63452_remove(struct mipi_dsi_device *dsi)
{
	struct jdi_fhd_r63452 *ctx = mipi_dsi_get_drvdata(dsi);
	int ret;

	ret = mipi_dsi_detach(dsi);
	if (ret < 0)
		dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);

	drm_panel_remove(&ctx->panel);
}

static const struct of_device_id jdi_fhd_r63452_of_match[] = {
	{ .compatible = "jdi,fhd-r63452" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, jdi_fhd_r63452_of_match);

static struct mipi_dsi_driver jdi_fhd_r63452_driver = {
	.probe = jdi_fhd_r63452_probe,
	.remove = jdi_fhd_r63452_remove,
	.driver = {
		.name = "panel-jdi-fhd-r63452",
		.of_match_table = jdi_fhd_r63452_of_match,
	},
};
module_mipi_dsi_driver(jdi_fhd_r63452_driver);

MODULE_AUTHOR("Raffaele Tranquillini <[email protected]>");
MODULE_DESCRIPTION("DRM driver for JDI FHD R63452 DSI panel, command mode");
MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STMicroelectronics SA 2017
 *
 * Authors: Philippe Cornu <[email protected]>
 *          Yannick Fertre <[email protected]>
 */

#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>

#include <video/mipi_display.h>

#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>

/*** Manufacturer Command Set ***/
#define MCS_CMD_MODE_SW		0xFE /* CMD Mode Switch */
#define MCS_CMD1_UCS		0x00 /* User Command Set (UCS = CMD1) */
#define MCS_CMD2_P0		0x01 /* Manufacture Command Set Page0 (CMD2 P0) */
#define MCS_CMD2_P1		0x02 /* Manufacture Command Set Page1 (CMD2 P1) */
#define MCS_CMD2_P2		0x03 /* Manufacture Command Set Page2 (CMD2 P2) */
#define MCS_CMD2_P3		0x04 /* Manufacture Command Set Page3 (CMD2 P3) */

/* CMD2 P0 commands (Display Options and Power) */
#define MCS_STBCTR		0x12 /* TE1 Output Setting Zig-Zag Connection */
#define MCS_SGOPCTR		0x16 /* Source Bias Current */
#define MCS_SDCTR		0x1A /* Source Output Delay Time */
#define MCS_INVCTR		0x1B /* Inversion Type */
#define MCS_EXT_PWR_IC		0x24 /* External PWR IC Control */
#define MCS_SETAVDD		0x27 /* PFM Control for AVDD Output */
#define MCS_SETAVEE		0x29 /* PFM Control for AVEE Output */
#define MCS_BT2CTR		0x2B /* DDVDL Charge Pump Control */
#define MCS_BT3CTR		0x2F /* VGH Charge Pump Control */
#define MCS_BT4CTR		0x34 /* VGL Charge Pump Control */
#define MCS_VCMCTR		0x46 /* VCOM Output Level Control */
#define MCS_SETVGN		0x52 /* VG M/S N Control */
#define MCS_SETVGP		0x54 /* VG M/S P Control */
#define MCS_SW_CTRL		0x5F /* Interface Control for PFM and MIPI */

/* CMD2 P2 commands (GOA Timing Control) - no description in datasheet */
#define GOA_VSTV1		0x00
#define GOA_VSTV2		0x07
#define GOA_VCLK1		0x0E
#define GOA_VCLK2		0x17
#define GOA_VCLK_OPT1		0x20
#define GOA_BICLK1		0x2A
#define GOA_BICLK2		0x37
#define GOA_BICLK3		0x44
#define GOA_BICLK4		0x4F
#define GOA_BICLK_OPT1		0x5B
#define GOA_BICLK_OPT2		0x60
#define MCS_GOA_GPO1		0x6D
#define MCS_GOA_GPO2		0x71
#define MCS_GOA_EQ		0x74
#define MCS_GOA_CLK_GALLON	0x7C
#define MCS_GOA_FS_SEL0		0x7E
#define MCS_GOA_FS_SEL1		0x87
#define MCS_GOA_FS_SEL2		0x91
#define MCS_GOA_FS_SEL3		0x9B
#define MCS_GOA_BS_SEL0		0xAC
#define MCS_GOA_BS_SEL1		0xB5
#define MCS_GOA_BS_SEL2		0xBF
#define MCS_GOA_BS_SEL3		0xC9
#define MCS_GOA_BS_SEL4		0xD3

/* CMD2 P3 commands (Gamma) */
#define MCS_GAMMA_VP		0x60 /* Gamma VP1~VP16 */
#define MCS_GAMMA_VN		0x70 /* Gamma VN1~VN16 */

/* Driver state for the Raydium RM68200 720x1280 video-mode DSI panel. */
struct rm68200 {
	struct device *dev;
	struct drm_panel panel;
	struct gpio_desc *reset_gpio;
	struct regulator *supply;
	/* guard flags against re-entrant prepare/enable from the DRM core */
	bool prepared;
	bool enabled;
};

static const struct drm_display_mode default_mode = {
	.clock = 54000,
	.hdisplay = 720,
	.hsync_start = 720 + 48,
	.hsync_end = 720 + 48 + 9,
	.htotal = 720 + 48 + 9 + 48,
	.vdisplay = 1280,
	.vsync_start = 1280 + 12,
	.vsync_end = 1280 + 12 + 5,
	.vtotal = 1280 + 12 + 5 + 12,
	.flags = 0,
	.width_mm = 68,
	.height_mm = 122,
};

static inline struct rm68200 *panel_to_rm68200(struct drm_panel *panel)
{
	return container_of(panel, struct rm68200, panel);
}

/* Fire-and-forget DCS buffer write; failures are only logged (ratelimited). */
static void rm68200_dcs_write_buf(struct rm68200 *ctx, const void *data,
				  size_t len)
{
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
	int err;

	err = mipi_dsi_dcs_write_buffer(dsi, data, len);
	if (err < 0)
		dev_err_ratelimited(ctx->dev, "MIPI DSI DCS write buffer failed: %d\n",
				    err);
}

/* Single-byte DCS register write used by the one-by-one sequence macro. */
static void rm68200_dcs_write_cmd(struct rm68200 *ctx, u8 cmd, u8 value)
{
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
	int err;

	err = mipi_dsi_dcs_write(dsi, cmd, &value, 1);
	if (err < 0)
		dev_err_ratelimited(ctx->dev, "MIPI DSI DCS write failed: %d\n", err);
}

#define dcs_write_seq(ctx, seq...)				\
({								\
	static const u8 d[] = { seq };				\
								\
	rm68200_dcs_write_buf(ctx, d, ARRAY_SIZE(d));		\
})

/*
 * This panel is not able to auto-increment all cmd addresses so for some of
 * them, we need to send them one by one...
 */
#define dcs_write_cmd_seq(ctx, cmd, seq...)			\
({								\
	static const u8 d[] = { seq };				\
	unsigned int i;						\
								\
	for (i = 0; i < ARRAY_SIZE(d) ; i++)			\
		rm68200_dcs_write_cmd(ctx, cmd + i, d[i]);	\
})

/*
 * Manufacturer init: program power/charge-pump settings (CMD2 P0), GOA
 * timing (CMD2 P2) and gamma tables (CMD2 P3), then return to the user
 * command set.  Values come from the vendor; keep order and bytes intact.
 */
static void rm68200_init_sequence(struct rm68200 *ctx)
{
	/* Enter CMD2 with page 0 */
	dcs_write_seq(ctx, MCS_CMD_MODE_SW, MCS_CMD2_P0);
	dcs_write_cmd_seq(ctx, MCS_EXT_PWR_IC, 0xC0, 0x53, 0x00);
	dcs_write_seq(ctx, MCS_BT2CTR, 0xE5);
	dcs_write_seq(ctx, MCS_SETAVDD, 0x0A);
	dcs_write_seq(ctx, MCS_SETAVEE, 0x0A);
	dcs_write_seq(ctx, MCS_SGOPCTR, 0x52);
	dcs_write_seq(ctx, MCS_BT3CTR, 0x53);
	dcs_write_seq(ctx, MCS_BT4CTR, 0x5A);
	dcs_write_seq(ctx, MCS_INVCTR, 0x00);
	dcs_write_seq(ctx, MCS_STBCTR, 0x0A);
	dcs_write_seq(ctx, MCS_SDCTR, 0x06);
	dcs_write_seq(ctx, MCS_VCMCTR, 0x56);
	dcs_write_seq(ctx, MCS_SETVGN, 0xA0, 0x00);
	dcs_write_seq(ctx, MCS_SETVGP, 0xA0, 0x00);
	dcs_write_seq(ctx, MCS_SW_CTRL, 0x11); /* 2 data lanes, see doc */

	dcs_write_seq(ctx, MCS_CMD_MODE_SW, MCS_CMD2_P2);
	dcs_write_seq(ctx, GOA_VSTV1, 0x05);
	dcs_write_seq(ctx, 0x02, 0x0B);
	dcs_write_seq(ctx, 0x03, 0x0F);
	dcs_write_seq(ctx, 0x04, 0x7D, 0x00, 0x50);
	dcs_write_cmd_seq(ctx, GOA_VSTV2, 0x05, 0x16, 0x0D, 0x11, 0x7D, 0x00,
			  0x50);
	dcs_write_cmd_seq(ctx, GOA_VCLK1, 0x07, 0x08, 0x01, 0x02, 0x00, 0x7D,
			  0x00, 0x85, 0x08);
	dcs_write_cmd_seq(ctx, GOA_VCLK2, 0x03, 0x04, 0x05, 0x06, 0x00, 0x7D,
			  0x00, 0x85, 0x08);
	dcs_write_seq(ctx, GOA_VCLK_OPT1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		      0x00, 0x00, 0x00, 0x00);
	dcs_write_cmd_seq(ctx, GOA_BICLK1, 0x07, 0x08);
	dcs_write_seq(ctx, 0x2D, 0x01);
	dcs_write_seq(ctx, 0x2F, 0x02, 0x00, 0x40, 0x05, 0x08, 0x54, 0x7D,
		      0x00);
	dcs_write_cmd_seq(ctx, GOA_BICLK2, 0x03, 0x04, 0x05, 0x06, 0x00);
	dcs_write_seq(ctx, 0x3D, 0x40);
	dcs_write_seq(ctx, 0x3F, 0x05, 0x08, 0x54, 0x7D, 0x00);
	dcs_write_seq(ctx, GOA_BICLK3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		      0x00, 0x00, 0x00, 0x00, 0x00);
	dcs_write_seq(ctx, GOA_BICLK4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		      0x00, 0x00);
	dcs_write_seq(ctx, 0x58, 0x00, 0x00, 0x00);
	dcs_write_seq(ctx, GOA_BICLK_OPT1, 0x00, 0x00, 0x00, 0x00, 0x00);
	dcs_write_seq(ctx, GOA_BICLK_OPT2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
	dcs_write_seq(ctx, MCS_GOA_GPO1, 0x00, 0x00, 0x00, 0x00);
	dcs_write_seq(ctx, MCS_GOA_GPO2, 0x00, 0x20, 0x00);
	dcs_write_seq(ctx, MCS_GOA_EQ, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
		      0x00, 0x00);
	dcs_write_seq(ctx, MCS_GOA_CLK_GALLON, 0x00, 0x00);
	dcs_write_cmd_seq(ctx, MCS_GOA_FS_SEL0, 0xBF, 0x02, 0x06, 0x14, 0x10,
			  0x16, 0x12, 0x08, 0x3F);
	dcs_write_cmd_seq(ctx, MCS_GOA_FS_SEL1, 0x3F, 0x3F, 0x3F, 0x3F, 0x0C,
			  0x0A, 0x0E, 0x3F, 0x3F, 0x00);
	dcs_write_cmd_seq(ctx, MCS_GOA_FS_SEL2, 0x04, 0x3F, 0x3F, 0x3F, 0x3F,
			  0x05, 0x01, 0x3F, 0x3F, 0x0F);
	dcs_write_cmd_seq(ctx, MCS_GOA_FS_SEL3, 0x0B, 0x0D, 0x3F, 0x3F, 0x3F,
			  0x3F);
	dcs_write_cmd_seq(ctx, 0xA2, 0x3F, 0x09, 0x13, 0x17, 0x11, 0x15);
	dcs_write_cmd_seq(ctx, 0xA9, 0x07, 0x03, 0x3F);
	dcs_write_cmd_seq(ctx, MCS_GOA_BS_SEL0, 0x3F, 0x05, 0x01, 0x17, 0x13,
			  0x15, 0x11, 0x0F, 0x3F);
	dcs_write_cmd_seq(ctx, MCS_GOA_BS_SEL1, 0x3F, 0x3F, 0x3F, 0x3F, 0x0B,
			  0x0D, 0x09, 0x3F, 0x3F, 0x07);
	dcs_write_cmd_seq(ctx, MCS_GOA_BS_SEL2, 0x03, 0x3F, 0x3F, 0x3F, 0x3F,
			  0x02, 0x06, 0x3F, 0x3F, 0x08);
	dcs_write_cmd_seq(ctx, MCS_GOA_BS_SEL3, 0x0C, 0x0A, 0x3F, 0x3F, 0x3F,
			  0x3F, 0x3F, 0x0E, 0x10, 0x14);
	dcs_write_cmd_seq(ctx, MCS_GOA_BS_SEL4, 0x12, 0x16, 0x00, 0x04, 0x3F);
	dcs_write_seq(ctx, 0xDC, 0x02);
	dcs_write_seq(ctx, 0xDE, 0x12);

	dcs_write_seq(ctx, MCS_CMD_MODE_SW, 0x0E); /* No documentation */
	dcs_write_seq(ctx, 0x01, 0x75);

	dcs_write_seq(ctx, MCS_CMD_MODE_SW, MCS_CMD2_P3);
	dcs_write_cmd_seq(ctx, MCS_GAMMA_VP, 0x00, 0x0C, 0x12, 0x0E, 0x06,
			  0x12, 0x0E, 0x0B, 0x15, 0x0B, 0x10, 0x07, 0x0F,
			  0x12, 0x0C, 0x00);
	dcs_write_cmd_seq(ctx, MCS_GAMMA_VN, 0x00, 0x0C, 0x12, 0x0E, 0x06,
			  0x12, 0x0E, 0x0B, 0x15, 0x0B, 0x10, 0x07, 0x0F,
			  0x12, 0x0C, 0x00);

	/* Exit CMD2 */
	dcs_write_seq(ctx, MCS_CMD_MODE_SW, MCS_CMD1_UCS);
}

/* disable(): bookkeeping only; the panel is switched off in unprepare(). */
static int rm68200_disable(struct drm_panel *panel)
{
	struct rm68200 *ctx = panel_to_rm68200(panel);

	if (!ctx->enabled)
		return 0;

	ctx->enabled = false;

	return 0;
}

/* unprepare(): display off + sleep in (best effort), assert reset, cut power. */
static int rm68200_unprepare(struct drm_panel *panel)
{
	struct rm68200 *ctx = panel_to_rm68200(panel);
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
	int ret;

	if (!ctx->prepared)
		return 0;

	ret = mipi_dsi_dcs_set_display_off(dsi);
	if (ret)
		dev_warn(panel->dev, "failed to set display off: %d\n", ret);

	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
	if (ret)
		dev_warn(panel->dev, "failed to enter sleep mode: %d\n", ret);
	msleep(120);

	if (ctx->reset_gpio) {
		gpiod_set_value_cansleep(ctx->reset_gpio, 1);
		msleep(20);
	}

	regulator_disable(ctx->supply);

	ctx->prepared = false;

	return 0;
}

/* prepare(): power up, optional reset pulse, init sequence, sleep out, on. */
static int rm68200_prepare(struct drm_panel *panel)
{
	struct rm68200 *ctx = panel_to_rm68200(panel);
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
	int ret;

	if (ctx->prepared)
		return 0;

	ret = regulator_enable(ctx->supply);
	if (ret < 0) {
		dev_err(ctx->dev, "failed to enable supply: %d\n", ret);
		return ret;
	}

	if (ctx->reset_gpio) {
		gpiod_set_value_cansleep(ctx->reset_gpio, 1);
		msleep(20);
		gpiod_set_value_cansleep(ctx->reset_gpio, 0);
		msleep(100);
	}

	rm68200_init_sequence(ctx);

	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret)
		return ret;
	msleep(125);

	ret = mipi_dsi_dcs_set_display_on(dsi);
	if (ret)
		return ret;
	msleep(20);

	ctx->prepared = true;

	return 0;
}

/* enable(): bookkeeping only; all hardware work happens in prepare(). */
static int rm68200_enable(struct drm_panel *panel)
{
	struct rm68200 *ctx = panel_to_rm68200(panel);

	if (ctx->enabled)
		return 0;

	ctx->enabled = true;

	return 0;
}

/* Report the single fixed 720x1280 mode and the physical panel size. */
static int rm68200_get_modes(struct drm_panel *panel,
			     struct drm_connector *connector)
{
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, &default_mode);
	if (!mode) {
		dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
			default_mode.hdisplay, default_mode.vdisplay,
			drm_mode_vrefresh(&default_mode));
		return -ENOMEM;
	}

	drm_mode_set_name(mode);

	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
	drm_mode_probed_add(connector, mode);

	connector->display_info.width_mm = mode->width_mm;
	connector->display_info.height_mm = mode->height_mm;

	return 1;
}

static const struct drm_panel_funcs rm68200_drm_funcs = {
	.disable = rm68200_disable,
	.unprepare = rm68200_unprepare,
	.prepare = rm68200_prepare,
	.enable = rm68200_enable,
	.get_modes = rm68200_get_modes,
};

/*
 * probe(): fetch optional reset GPIO and the "power" regulator, configure
 * the DSI link (2-lane RGB888 burst video), hook up the DT backlight and
 * register the panel before attaching to the host.
 */
static int rm68200_probe(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	struct rm68200 *ctx;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(ctx->reset_gpio)) {
		ret = PTR_ERR(ctx->reset_gpio);
		dev_err(dev, "cannot get reset GPIO: %d\n", ret);
		return ret;
	}

	ctx->supply = devm_regulator_get(dev, "power");
	if (IS_ERR(ctx->supply)) {
		ret = PTR_ERR(ctx->supply);
		/* probe-defer is normal; only log real failures */
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "cannot get regulator: %d\n", ret);
		return ret;
	}

	mipi_dsi_set_drvdata(dsi, ctx);

	ctx->dev = dev;

	dsi->lanes = 2;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
			  MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;

	drm_panel_init(&ctx->panel, dev, &rm68200_drm_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	ret = drm_panel_of_backlight(&ctx->panel);
	if (ret)
		return ret;

	drm_panel_add(&ctx->panel);

	ret = mipi_dsi_attach(dsi);
	if (ret < 0) {
		dev_err(dev, "mipi_dsi_attach() failed: %d\n", ret);
		drm_panel_remove(&ctx->panel);
		return ret;
	}

	return 0;
}

/* remove(): detach from the host and unregister the panel. */
static void rm68200_remove(struct mipi_dsi_device *dsi)
{
	struct rm68200 *ctx = mipi_dsi_get_drvdata(dsi);

	mipi_dsi_detach(dsi);
	drm_panel_remove(&ctx->panel);
}

static const struct of_device_id raydium_rm68200_of_match[] = {
	{ .compatible = "raydium,rm68200" },
	{ }
};
MODULE_DEVICE_TABLE(of, raydium_rm68200_of_match);

static struct mipi_dsi_driver raydium_rm68200_driver = {
	.probe = rm68200_probe,
	.remove = rm68200_remove,
	.driver = {
		.name = "panel-raydium-rm68200",
		.of_match_table = raydium_rm68200_of_match,
	},
};
module_mipi_dsi_driver(raydium_rm68200_driver);

MODULE_AUTHOR("Philippe Cornu <[email protected]>");
MODULE_AUTHOR("Yannick Fertre <[email protected]>");
MODULE_DESCRIPTION("DRM Driver for Raydium RM68200 MIPI DSI panel");
MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/panel/panel-raydium-rm68200.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STMicroelectronics SA 2017
 *
 * Authors: Philippe Cornu <[email protected]>
 *          Yannick Fertre <[email protected]>
 */

#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>

#include <video/mipi_display.h>

#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>

/* Default and maximum brightness for the panel's 8-bit backlight control */
#define OTM8009A_BACKLIGHT_DEFAULT 240
#define OTM8009A_BACKLIGHT_MAX 255

/*
 * Manufacturer Command Set.
 * Each value encodes <16-bit command> as (high byte = DCS command sent after
 * MCS_ADRSFT, low byte = address-shift parameter); see dcs_write_cmd_at().
 */
#define MCS_ADRSFT	0x0000	/* Address Shift Function */
#define MCS_PANSET	0xB3A6	/* Panel Type Setting */
#define MCS_SD_CTRL	0xC0A2	/* Source Driver Timing Setting */
#define MCS_P_DRV_M	0xC0B4	/* Panel Driving Mode */
#define MCS_OSC_ADJ	0xC181	/* Oscillator Adjustment for Idle/Normal mode */
#define MCS_RGB_VID_SET	0xC1A1	/* RGB Video Mode Setting */
#define MCS_SD_PCH_CTRL	0xC480	/* Source Driver Precharge Control */
#define MCS_NO_DOC1	0xC48A	/* Command not documented */
#define MCS_PWR_CTRL1	0xC580	/* Power Control Setting 1 */
#define MCS_PWR_CTRL2	0xC590	/* Power Control Setting 2 for Normal Mode */
#define MCS_PWR_CTRL4	0xC5B0	/* Power Control Setting 4 for DC Voltage */
#define MCS_PANCTRLSET1	0xCB80	/* Panel Control Setting 1 */
#define MCS_PANCTRLSET2	0xCB90	/* Panel Control Setting 2 */
#define MCS_PANCTRLSET3	0xCBA0	/* Panel Control Setting 3 */
#define MCS_PANCTRLSET4	0xCBB0	/* Panel Control Setting 4 */
#define MCS_PANCTRLSET5	0xCBC0	/* Panel Control Setting 5 */
#define MCS_PANCTRLSET6	0xCBD0	/* Panel Control Setting 6 */
#define MCS_PANCTRLSET7	0xCBE0	/* Panel Control Setting 7 */
#define MCS_PANCTRLSET8	0xCBF0	/* Panel Control Setting 8 */
#define MCS_PANU2D1	0xCC80	/* Panel U2D Setting 1 */
#define MCS_PANU2D2	0xCC90	/* Panel U2D Setting 2 */
#define MCS_PANU2D3	0xCCA0	/* Panel U2D Setting 3 */
#define MCS_PAND2U1	0xCCB0	/* Panel D2U Setting 1 */
#define MCS_PAND2U2	0xCCC0	/* Panel D2U Setting 2 */
#define MCS_PAND2U3	0xCCD0	/* Panel D2U Setting 3 */
#define MCS_GOAVST	0xCE80	/* GOA VST Setting */
#define MCS_GOACLKA1	0xCEA0	/* GOA CLKA1 Setting */
#define MCS_GOACLKA3	0xCEB0	/* GOA CLKA3 Setting */
#define MCS_GOAECLK	0xCFC0	/* GOA ECLK Setting */
#define MCS_NO_DOC2	0xCFD0	/* Command not documented */
#define MCS_GVDDSET	0xD800	/* GVDD/NGVDD */
#define MCS_VCOMDC	0xD900	/* VCOM Voltage Setting */
#define MCS_GMCT2_2P	0xE100	/* Gamma Correction 2.2+ Setting */
#define MCS_GMCT2_2N	0xE200	/* Gamma Correction 2.2- Setting */
#define MCS_NO_DOC3	0xF5B6	/* Command not documented */
#define MCS_CMD2_ENA1	0xFF00	/* Enable Access Command2 "CMD2" */
#define MCS_CMD2_ENA2	0xFF80	/* Enable Access Orise Command2 */

/* Native portrait resolution */
#define OTM8009A_HDISPLAY	480
#define OTM8009A_VDISPLAY	800

/* Per-panel driver state */
struct otm8009a {
	struct device *dev;		/* the underlying MIPI-DSI device */
	struct drm_panel panel;		/* DRM panel abstraction */
	struct backlight_device *bl_dev; /* DSI-DCS controlled backlight */
	struct gpio_desc *reset_gpio;	/* optional reset line (asserted high) */
	struct regulator *supply;	/* "power" supply */
	bool prepared;			/* power-up + init sequence completed */
	bool enabled;			/* backlight on */
};

static const struct drm_display_mode modes[] = {
	{ /* 50 Hz, preferred */
		.clock = 29700,
		.hdisplay = 480,
		.hsync_start = 480 + 98,
		.hsync_end = 480 + 98 + 32,
		.htotal = 480 + 98 + 32 + 98,
		.vdisplay = 800,
		.vsync_start = 800 + 15,
		.vsync_end = 800 + 15 + 10,
		.vtotal = 800 + 15 + 10 + 14,
		.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
		.width_mm = 52,
		.height_mm = 86,
	},
	{ /* 60 Hz */
		.clock = 33000,
		.hdisplay = 480,
		.hsync_start = 480 + 70,
		.hsync_end = 480 + 70 + 32,
		.htotal = 480 + 70 + 32 + 72,
		.vdisplay = 800,
		.vsync_start = 800 + 15,
		.vsync_end = 800 + 15 + 10,
		.vtotal = 800 + 15 + 10 + 16,
		.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
		.width_mm = 52,
		.height_mm = 86,
	},
};

/* Recover our driver state from the embedded drm_panel. */
static inline struct otm8009a *panel_to_otm8009a(struct drm_panel *panel)
{
	return container_of(panel, struct otm8009a, panel);
}

/*
 * Send a raw DCS write buffer; failures are logged but deliberately not
 * propagated (the init sequence is best-effort for individual writes).
 */
static void otm8009a_dcs_write_buf(struct otm8009a *ctx, const void *data,
				   size_t len)
{
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);

	if (mipi_dsi_dcs_write_buffer(dsi, data, len) < 0)
		dev_warn(ctx->dev, "mipi dsi dcs write buffer failed\n");
}

/* Send a DCS write built from a literal byte sequence. */
#define dcs_write_seq(ctx, seq...)			\
({							\
	static const u8 d[] = { seq };			\
	otm8009a_dcs_write_buf(ctx, d, ARRAY_SIZE(d));	\
})

/*
 * Send an MCS command: first shift the register address (low byte of cmd),
 * then write the payload to the DCS command in the high byte of cmd.
 */
#define dcs_write_cmd_at(ctx, cmd, seq...)		\
({							\
	dcs_write_seq(ctx, MCS_ADRSFT, (cmd) & 0xFF);	\
	dcs_write_seq(ctx, (cmd) >> 8, seq);		\
})

/*
 * Vendor-provided panel initialization sequence. The command ordering and
 * the interleaved delays are part of the panel's power-up contract; do not
 * reorder.
 *
 * NOTE(review): mdelay() busy-waits; this runs from prepare() which is a
 * sleepable context, so msleep() would presumably be preferable — confirm
 * before changing, as panel timing is sensitive.
 */
static int otm8009a_init_sequence(struct otm8009a *ctx)
{
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
	int ret;

	/* Enter CMD2 */
	dcs_write_cmd_at(ctx, MCS_CMD2_ENA1, 0x80, 0x09, 0x01);

	/* Enter Orise Command2 */
	dcs_write_cmd_at(ctx, MCS_CMD2_ENA2, 0x80, 0x09);

	dcs_write_cmd_at(ctx, MCS_SD_PCH_CTRL, 0x30);
	mdelay(10);

	dcs_write_cmd_at(ctx, MCS_NO_DOC1, 0x40);
	mdelay(10);

	dcs_write_cmd_at(ctx, MCS_PWR_CTRL4 + 1, 0xA9);
	dcs_write_cmd_at(ctx, MCS_PWR_CTRL2 + 1, 0x34);
	dcs_write_cmd_at(ctx, MCS_P_DRV_M, 0x50);
	dcs_write_cmd_at(ctx, MCS_VCOMDC, 0x4E);
	dcs_write_cmd_at(ctx, MCS_OSC_ADJ, 0x66); /* 65Hz */
	dcs_write_cmd_at(ctx, MCS_PWR_CTRL2 + 2, 0x01);
	dcs_write_cmd_at(ctx, MCS_PWR_CTRL2 + 5, 0x34);
	dcs_write_cmd_at(ctx, MCS_PWR_CTRL2 + 4, 0x33);
	dcs_write_cmd_at(ctx, MCS_GVDDSET, 0x79, 0x79);
	dcs_write_cmd_at(ctx, MCS_SD_CTRL + 1, 0x1B);
	dcs_write_cmd_at(ctx, MCS_PWR_CTRL1 + 2, 0x83);
	dcs_write_cmd_at(ctx, MCS_SD_PCH_CTRL + 1, 0x83);
	dcs_write_cmd_at(ctx, MCS_RGB_VID_SET, 0x0E);
	dcs_write_cmd_at(ctx, MCS_PANSET, 0x00, 0x01);

	/* GOA (Gate Output Array) timing */
	dcs_write_cmd_at(ctx, MCS_GOAVST, 0x85, 0x01, 0x00, 0x84, 0x01, 0x00);
	dcs_write_cmd_at(ctx, MCS_GOACLKA1, 0x18, 0x04, 0x03, 0x39, 0x00, 0x00,
			 0x00, 0x18, 0x03, 0x03, 0x3A, 0x00, 0x00, 0x00);
	dcs_write_cmd_at(ctx, MCS_GOACLKA3, 0x18, 0x02, 0x03, 0x3B, 0x00, 0x00,
			 0x00, 0x18, 0x01, 0x03, 0x3C, 0x00, 0x00, 0x00);
	dcs_write_cmd_at(ctx, MCS_GOAECLK, 0x01, 0x01, 0x20, 0x20, 0x00, 0x00,
			 0x01, 0x02, 0x00, 0x00);

	dcs_write_cmd_at(ctx, MCS_NO_DOC2, 0x00);

	/* Panel control settings 1..8 */
	dcs_write_cmd_at(ctx, MCS_PANCTRLSET1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
	dcs_write_cmd_at(ctx, MCS_PANCTRLSET2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
			 0, 0, 0, 0, 0);
	dcs_write_cmd_at(ctx, MCS_PANCTRLSET3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
			 0, 0, 0, 0, 0);
	dcs_write_cmd_at(ctx, MCS_PANCTRLSET4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
	dcs_write_cmd_at(ctx, MCS_PANCTRLSET5, 0, 4, 4, 4, 4, 4, 0, 0, 0, 0,
			 0, 0, 0, 0, 0);
	dcs_write_cmd_at(ctx, MCS_PANCTRLSET6, 0, 0, 0, 0, 0, 0, 4, 4, 4, 4,
			 4, 0, 0, 0, 0);
	dcs_write_cmd_at(ctx, MCS_PANCTRLSET7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
	dcs_write_cmd_at(ctx, MCS_PANCTRLSET8, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
			 0xFF, 0xFF, 0xFF, 0xFF, 0xFF);

	/* Up-to-down / down-to-up scan mappings */
	dcs_write_cmd_at(ctx, MCS_PANU2D1, 0x00, 0x26, 0x09, 0x0B, 0x01, 0x25,
			 0x00, 0x00, 0x00, 0x00);
	dcs_write_cmd_at(ctx, MCS_PANU2D2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			 0x00, 0x00, 0x00, 0x00, 0x00, 0x26, 0x0A, 0x0C, 0x02);
	dcs_write_cmd_at(ctx, MCS_PANU2D3, 0x25, 0x00, 0x00, 0x00, 0x00, 0x00,
			 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
	dcs_write_cmd_at(ctx, MCS_PAND2U1, 0x00, 0x25, 0x0C, 0x0A, 0x02, 0x26,
			 0x00, 0x00, 0x00, 0x00);
	dcs_write_cmd_at(ctx, MCS_PAND2U2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			 0x00, 0x00, 0x00, 0x00, 0x00, 0x25, 0x0B, 0x09, 0x01);
	dcs_write_cmd_at(ctx, MCS_PAND2U3, 0x26, 0x00, 0x00, 0x00, 0x00, 0x00,
			 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);

	dcs_write_cmd_at(ctx, MCS_PWR_CTRL1 + 1, 0x66);

	dcs_write_cmd_at(ctx, MCS_NO_DOC3, 0x06);

	/* Gamma correction 2.2+ / 2.2- */
	dcs_write_cmd_at(ctx, MCS_GMCT2_2P, 0x00, 0x09, 0x0F, 0x0E, 0x07, 0x10,
			 0x0B, 0x0A, 0x04, 0x07, 0x0B, 0x08, 0x0F, 0x10, 0x0A,
			 0x01);
	dcs_write_cmd_at(ctx, MCS_GMCT2_2N, 0x00, 0x09, 0x0F, 0x0E, 0x07, 0x10,
			 0x0B, 0x0A, 0x04, 0x07, 0x0B, 0x08, 0x0F, 0x10, 0x0A,
			 0x01);

	/* Exit CMD2 */
	dcs_write_cmd_at(ctx, MCS_CMD2_ENA1, 0xFF, 0xFF, 0xFF);

	ret = mipi_dsi_dcs_nop(dsi);
	if (ret)
		return ret;

	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret)
		return ret;

	/* Wait for sleep out exit */
	mdelay(120);

	/* Default portrait 480x800 rgb24 */
	dcs_write_seq(ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x00);

	ret = mipi_dsi_dcs_set_column_address(dsi, 0, OTM8009A_HDISPLAY - 1);
	if (ret)
		return ret;

	ret = mipi_dsi_dcs_set_page_address(dsi, 0, OTM8009A_VDISPLAY - 1);
	if (ret)
		return ret;

	/* See otm8009a driver documentation for pixel format descriptions */
	ret = mipi_dsi_dcs_set_pixel_format(dsi, MIPI_DCS_PIXEL_FMT_24BIT |
					    MIPI_DCS_PIXEL_FMT_24BIT << 4);
	if (ret)
		return ret;

	/* Disable CABC feature */
	dcs_write_seq(ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00);

	ret = mipi_dsi_dcs_set_display_on(dsi);
	if (ret)
		return ret;

	ret = mipi_dsi_dcs_nop(dsi);
	if (ret)
		return ret;

	/* Send Command GRAM memory write (no parameters) */
	dcs_write_seq(ctx, MIPI_DCS_WRITE_MEMORY_START);

	/* Wait a short while to let the panel be ready before the 1st frame */
	mdelay(10);

	return 0;
}

/* drm_panel .disable: backlight off, display off, enter sleep mode. */
static int otm8009a_disable(struct drm_panel *panel)
{
	struct otm8009a *ctx = panel_to_otm8009a(panel);
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
	int ret;

	if (!ctx->enabled)
		return 0; /* This is not an issue so we return 0 here */

	backlight_disable(ctx->bl_dev);

	ret = mipi_dsi_dcs_set_display_off(dsi);
	if (ret)
		return ret;

	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
	if (ret)
		return ret;

	msleep(120);

	ctx->enabled = false;

	return 0;
}

/* drm_panel .unprepare: assert reset (if wired) and cut the supply. */
static int otm8009a_unprepare(struct drm_panel *panel)
{
	struct otm8009a *ctx = panel_to_otm8009a(panel);

	if (!ctx->prepared)
		return 0;

	if (ctx->reset_gpio) {
		gpiod_set_value_cansleep(ctx->reset_gpio, 1);
		msleep(20);
	}

	regulator_disable(ctx->supply);

	ctx->prepared = false;

	return 0;
}

/* drm_panel .prepare: power up, pulse reset, run the init sequence. */
static int otm8009a_prepare(struct drm_panel *panel)
{
	struct otm8009a *ctx = panel_to_otm8009a(panel);
	int ret;

	if (ctx->prepared)
		return 0;

	ret = regulator_enable(ctx->supply);
	if (ret < 0) {
		dev_err(panel->dev, "failed to enable supply: %d\n", ret);
		return ret;
	}

	if (ctx->reset_gpio) {
		/* deassert, assert 20ms, then release and wait 100ms */
		gpiod_set_value_cansleep(ctx->reset_gpio, 0);
		gpiod_set_value_cansleep(ctx->reset_gpio, 1);
		msleep(20);
		gpiod_set_value_cansleep(ctx->reset_gpio, 0);
		msleep(100);
	}

	ret = otm8009a_init_sequence(ctx);
	if (ret)
		return ret;

	ctx->prepared = true;

	return 0;
}

/* drm_panel .enable: turn the backlight on. */
static int otm8009a_enable(struct drm_panel *panel)
{
	struct otm8009a *ctx = panel_to_otm8009a(panel);

	if (ctx->enabled)
		return 0;

	backlight_enable(ctx->bl_dev);

	ctx->enabled = true;

	return 0;
}

/*
 * drm_panel .get_modes: duplicate all entries from modes[] onto the
 * connector; the first entry is marked preferred.
 *
 * NOTE(review): the display_info assignment below reads 'mode' after the
 * loop, i.e. the LAST duplicated mode. Both modes[] entries share the same
 * width_mm/height_mm, so this is harmless here, but it would be fragile if
 * entries with differing physical sizes were added.
 */
static int otm8009a_get_modes(struct drm_panel *panel,
			      struct drm_connector *connector)
{
	struct drm_display_mode *mode;
	unsigned int num_modes = ARRAY_SIZE(modes);
	unsigned int i;

	for (i = 0; i < num_modes; i++) {
		mode = drm_mode_duplicate(connector->dev, &modes[i]);
		if (!mode) {
			dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
				modes[i].hdisplay,
				modes[i].vdisplay,
				drm_mode_vrefresh(&modes[i]));
			return -ENOMEM;
		}

		mode->type = DRM_MODE_TYPE_DRIVER;

		/* Setting first mode as preferred */
		if (!i)
			mode->type |= DRM_MODE_TYPE_PREFERRED;

		drm_mode_set_name(mode);
		drm_mode_probed_add(connector, mode);
	}

	connector->display_info.width_mm = mode->width_mm;
	connector->display_info.height_mm = mode->height_mm;

	return num_modes;
}

static const struct drm_panel_funcs otm8009a_drm_funcs = {
	.disable   = otm8009a_disable,
	.unprepare = otm8009a_unprepare,
	.prepare   = otm8009a_prepare,
	.enable    = otm8009a_enable,
	.get_modes = otm8009a_get_modes,
};

/*
 * DSI-BASED BACKLIGHT
 */

/*
 * Push the requested brightness / power state to the panel over DCS.
 * Requires the panel to be prepared first (the DSI link must be up).
 */
static int otm8009a_backlight_update_status(struct backlight_device *bd)
{
	struct otm8009a *ctx = bl_get_data(bd);
	u8 data[2];

	if (!ctx->prepared) {
		dev_dbg(&bd->dev, "lcd not ready yet for setting its backlight!\n");
		return -ENXIO;
	}

	if (bd->props.power <= FB_BLANK_NORMAL) {
		/* Power on the backlight with the requested brightness
		 * Note We can not use mipi_dsi_dcs_set_display_brightness()
		 * as otm8009a driver support only 8-bit brightness (1 param).
		 */
		data[0] = MIPI_DCS_SET_DISPLAY_BRIGHTNESS;
		data[1] = bd->props.brightness;
		otm8009a_dcs_write_buf(ctx, data, ARRAY_SIZE(data));

		/* set Brightness Control & Backlight on */
		data[1] = 0x24;
	} else {
		/* Power off the backlight: set Brightness Control & Bl off */
		data[1] = 0;
	}

	/* Update Brightness Control & Backlight */
	data[0] = MIPI_DCS_WRITE_CONTROL_DISPLAY;
	otm8009a_dcs_write_buf(ctx, data, ARRAY_SIZE(data));

	return 0;
}

static const struct backlight_ops otm8009a_backlight_ops = {
	.update_status = otm8009a_backlight_update_status,
};

/* Probe: acquire resources, register backlight and panel, attach to DSI. */
static int otm8009a_probe(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	struct otm8009a *ctx;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(ctx->reset_gpio)) {
		dev_err(dev, "cannot get reset-gpio\n");
		return PTR_ERR(ctx->reset_gpio);
	}

	ctx->supply = devm_regulator_get(dev, "power");
	if (IS_ERR(ctx->supply)) {
		ret = PTR_ERR(ctx->supply);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to request regulator: %d\n", ret);
		return ret;
	}

	mipi_dsi_set_drvdata(dsi, ctx);

	ctx->dev = dev;

	dsi->lanes = 2;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
			  MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;

	drm_panel_init(&ctx->panel, dev, &otm8009a_drm_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	ctx->bl_dev = devm_backlight_device_register(dev, dev_name(dev),
						     dev, ctx,
						     &otm8009a_backlight_ops,
						     NULL);
	if (IS_ERR(ctx->bl_dev)) {
		ret = PTR_ERR(ctx->bl_dev);
		dev_err(dev, "failed to register backlight: %d\n", ret);
		return ret;
	}

	ctx->bl_dev->props.max_brightness = OTM8009A_BACKLIGHT_MAX;
	ctx->bl_dev->props.brightness = OTM8009A_BACKLIGHT_DEFAULT;
	ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
	ctx->bl_dev->props.type = BACKLIGHT_RAW;

	drm_panel_add(&ctx->panel);

	ret = mipi_dsi_attach(dsi);
	if (ret < 0) {
		dev_err(dev, "mipi_dsi_attach failed. Is host ready?\n");
		drm_panel_remove(&ctx->panel);
		return ret;
	}

	return 0;
}

/* Remove: detach from the host and unregister the panel. */
static void otm8009a_remove(struct mipi_dsi_device *dsi)
{
	struct otm8009a *ctx = mipi_dsi_get_drvdata(dsi);

	mipi_dsi_detach(dsi);
	drm_panel_remove(&ctx->panel);
}

static const struct of_device_id orisetech_otm8009a_of_match[] = {
	{ .compatible = "orisetech,otm8009a" },
	{ }
};
MODULE_DEVICE_TABLE(of, orisetech_otm8009a_of_match);

static struct mipi_dsi_driver orisetech_otm8009a_driver = {
	.probe  = otm8009a_probe,
	.remove = otm8009a_remove,
	.driver = {
		.name = "panel-orisetech-otm8009a",
		.of_match_table = orisetech_otm8009a_of_match,
	},
};
module_mipi_dsi_driver(orisetech_otm8009a_driver);

MODULE_AUTHOR("Philippe Cornu <[email protected]>");
MODULE_AUTHOR("Yannick Fertre <[email protected]>");
MODULE_DESCRIPTION("DRM driver for Orise Tech OTM8009A MIPI DSI panel");
MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
// SPDX-License-Identifier: GPL-2.0
/*
 * NV3051D MIPI-DSI panel driver for Anbernic RG353x
 * Copyright (C) 2022 Chris Morgan
 *
 * based on
 *
 * Elida kd35t133 3.5" MIPI-DSI panel driver
 * Copyright (C) Theobroma Systems 2020
 */

#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>

#include <video/display_timing.h>
#include <video/mipi_display.h>

#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>

/* Static per-compatible panel description (matched via OF data). */
struct nv3051d_panel_info {
	const struct drm_display_mode *display_modes; /* supported modes */
	unsigned int num_modes;			      /* entries in array */
	u16 width_mm, height_mm;		      /* physical size */
	u32 bus_flags;				      /* DRM bus flags */
};

/* Per-device driver state */
struct panel_nv3051d {
	struct device *dev;			/* underlying DSI device */
	struct drm_panel panel;			/* DRM panel abstraction */
	struct gpio_desc *reset_gpio;		/* optional reset line */
	const struct nv3051d_panel_info *panel_info; /* matched description */
	struct regulator *vdd;			/* "vdd" supply */
};

/* Recover our driver state from the embedded drm_panel. */
static inline struct panel_nv3051d *panel_to_panelnv3051d(struct drm_panel *panel)
{
	return container_of(panel, struct panel_nv3051d, panel);
}

/*
 * Vendor-supplied register init sequence, no documentation available.
 * Writes of 0xFF select a register page (the 0x30/0x52/<page> triplets);
 * the ordering is part of the contract — do not reorder.
 */
static int panel_nv3051d_init_sequence(struct panel_nv3051d *ctx)
{
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);

	/*
	 * Init sequence was supplied by device vendor with no
	 * documentation.
	 */

	/* Select page 0x01 */
	mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x30);
	mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x52);
	mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x01);
	mipi_dsi_dcs_write_seq(dsi, 0xE3, 0x00);
	mipi_dsi_dcs_write_seq(dsi, 0x03, 0x40);
	mipi_dsi_dcs_write_seq(dsi, 0x04, 0x00);
	mipi_dsi_dcs_write_seq(dsi, 0x05, 0x03);
	mipi_dsi_dcs_write_seq(dsi, 0x24, 0x12);
	mipi_dsi_dcs_write_seq(dsi, 0x25, 0x1E);
	mipi_dsi_dcs_write_seq(dsi, 0x26, 0x28);
	mipi_dsi_dcs_write_seq(dsi, 0x27, 0x52);
	mipi_dsi_dcs_write_seq(dsi, 0x28, 0x57);
	mipi_dsi_dcs_write_seq(dsi, 0x29, 0x01);
	mipi_dsi_dcs_write_seq(dsi, 0x2A, 0xDF);
	mipi_dsi_dcs_write_seq(dsi, 0x38, 0x9C);
	mipi_dsi_dcs_write_seq(dsi, 0x39, 0xA7);
	mipi_dsi_dcs_write_seq(dsi, 0x3A, 0x53);
	mipi_dsi_dcs_write_seq(dsi, 0x44, 0x00);
	mipi_dsi_dcs_write_seq(dsi, 0x49, 0x3C);
	mipi_dsi_dcs_write_seq(dsi, 0x59, 0xFE);
	mipi_dsi_dcs_write_seq(dsi, 0x5C, 0x00);
	mipi_dsi_dcs_write_seq(dsi, 0x91, 0x77);
	mipi_dsi_dcs_write_seq(dsi, 0x92, 0x77);
	mipi_dsi_dcs_write_seq(dsi, 0xA0, 0x55);
	mipi_dsi_dcs_write_seq(dsi, 0xA1, 0x50);
	mipi_dsi_dcs_write_seq(dsi, 0xA4, 0x9C);
	mipi_dsi_dcs_write_seq(dsi, 0xA7, 0x02);
	mipi_dsi_dcs_write_seq(dsi, 0xA8, 0x01);
	mipi_dsi_dcs_write_seq(dsi, 0xA9, 0x01);
	mipi_dsi_dcs_write_seq(dsi, 0xAA, 0xFC);
	mipi_dsi_dcs_write_seq(dsi, 0xAB, 0x28);
	mipi_dsi_dcs_write_seq(dsi, 0xAC, 0x06);
	mipi_dsi_dcs_write_seq(dsi, 0xAD, 0x06);
	mipi_dsi_dcs_write_seq(dsi, 0xAE, 0x06);
	mipi_dsi_dcs_write_seq(dsi, 0xAF, 0x03);
	mipi_dsi_dcs_write_seq(dsi, 0xB0, 0x08);
	mipi_dsi_dcs_write_seq(dsi, 0xB1, 0x26);
	mipi_dsi_dcs_write_seq(dsi, 0xB2, 0x28);
	mipi_dsi_dcs_write_seq(dsi, 0xB3, 0x28);
	mipi_dsi_dcs_write_seq(dsi, 0xB4, 0x33);
	mipi_dsi_dcs_write_seq(dsi, 0xB5, 0x08);
	mipi_dsi_dcs_write_seq(dsi, 0xB6, 0x26);
	mipi_dsi_dcs_write_seq(dsi, 0xB7, 0x08);
	mipi_dsi_dcs_write_seq(dsi, 0xB8, 0x26);

	/* Select page 0x02 */
	mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x30);
	mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x52);
	mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x02);
	mipi_dsi_dcs_write_seq(dsi, 0xB1, 0x0E);
	mipi_dsi_dcs_write_seq(dsi, 0xD1, 0x0E);
	mipi_dsi_dcs_write_seq(dsi, 0xB4, 0x29);
	mipi_dsi_dcs_write_seq(dsi, 0xD4, 0x2B);
	mipi_dsi_dcs_write_seq(dsi, 0xB2, 0x0C);
	mipi_dsi_dcs_write_seq(dsi, 0xD2, 0x0A);
	mipi_dsi_dcs_write_seq(dsi, 0xB3, 0x28);
	mipi_dsi_dcs_write_seq(dsi, 0xD3, 0x28);
	mipi_dsi_dcs_write_seq(dsi, 0xB6, 0x11);
	mipi_dsi_dcs_write_seq(dsi, 0xD6, 0x0D);
	mipi_dsi_dcs_write_seq(dsi, 0xB7, 0x32);
	mipi_dsi_dcs_write_seq(dsi, 0xD7, 0x30);
	mipi_dsi_dcs_write_seq(dsi, 0xC1, 0x04);
	mipi_dsi_dcs_write_seq(dsi, 0xE1, 0x06);
	mipi_dsi_dcs_write_seq(dsi, 0xB8, 0x0A);
	mipi_dsi_dcs_write_seq(dsi, 0xD8, 0x0A);
	mipi_dsi_dcs_write_seq(dsi, 0xB9, 0x01);
	mipi_dsi_dcs_write_seq(dsi, 0xD9, 0x01);
	mipi_dsi_dcs_write_seq(dsi, 0xBD, 0x13);
	mipi_dsi_dcs_write_seq(dsi, 0xDD, 0x13);
	mipi_dsi_dcs_write_seq(dsi, 0xBC, 0x11);
	mipi_dsi_dcs_write_seq(dsi, 0xDC, 0x11);
	mipi_dsi_dcs_write_seq(dsi, 0xBB, 0x0F);
	mipi_dsi_dcs_write_seq(dsi, 0xDB, 0x0F);
	mipi_dsi_dcs_write_seq(dsi, 0xBA, 0x0F);
	mipi_dsi_dcs_write_seq(dsi, 0xDA, 0x0F);
	mipi_dsi_dcs_write_seq(dsi, 0xBE, 0x18);
	mipi_dsi_dcs_write_seq(dsi, 0xDE, 0x18);
	mipi_dsi_dcs_write_seq(dsi, 0xBF, 0x0F);
	mipi_dsi_dcs_write_seq(dsi, 0xDF, 0x0F);
	mipi_dsi_dcs_write_seq(dsi, 0xC0, 0x17);
	mipi_dsi_dcs_write_seq(dsi, 0xE0, 0x17);
	mipi_dsi_dcs_write_seq(dsi, 0xB5, 0x3B);
	mipi_dsi_dcs_write_seq(dsi, 0xD5, 0x3C);
	mipi_dsi_dcs_write_seq(dsi, 0xB0, 0x0B);
	mipi_dsi_dcs_write_seq(dsi, 0xD0, 0x0C);

	/* Select page 0x03 */
	mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x30);
	mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x52);
	mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x03);
	mipi_dsi_dcs_write_seq(dsi, 0x00, 0x2A);
	mipi_dsi_dcs_write_seq(dsi, 0x01, 0x2A);
	mipi_dsi_dcs_write_seq(dsi, 0x02, 0x2A);
	mipi_dsi_dcs_write_seq(dsi, 0x03, 0x2A);
	mipi_dsi_dcs_write_seq(dsi, 0x04, 0x61);
	mipi_dsi_dcs_write_seq(dsi, 0x05, 0x80);
	mipi_dsi_dcs_write_seq(dsi, 0x06, 0xC7);
	mipi_dsi_dcs_write_seq(dsi, 0x07, 0x01);
	mipi_dsi_dcs_write_seq(dsi, 0x08, 0x82);
	mipi_dsi_dcs_write_seq(dsi, 0x09, 0x83);
	mipi_dsi_dcs_write_seq(dsi, 0x30, 0x2A);
	mipi_dsi_dcs_write_seq(dsi, 0x31, 0x2A);
	mipi_dsi_dcs_write_seq(dsi, 0x32, 0x2A);
	mipi_dsi_dcs_write_seq(dsi, 0x33, 0x2A);
	mipi_dsi_dcs_write_seq(dsi, 0x34, 0x61);
	mipi_dsi_dcs_write_seq(dsi, 0x35, 0xC5);
	mipi_dsi_dcs_write_seq(dsi, 0x36, 0x80);
	mipi_dsi_dcs_write_seq(dsi, 0x37, 0x23);
	mipi_dsi_dcs_write_seq(dsi, 0x40, 0x82);
	mipi_dsi_dcs_write_seq(dsi, 0x41, 0x83);
	mipi_dsi_dcs_write_seq(dsi, 0x42, 0x80);
	mipi_dsi_dcs_write_seq(dsi, 0x43, 0x81);
	mipi_dsi_dcs_write_seq(dsi, 0x44, 0x11);
	mipi_dsi_dcs_write_seq(dsi, 0x45, 0xF2);
	mipi_dsi_dcs_write_seq(dsi, 0x46, 0xF1);
	mipi_dsi_dcs_write_seq(dsi, 0x47, 0x11);
	mipi_dsi_dcs_write_seq(dsi, 0x48, 0xF4);
	mipi_dsi_dcs_write_seq(dsi, 0x49, 0xF3);
	mipi_dsi_dcs_write_seq(dsi, 0x50, 0x02);
	mipi_dsi_dcs_write_seq(dsi, 0x51, 0x01);
	mipi_dsi_dcs_write_seq(dsi, 0x52, 0x04);
	mipi_dsi_dcs_write_seq(dsi, 0x53, 0x03);
	mipi_dsi_dcs_write_seq(dsi, 0x54, 0x11);
	mipi_dsi_dcs_write_seq(dsi, 0x55, 0xF6);
	mipi_dsi_dcs_write_seq(dsi, 0x56, 0xF5);
	mipi_dsi_dcs_write_seq(dsi, 0x57, 0x11);
	mipi_dsi_dcs_write_seq(dsi, 0x58, 0xF8);
	mipi_dsi_dcs_write_seq(dsi, 0x59, 0xF7);
	mipi_dsi_dcs_write_seq(dsi, 0x7E, 0x02);
	mipi_dsi_dcs_write_seq(dsi, 0x7F, 0x80);
	mipi_dsi_dcs_write_seq(dsi, 0xE0, 0x5A);
	mipi_dsi_dcs_write_seq(dsi, 0xB1, 0x00);
	mipi_dsi_dcs_write_seq(dsi, 0xB4, 0x0E);
	mipi_dsi_dcs_write_seq(dsi, 0xB5, 0x0F);
	mipi_dsi_dcs_write_seq(dsi, 0xB6, 0x04);
	mipi_dsi_dcs_write_seq(dsi, 0xB7, 0x07);
	mipi_dsi_dcs_write_seq(dsi, 0xB8, 0x06);
	mipi_dsi_dcs_write_seq(dsi, 0xB9, 0x05);
	mipi_dsi_dcs_write_seq(dsi, 0xBA, 0x0F);
	mipi_dsi_dcs_write_seq(dsi, 0xC7, 0x00);
	mipi_dsi_dcs_write_seq(dsi, 0xCA, 0x0E);
	mipi_dsi_dcs_write_seq(dsi, 0xCB, 0x0F);
	mipi_dsi_dcs_write_seq(dsi, 0xCC, 0x04);
	mipi_dsi_dcs_write_seq(dsi, 0xCD, 0x07);
	mipi_dsi_dcs_write_seq(dsi, 0xCE, 0x06);
	mipi_dsi_dcs_write_seq(dsi, 0xCF, 0x05);
	mipi_dsi_dcs_write_seq(dsi, 0xD0, 0x0F);
	mipi_dsi_dcs_write_seq(dsi, 0x81, 0x0F);
	mipi_dsi_dcs_write_seq(dsi, 0x84, 0x0E);
	mipi_dsi_dcs_write_seq(dsi, 0x85, 0x0F);
	mipi_dsi_dcs_write_seq(dsi, 0x86, 0x07);
	mipi_dsi_dcs_write_seq(dsi, 0x87, 0x04);
	mipi_dsi_dcs_write_seq(dsi, 0x88, 0x05);
	mipi_dsi_dcs_write_seq(dsi, 0x89, 0x06);
	mipi_dsi_dcs_write_seq(dsi, 0x8A, 0x00);
	mipi_dsi_dcs_write_seq(dsi, 0x97, 0x0F);
	mipi_dsi_dcs_write_seq(dsi, 0x9A, 0x0E);
	mipi_dsi_dcs_write_seq(dsi, 0x9B, 0x0F);
	mipi_dsi_dcs_write_seq(dsi, 0x9C, 0x07);
	mipi_dsi_dcs_write_seq(dsi, 0x9D, 0x04);
	mipi_dsi_dcs_write_seq(dsi, 0x9E, 0x05);
	mipi_dsi_dcs_write_seq(dsi, 0x9F, 0x06);
	mipi_dsi_dcs_write_seq(dsi, 0xA0, 0x00);

	/* Select page 0x02 again */
	mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x30);
	mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x52);
	mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x02);
	mipi_dsi_dcs_write_seq(dsi, 0x01, 0x01);
	mipi_dsi_dcs_write_seq(dsi, 0x02, 0xDA);
	mipi_dsi_dcs_write_seq(dsi, 0x03, 0xBA);
	mipi_dsi_dcs_write_seq(dsi, 0x04, 0xA8);
	mipi_dsi_dcs_write_seq(dsi, 0x05, 0x9A);
	mipi_dsi_dcs_write_seq(dsi, 0x06, 0x70);
	mipi_dsi_dcs_write_seq(dsi, 0x07, 0xFF);
	mipi_dsi_dcs_write_seq(dsi, 0x08, 0x91);
	mipi_dsi_dcs_write_seq(dsi, 0x09, 0x90);
	mipi_dsi_dcs_write_seq(dsi, 0x0A, 0xFF);
	mipi_dsi_dcs_write_seq(dsi, 0x0B, 0x8F);
	mipi_dsi_dcs_write_seq(dsi, 0x0C, 0x60);
	mipi_dsi_dcs_write_seq(dsi, 0x0D, 0x58);
	mipi_dsi_dcs_write_seq(dsi, 0x0E, 0x48);
	mipi_dsi_dcs_write_seq(dsi, 0x0F, 0x38);
	mipi_dsi_dcs_write_seq(dsi, 0x10, 0x2B);

	/* Select page 0x00 */
	mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x30);
	mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x52);
	mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x00);
	mipi_dsi_dcs_write_seq(dsi, 0x36, 0x02);
	mipi_dsi_dcs_write_seq(dsi, 0x3A, 0x70);

	dev_dbg(ctx->dev, "Panel init sequence done\n");

	return 0;
}

/*
 * drm_panel .unprepare: display off, enter sleep mode, cut the supply.
 * A failure to turn the display off is logged but does not abort, so the
 * supply still gets disabled on that path.
 */
static int panel_nv3051d_unprepare(struct drm_panel *panel)
{
	struct panel_nv3051d *ctx = panel_to_panelnv3051d(panel);
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
	int ret;

	ret = mipi_dsi_dcs_set_display_off(dsi);
	if (ret < 0)
		dev_err(ctx->dev, "failed to set display off: %d\n", ret);

	msleep(20);

	ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
	if (ret < 0) {
		/* NOTE(review): returning here skips regulator_disable(),
		 * leaving vdd enabled on this error path — confirm intent. */
		dev_err(ctx->dev, "failed to enter sleep mode: %d\n", ret);
		return ret;
	}

	usleep_range(10000, 15000);

	regulator_disable(ctx->vdd);

	return 0;
}

/* drm_panel .prepare: power up, pulse reset, run init, exit sleep. */
static int panel_nv3051d_prepare(struct drm_panel *panel)
{
	struct panel_nv3051d *ctx = panel_to_panelnv3051d(panel);
	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
	int ret;

	dev_dbg(ctx->dev, "Resetting the panel\n");

	ret = regulator_enable(ctx->vdd);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to enable vdd supply: %d\n", ret);
		return ret;
	}

	usleep_range(2000, 3000);
	/* assert reset for 150ms, then release and wait 20ms */
	gpiod_set_value_cansleep(ctx->reset_gpio, 1);
	msleep(150);
	gpiod_set_value_cansleep(ctx->reset_gpio, 0);
	msleep(20);

	ret = panel_nv3051d_init_sequence(ctx);
	if (ret < 0) {
		dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret);
		goto disable_vdd;
	}

	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
		goto disable_vdd;
	}

	msleep(200);

	ret = mipi_dsi_dcs_set_display_on(dsi);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to set display on: %d\n", ret);
		goto disable_vdd;
	}

	usleep_range(10000, 15000);

	return 0;

disable_vdd:
	/* goto-cleanup: any failure after power-up releases the supply */
	regulator_disable(ctx->vdd);
	return ret;
}

/*
 * drm_panel .get_modes: duplicate all modes from the matched panel_info;
 * a single mode is marked preferred.
 */
static int panel_nv3051d_get_modes(struct drm_panel *panel,
				   struct drm_connector *connector)
{
	struct panel_nv3051d *ctx = panel_to_panelnv3051d(panel);
	const struct nv3051d_panel_info *panel_info = ctx->panel_info;
	struct drm_display_mode *mode;
	unsigned int i;

	for (i = 0; i < panel_info->num_modes; i++) {
		mode = drm_mode_duplicate(connector->dev,
					  &panel_info->display_modes[i]);
		if (!mode)
			return -ENOMEM;

		drm_mode_set_name(mode);

		mode->type = DRM_MODE_TYPE_DRIVER;
		if (panel_info->num_modes == 1)
			mode->type |= DRM_MODE_TYPE_PREFERRED;

		drm_mode_probed_add(connector, mode);
	}

	connector->display_info.bpc = 8;
	connector->display_info.width_mm = panel_info->width_mm;
	connector->display_info.height_mm = panel_info->height_mm;
	connector->display_info.bus_flags = panel_info->bus_flags;

	return panel_info->num_modes;
}

static const struct drm_panel_funcs panel_nv3051d_funcs = {
	.unprepare = panel_nv3051d_unprepare,
	.prepare   = panel_nv3051d_prepare,
	.get_modes = panel_nv3051d_get_modes,
};

/* Probe: match OF data, acquire resources, register panel, attach to DSI. */
static int panel_nv3051d_probe(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	struct panel_nv3051d *ctx;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = dev;

	ctx->panel_info = of_device_get_match_data(dev);
	if (!ctx->panel_info)
		return -EINVAL;

	ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(ctx->reset_gpio)) {
		dev_err(dev, "cannot get reset gpio\n");
		return PTR_ERR(ctx->reset_gpio);
	}

	ctx->vdd = devm_regulator_get(dev, "vdd");
	if (IS_ERR(ctx->vdd)) {
		ret = PTR_ERR(ctx->vdd);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to request vdd regulator: %d\n",
				ret);
		return ret;
	}

	mipi_dsi_set_drvdata(dsi, ctx);

	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
			  MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;

	drm_panel_init(&ctx->panel, &dsi->dev, &panel_nv3051d_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	ret = drm_panel_of_backlight(&ctx->panel);
	if (ret)
		return ret;

	drm_panel_add(&ctx->panel);

	ret = mipi_dsi_attach(dsi);
	if (ret < 0) {
		dev_err(dev, "mipi_dsi_attach failed: %d\n", ret);
		drm_panel_remove(&ctx->panel);
		return ret;
	}

	return 0;
}

/* Shutdown: best-effort unprepare/disable so the panel powers down cleanly. */
static void panel_nv3051d_shutdown(struct mipi_dsi_device *dsi)
{
	struct panel_nv3051d *ctx = mipi_dsi_get_drvdata(dsi);
	int ret;

	ret = drm_panel_unprepare(&ctx->panel);
	if (ret < 0)
		dev_err(&dsi->dev, "Failed to unprepare panel: %d\n", ret);

	ret = drm_panel_disable(&ctx->panel);
	if (ret < 0)
		dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);
}

/* Remove: power down, detach from host, unregister the panel. */
static void panel_nv3051d_remove(struct mipi_dsi_device *dsi)
{
	struct panel_nv3051d *ctx = mipi_dsi_get_drvdata(dsi);
	int ret;

	panel_nv3051d_shutdown(dsi);

	ret = mipi_dsi_detach(dsi);
	if (ret < 0)
		dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);

	drm_panel_remove(&ctx->panel);
}

/* All three modes share identical geometry; only the pixel clock differs. */
static const struct drm_display_mode nv3051d_rgxx3_modes[] = {
	{ /* 120hz */
		.hdisplay = 640,
		.hsync_start = 640 + 40,
		.hsync_end = 640 + 40 + 2,
		.htotal = 640 + 40 + 2 + 80,
		.vdisplay = 480,
		.vsync_start = 480 + 18,
		.vsync_end = 480 + 18 + 2,
		.vtotal = 480 + 18 + 2 + 28,
		.clock = 48300,
		.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
	},
	{ /* 100hz */
		.hdisplay = 640,
		.hsync_start = 640 + 40,
		.hsync_end = 640 + 40 + 2,
		.htotal = 640 + 40 + 2 + 80,
		.vdisplay = 480,
		.vsync_start = 480 + 18,
		.vsync_end = 480 + 18 + 2,
		.vtotal = 480 + 18 + 2 + 28,
		.clock = 40250,
		.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
	},
	{ /* 60hz */
		.hdisplay = 640,
		.hsync_start = 640 + 40,
		.hsync_end = 640 + 40 + 2,
		.htotal = 640 + 40 + 2 + 80,
		.vdisplay = 480,
		.vsync_start = 480 + 18,
		.vsync_end = 480 + 18 + 2,
		.vtotal = 480 + 18 + 2 + 28,
		.clock = 24150,
		.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
	},
};

static const struct nv3051d_panel_info nv3051d_rgxx3_info = {
	.display_modes = nv3051d_rgxx3_modes,
	.num_modes = ARRAY_SIZE(nv3051d_rgxx3_modes),
	.width_mm = 70,
	.height_mm = 57,
	.bus_flags = DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
};

static const struct of_device_id newvision_nv3051d_of_match[] = {
	{ .compatible = "newvision,nv3051d", .data = &nv3051d_rgxx3_info },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, newvision_nv3051d_of_match);

static struct mipi_dsi_driver newvision_nv3051d_driver = {
	.driver = {
		.name = "panel-newvision-nv3051d",
		.of_match_table = newvision_nv3051d_of_match,
	},
	.probe	= panel_nv3051d_probe,
	.remove = panel_nv3051d_remove,
	.shutdown = panel_nv3051d_shutdown,
};
module_mipi_dsi_driver(newvision_nv3051d_driver);

MODULE_AUTHOR("Chris Morgan <[email protected]>")
MODULE_DESCRIPTION("DRM driver for Newvision NV3051D based MIPI DSI panels");
MODULE_LICENSE("GPL");
linux-master
drivers/gpu/drm/panel/panel-newvision-nv3051d.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2019, Huaqin Telecom Technology Co., Ltd * * Author: Jerry Han <[email protected]> * */ #include <linux/delay.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/gpio/consumer.h> #include <linux/regulator/consumer.h> #include <drm/drm_device.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_modes.h> #include <drm/drm_panel.h> #include <video/mipi_display.h> struct panel_cmd { char cmd; char data; }; struct panel_desc { const struct drm_display_mode *display_mode; unsigned int bpc; unsigned int width_mm; unsigned int height_mm; unsigned long mode_flags; enum mipi_dsi_pixel_format format; unsigned int lanes; const struct panel_cmd *on_cmds; unsigned int on_cmds_num; }; struct panel_info { struct drm_panel base; struct mipi_dsi_device *link; const struct panel_desc *desc; struct gpio_desc *enable_gpio; struct gpio_desc *pp33_gpio; struct gpio_desc *pp18_gpio; bool prepared; bool enabled; }; static inline struct panel_info *to_panel_info(struct drm_panel *panel) { return container_of(panel, struct panel_info, base); } static void disable_gpios(struct panel_info *pinfo) { gpiod_set_value(pinfo->enable_gpio, 0); gpiod_set_value(pinfo->pp33_gpio, 0); gpiod_set_value(pinfo->pp18_gpio, 0); } static int send_mipi_cmds(struct drm_panel *panel, const struct panel_cmd *cmds) { struct panel_info *pinfo = to_panel_info(panel); unsigned int i = 0; int err; for (i = 0; i < pinfo->desc->on_cmds_num; i++) { err = mipi_dsi_dcs_write_buffer(pinfo->link, &cmds[i], sizeof(struct panel_cmd)); if (err < 0) return err; } return 0; } static int boe_panel_disable(struct drm_panel *panel) { struct panel_info *pinfo = to_panel_info(panel); int err; if (!pinfo->enabled) return 0; err = mipi_dsi_dcs_set_display_off(pinfo->link); if (err < 0) { dev_err(panel->dev, "failed to set display off: %d\n", err); return err; } pinfo->enabled = false; return 0; } static int boe_panel_unprepare(struct drm_panel 
*panel) { struct panel_info *pinfo = to_panel_info(panel); int err; if (!pinfo->prepared) return 0; err = mipi_dsi_dcs_set_display_off(pinfo->link); if (err < 0) dev_err(panel->dev, "failed to set display off: %d\n", err); err = mipi_dsi_dcs_enter_sleep_mode(pinfo->link); if (err < 0) dev_err(panel->dev, "failed to enter sleep mode: %d\n", err); /* sleep_mode_delay: 1ms - 2ms */ usleep_range(1000, 2000); disable_gpios(pinfo); pinfo->prepared = false; return 0; } static int boe_panel_prepare(struct drm_panel *panel) { struct panel_info *pinfo = to_panel_info(panel); int err; if (pinfo->prepared) return 0; gpiod_set_value(pinfo->pp18_gpio, 1); /* T1: 5ms - 6ms */ usleep_range(5000, 6000); gpiod_set_value(pinfo->pp33_gpio, 1); /* reset sequence */ /* T2: 14ms - 15ms */ usleep_range(14000, 15000); gpiod_set_value(pinfo->enable_gpio, 1); /* T3: 1ms - 2ms */ usleep_range(1000, 2000); gpiod_set_value(pinfo->enable_gpio, 0); /* T4: 1ms - 2ms */ usleep_range(1000, 2000); gpiod_set_value(pinfo->enable_gpio, 1); /* T5: 5ms - 6ms */ usleep_range(5000, 6000); /* send init code */ err = send_mipi_cmds(panel, pinfo->desc->on_cmds); if (err < 0) { dev_err(panel->dev, "failed to send DCS Init Code: %d\n", err); goto poweroff; } err = mipi_dsi_dcs_exit_sleep_mode(pinfo->link); if (err < 0) { dev_err(panel->dev, "failed to exit sleep mode: %d\n", err); goto poweroff; } /* T6: 120ms - 121ms */ usleep_range(120000, 121000); err = mipi_dsi_dcs_set_display_on(pinfo->link); if (err < 0) { dev_err(panel->dev, "failed to set display on: %d\n", err); goto poweroff; } /* T7: 20ms - 21ms */ usleep_range(20000, 21000); pinfo->prepared = true; return 0; poweroff: disable_gpios(pinfo); return err; } static int boe_panel_enable(struct drm_panel *panel) { struct panel_info *pinfo = to_panel_info(panel); int ret; if (pinfo->enabled) return 0; usleep_range(120000, 121000); ret = mipi_dsi_dcs_set_display_on(pinfo->link); if (ret < 0) { dev_err(panel->dev, "failed to set display on: %d\n", ret); return 
ret; } pinfo->enabled = true; return 0; } static int boe_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector) { struct panel_info *pinfo = to_panel_info(panel); const struct drm_display_mode *m = pinfo->desc->display_mode; struct drm_display_mode *mode; mode = drm_mode_duplicate(connector->dev, m); if (!mode) { dev_err(pinfo->base.dev, "failed to add mode %ux%u@%u\n", m->hdisplay, m->vdisplay, drm_mode_vrefresh(m)); return -ENOMEM; } drm_mode_set_name(mode); drm_mode_probed_add(connector, mode); connector->display_info.width_mm = pinfo->desc->width_mm; connector->display_info.height_mm = pinfo->desc->height_mm; connector->display_info.bpc = pinfo->desc->bpc; return 1; } static const struct drm_panel_funcs panel_funcs = { .disable = boe_panel_disable, .unprepare = boe_panel_unprepare, .prepare = boe_panel_prepare, .enable = boe_panel_enable, .get_modes = boe_panel_get_modes, }; static const struct drm_display_mode default_display_mode = { .clock = 159420, .hdisplay = 1200, .hsync_start = 1200 + 80, .hsync_end = 1200 + 80 + 60, .htotal = 1200 + 80 + 60 + 24, .vdisplay = 1920, .vsync_start = 1920 + 10, .vsync_end = 1920 + 10 + 14, .vtotal = 1920 + 10 + 14 + 4, }; /* 8 inch */ static const struct panel_cmd boe_himax8279d8p_on_cmds[] = { { 0xB0, 0x05 }, { 0xB1, 0xE5 }, { 0xB3, 0x52 }, { 0xC0, 0x00 }, { 0xC2, 0x57 }, { 0xD9, 0x85 }, { 0xB0, 0x01 }, { 0xC8, 0x00 }, { 0xC9, 0x00 }, { 0xCC, 0x26 }, { 0xCD, 0x26 }, { 0xDC, 0x00 }, { 0xDD, 0x00 }, { 0xE0, 0x26 }, { 0xE1, 0x26 }, { 0xB0, 0x03 }, { 0xC3, 0x2A }, { 0xE7, 0x2A }, { 0xC5, 0x2A }, { 0xDE, 0x2A }, { 0xBC, 0x02 }, { 0xCB, 0x02 }, { 0xB0, 0x00 }, { 0xB6, 0x03 }, { 0xBA, 0x8B }, { 0xBF, 0x15 }, { 0xC0, 0x18 }, { 0xC2, 0x14 }, { 0xC3, 0x02 }, { 0xC4, 0x14 }, { 0xC5, 0x02 }, { 0xCC, 0x0A }, { 0xB0, 0x06 }, { 0xC0, 0xA5 }, { 0xD5, 0x20 }, { 0xC0, 0x00 }, { 0xB0, 0x02 }, { 0xC0, 0x00 }, { 0xC1, 0x02 }, { 0xC2, 0x06 }, { 0xC3, 0x16 }, { 0xC4, 0x0E }, { 0xC5, 0x18 }, { 0xC6, 0x26 }, { 0xC7, 0x32 }, { 
0xC8, 0x3F }, { 0xC9, 0x3F }, { 0xCA, 0x3F }, { 0xCB, 0x3F }, { 0xCC, 0x3D }, { 0xCD, 0x2F }, { 0xCE, 0x2F }, { 0xCF, 0x2F }, { 0xD0, 0x07 }, { 0xD2, 0x00 }, { 0xD3, 0x02 }, { 0xD4, 0x06 }, { 0xD5, 0x12 }, { 0xD6, 0x0A }, { 0xD7, 0x14 }, { 0xD8, 0x22 }, { 0xD9, 0x2E }, { 0xDA, 0x3D }, { 0xDB, 0x3F }, { 0xDC, 0x3F }, { 0xDD, 0x3F }, { 0xDE, 0x3D }, { 0xDF, 0x2F }, { 0xE0, 0x2F }, { 0xE1, 0x2F }, { 0xE2, 0x07 }, { 0xB0, 0x07 }, { 0xB1, 0x18 }, { 0xB2, 0x19 }, { 0xB3, 0x2E }, { 0xB4, 0x52 }, { 0xB5, 0x72 }, { 0xB6, 0x8C }, { 0xB7, 0xBD }, { 0xB8, 0xEB }, { 0xB9, 0x47 }, { 0xBA, 0x96 }, { 0xBB, 0x1E }, { 0xBC, 0x90 }, { 0xBD, 0x93 }, { 0xBE, 0xFA }, { 0xBF, 0x56 }, { 0xC0, 0x8C }, { 0xC1, 0xB7 }, { 0xC2, 0xCC }, { 0xC3, 0xDF }, { 0xC4, 0xE8 }, { 0xC5, 0xF0 }, { 0xC6, 0xF8 }, { 0xC7, 0xFA }, { 0xC8, 0xFC }, { 0xC9, 0x00 }, { 0xCA, 0x00 }, { 0xCB, 0x5A }, { 0xCC, 0xAF }, { 0xCD, 0xFF }, { 0xCE, 0xFF }, { 0xB0, 0x08 }, { 0xB1, 0x04 }, { 0xB2, 0x15 }, { 0xB3, 0x2D }, { 0xB4, 0x51 }, { 0xB5, 0x72 }, { 0xB6, 0x8D }, { 0xB7, 0xBE }, { 0xB8, 0xED }, { 0xB9, 0x4A }, { 0xBA, 0x9A }, { 0xBB, 0x23 }, { 0xBC, 0x95 }, { 0xBD, 0x98 }, { 0xBE, 0xFF }, { 0xBF, 0x59 }, { 0xC0, 0x8E }, { 0xC1, 0xB9 }, { 0xC2, 0xCD }, { 0xC3, 0xDF }, { 0xC4, 0xE8 }, { 0xC5, 0xF0 }, { 0xC6, 0xF8 }, { 0xC7, 0xFA }, { 0xC8, 0xFC }, { 0xC9, 0x00 }, { 0xCA, 0x00 }, { 0xCB, 0x5A }, { 0xCC, 0xAF }, { 0xCD, 0xFF }, { 0xCE, 0xFF }, { 0xB0, 0x09 }, { 0xB1, 0x04 }, { 0xB2, 0x2C }, { 0xB3, 0x36 }, { 0xB4, 0x53 }, { 0xB5, 0x73 }, { 0xB6, 0x8E }, { 0xB7, 0xC0 }, { 0xB8, 0xEF }, { 0xB9, 0x4C }, { 0xBA, 0x9D }, { 0xBB, 0x25 }, { 0xBC, 0x96 }, { 0xBD, 0x9A }, { 0xBE, 0x01 }, { 0xBF, 0x59 }, { 0xC0, 0x8E }, { 0xC1, 0xB9 }, { 0xC2, 0xCD }, { 0xC3, 0xDF }, { 0xC4, 0xE8 }, { 0xC5, 0xF0 }, { 0xC6, 0xF8 }, { 0xC7, 0xFA }, { 0xC8, 0xFC }, { 0xC9, 0x00 }, { 0xCA, 0x00 }, { 0xCB, 0x5A }, { 0xCC, 0xBF }, { 0xCD, 0xFF }, { 0xCE, 0xFF }, { 0xB0, 0x0A }, { 0xB1, 0x18 }, { 0xB2, 0x19 }, { 0xB3, 0x2E }, { 0xB4, 0x52 }, { 0xB5, 0x72 }, { 
0xB6, 0x8C }, { 0xB7, 0xBD }, { 0xB8, 0xEB }, { 0xB9, 0x47 }, { 0xBA, 0x96 }, { 0xBB, 0x1E }, { 0xBC, 0x90 }, { 0xBD, 0x93 }, { 0xBE, 0xFA }, { 0xBF, 0x56 }, { 0xC0, 0x8C }, { 0xC1, 0xB7 }, { 0xC2, 0xCC }, { 0xC3, 0xDF }, { 0xC4, 0xE8 }, { 0xC5, 0xF0 }, { 0xC6, 0xF8 }, { 0xC7, 0xFA }, { 0xC8, 0xFC }, { 0xC9, 0x00 }, { 0xCA, 0x00 }, { 0xCB, 0x5A }, { 0xCC, 0xAF }, { 0xCD, 0xFF }, { 0xCE, 0xFF }, { 0xB0, 0x0B }, { 0xB1, 0x04 }, { 0xB2, 0x15 }, { 0xB3, 0x2D }, { 0xB4, 0x51 }, { 0xB5, 0x72 }, { 0xB6, 0x8D }, { 0xB7, 0xBE }, { 0xB8, 0xED }, { 0xB9, 0x4A }, { 0xBA, 0x9A }, { 0xBB, 0x23 }, { 0xBC, 0x95 }, { 0xBD, 0x98 }, { 0xBE, 0xFF }, { 0xBF, 0x59 }, { 0xC0, 0x8E }, { 0xC1, 0xB9 }, { 0xC2, 0xCD }, { 0xC3, 0xDF }, { 0xC4, 0xE8 }, { 0xC5, 0xF0 }, { 0xC6, 0xF8 }, { 0xC7, 0xFA }, { 0xC8, 0xFC }, { 0xC9, 0x00 }, { 0xCA, 0x00 }, { 0xCB, 0x5A }, { 0xCC, 0xAF }, { 0xCD, 0xFF }, { 0xCE, 0xFF }, { 0xB0, 0x0C }, { 0xB1, 0x04 }, { 0xB2, 0x2C }, { 0xB3, 0x36 }, { 0xB4, 0x53 }, { 0xB5, 0x73 }, { 0xB6, 0x8E }, { 0xB7, 0xC0 }, { 0xB8, 0xEF }, { 0xB9, 0x4C }, { 0xBA, 0x9D }, { 0xBB, 0x25 }, { 0xBC, 0x96 }, { 0xBD, 0x9A }, { 0xBE, 0x01 }, { 0xBF, 0x59 }, { 0xC0, 0x8E }, { 0xC1, 0xB9 }, { 0xC2, 0xCD }, { 0xC3, 0xDF }, { 0xC4, 0xE8 }, { 0xC5, 0xF0 }, { 0xC6, 0xF8 }, { 0xC7, 0xFA }, { 0xC8, 0xFC }, { 0xC9, 0x00 }, { 0xCA, 0x00 }, { 0xCB, 0x5A }, { 0xCC, 0xBF }, { 0xCD, 0xFF }, { 0xCE, 0xFF }, { 0xB0, 0x04 }, { 0xB5, 0x02 }, { 0xB6, 0x01 }, }; static const struct panel_desc boe_himax8279d8p_panel_desc = { .display_mode = &default_display_mode, .bpc = 8, .width_mm = 107, .height_mm = 172, .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM, .format = MIPI_DSI_FMT_RGB888, .lanes = 4, .on_cmds = boe_himax8279d8p_on_cmds, .on_cmds_num = 260, }; /* 10 inch */ static const struct panel_cmd boe_himax8279d10p_on_cmds[] = { { 0xB0, 0x05 }, { 0xB1, 0xE5 }, { 0xB3, 0x52 }, { 0xB0, 0x00 }, { 0xB6, 0x03 }, { 0xBA, 0x8B }, { 0xBF, 0x1A }, 
{ 0xC0, 0x0F }, { 0xC2, 0x0C }, { 0xC3, 0x02 }, { 0xC4, 0x0C },
	{ 0xC5, 0x02 },
	{ 0xB0, 0x01 }, { 0xE0, 0x26 }, { 0xE1, 0x26 }, { 0xDC, 0x00 },
	{ 0xDD, 0x00 }, { 0xCC, 0x26 }, { 0xCD, 0x26 }, { 0xC8, 0x00 },
	{ 0xC9, 0x00 }, { 0xD2, 0x03 }, { 0xD3, 0x03 }, { 0xE6, 0x04 },
	{ 0xE7, 0x04 }, { 0xC4, 0x09 }, { 0xC5, 0x09 }, { 0xD8, 0x0A },
	{ 0xD9, 0x0A }, { 0xC2, 0x0B }, { 0xC3, 0x0B }, { 0xD6, 0x0C },
	{ 0xD7, 0x0C }, { 0xC0, 0x05 }, { 0xC1, 0x05 }, { 0xD4, 0x06 },
	{ 0xD5, 0x06 }, { 0xCA, 0x07 }, { 0xCB, 0x07 }, { 0xDE, 0x08 },
	{ 0xDF, 0x08 },
	{ 0xB0, 0x02 }, { 0xC0, 0x00 }, { 0xC1, 0x0D }, { 0xC2, 0x17 },
	{ 0xC3, 0x26 }, { 0xC4, 0x31 }, { 0xC5, 0x1C }, { 0xC6, 0x2C },
	{ 0xC7, 0x33 }, { 0xC8, 0x31 }, { 0xC9, 0x37 }, { 0xCA, 0x37 },
	{ 0xCB, 0x37 }, { 0xCC, 0x39 }, { 0xCD, 0x2E }, { 0xCE, 0x2F },
	{ 0xCF, 0x2F }, { 0xD0, 0x07 }, { 0xD2, 0x00 }, { 0xD3, 0x0D },
	{ 0xD4, 0x17 }, { 0xD5, 0x26 }, { 0xD6, 0x31 }, { 0xD7, 0x3F },
	{ 0xD8, 0x3F }, { 0xD9, 0x3F }, { 0xDA, 0x3F }, { 0xDB, 0x37 },
	{ 0xDC, 0x37 }, { 0xDD, 0x37 }, { 0xDE, 0x39 }, { 0xDF, 0x2E },
	{ 0xE0, 0x2F }, { 0xE1, 0x2F }, { 0xE2, 0x07 },
	{ 0xB0, 0x03 }, { 0xC8, 0x0B }, { 0xC9, 0x07 }, { 0xC3, 0x00 },
	{ 0xE7, 0x00 }, { 0xC5, 0x2A }, { 0xDE, 0x2A }, { 0xCA, 0x43 },
	{ 0xC9, 0x07 }, { 0xE4, 0xC0 }, { 0xE5, 0x0D }, { 0xCB, 0x01 },
	{ 0xBC, 0x01 },
	{ 0xB0, 0x06 }, { 0xB8, 0xA5 }, { 0xC0, 0xA5 }, { 0xC7, 0x0F },
	{ 0xD5, 0x32 }, { 0xB8, 0x00 }, { 0xC0, 0x00 }, { 0xBC, 0x00 },
	{ 0xB0, 0x07 }, { 0xB1, 0x00 }, { 0xB2, 0x05 }, { 0xB3, 0x10 },
	{ 0xB4, 0x22 }, { 0xB5, 0x36 }, { 0xB6, 0x4A }, { 0xB7, 0x6C },
	{ 0xB8, 0x9A }, { 0xB9, 0xD7 }, { 0xBA, 0x17 }, { 0xBB, 0x92 },
	{ 0xBC, 0x15 }, { 0xBD, 0x18 }, { 0xBE, 0x8C }, { 0xBF, 0x00 },
	{ 0xC0, 0x3A }, { 0xC1, 0x72 }, { 0xC2, 0x8C }, { 0xC3, 0xA5 },
	{ 0xC4, 0xB1 }, { 0xC5, 0xBE }, { 0xC6, 0xCA }, { 0xC7, 0xD1 },
	{ 0xC8, 0xD4 }, { 0xC9, 0x00 }, { 0xCA, 0x00 }, { 0xCB, 0x16 },
	{ 0xCC, 0xAF }, { 0xCD, 0xFF }, { 0xCE, 0xFF },
	{ 0xB0, 0x08 }, { 0xB1, 0x04 }, { 0xB2, 0x05 }, { 0xB3, 0x11 },
	{ 0xB4, 0x24 }, { 0xB5, 0x39 }, { 0xB6, 0x4E }, { 0xB7, 0x72 },
	{ 0xB8, 0xA3 }, { 0xB9, 0xE1 }, { 0xBA, 0x25 }, { 0xBB, 0xA8 },
	{ 0xBC, 0x2E }, { 0xBD, 0x32 }, { 0xBE, 0xAD }, { 0xBF, 0x28 },
	{ 0xC0, 0x63 }, { 0xC1, 0x9B }, { 0xC2, 0xB5 }, { 0xC3, 0xCF },
	{ 0xC4, 0xDB }, { 0xC5, 0xE8 }, { 0xC6, 0xF5 }, { 0xC7, 0xFA },
	{ 0xC8, 0xFC }, { 0xC9, 0x00 }, { 0xCA, 0x00 }, { 0xCB, 0x16 },
	{ 0xCC, 0xAF }, { 0xCD, 0xFF }, { 0xCE, 0xFF },
	{ 0xB0, 0x09 }, { 0xB1, 0x04 }, { 0xB2, 0x04 }, { 0xB3, 0x0F },
	{ 0xB4, 0x22 }, { 0xB5, 0x37 }, { 0xB6, 0x4D }, { 0xB7, 0x71 },
	{ 0xB8, 0xA2 }, { 0xB9, 0xE1 }, { 0xBA, 0x26 }, { 0xBB, 0xA9 },
	{ 0xBC, 0x2F }, { 0xBD, 0x33 }, { 0xBE, 0xAC }, { 0xBF, 0x24 },
	{ 0xC0, 0x5D }, { 0xC1, 0x94 }, { 0xC2, 0xAC }, { 0xC3, 0xC5 },
	{ 0xC4, 0xD1 }, { 0xC5, 0xDC }, { 0xC6, 0xE8 }, { 0xC7, 0xED },
	{ 0xC8, 0xF0 }, { 0xC9, 0x00 }, { 0xCA, 0x00 }, { 0xCB, 0x16 },
	{ 0xCC, 0xAF }, { 0xCD, 0xFF }, { 0xCE, 0xFF },
	{ 0xB0, 0x0A }, { 0xB1, 0x00 }, { 0xB2, 0x05 }, { 0xB3, 0x10 },
	{ 0xB4, 0x22 }, { 0xB5, 0x36 }, { 0xB6, 0x4A }, { 0xB7, 0x6C },
	{ 0xB8, 0x9A }, { 0xB9, 0xD7 }, { 0xBA, 0x17 }, { 0xBB, 0x92 },
	{ 0xBC, 0x15 }, { 0xBD, 0x18 }, { 0xBE, 0x8C }, { 0xBF, 0x00 },
	{ 0xC0, 0x3A }, { 0xC1, 0x72 }, { 0xC2, 0x8C }, { 0xC3, 0xA5 },
	{ 0xC4, 0xB1 }, { 0xC5, 0xBE }, { 0xC6, 0xCA }, { 0xC7, 0xD1 },
	{ 0xC8, 0xD4 }, { 0xC9, 0x00 }, { 0xCA, 0x00 }, { 0xCB, 0x16 },
	{ 0xCC, 0xAF }, { 0xCD, 0xFF }, { 0xCE, 0xFF },
	{ 0xB0, 0x0B }, { 0xB1, 0x04 }, { 0xB2, 0x05 }, { 0xB3, 0x11 },
	{ 0xB4, 0x24 }, { 0xB5, 0x39 }, { 0xB6, 0x4E }, { 0xB7, 0x72 },
	{ 0xB8, 0xA3 }, { 0xB9, 0xE1 }, { 0xBA, 0x25 }, { 0xBB, 0xA8 },
	{ 0xBC, 0x2E }, { 0xBD, 0x32 }, { 0xBE, 0xAD }, { 0xBF, 0x28 },
	{ 0xC0, 0x63 }, { 0xC1, 0x9B }, { 0xC2, 0xB5 }, { 0xC3, 0xCF },
	{ 0xC4, 0xDB }, { 0xC5, 0xE8 }, { 0xC6, 0xF5 }, { 0xC7, 0xFA },
	{ 0xC8, 0xFC }, { 0xC9, 0x00 }, { 0xCA, 0x00 }, { 0xCB, 0x16 },
	{ 0xCC, 0xAF }, { 0xCD, 0xFF }, { 0xCE, 0xFF },
	{ 0xB0, 0x0C }, { 0xB1, 0x04 }, { 0xB2, 0x04 }, { 0xB3, 0x0F },
	{ 0xB4, 0x22 }, { 0xB5, 0x37 }, { 0xB6, 0x4D }, { 0xB7, 0x71 },
	{ 0xB8, 0xA2 }, { 0xB9, 0xE1 }, { 0xBA, 0x26 }, { 0xBB, 0xA9 },
	{ 0xBC, 0x2F }, { 0xBD, 0x33 }, { 0xBE, 0xAC }, { 0xBF, 0x24 },
	{ 0xC0, 0x5D }, { 0xC1, 0x94 }, { 0xC2, 0xAC }, { 0xC3, 0xC5 },
	{ 0xC4, 0xD1 }, { 0xC5, 0xDC }, { 0xC6, 0xE8 }, { 0xC7, 0xED },
	{ 0xC8, 0xF0 }, { 0xC9, 0x00 }, { 0xCA, 0x00 }, { 0xCB, 0x16 },
	{ 0xCC, 0xAF }, { 0xCD, 0xFF }, { 0xCE, 0xFF },
};

static const struct panel_desc boe_himax8279d10p_panel_desc = {
	.display_mode = &default_display_mode,
	.bpc = 8,
	.width_mm = 135,
	.height_mm = 216,
	.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
		      MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM,
	.format = MIPI_DSI_FMT_RGB888,
	.lanes = 4,
	.on_cmds = boe_himax8279d10p_on_cmds,
	/* Was a hard-coded 283; ARRAY_SIZE cannot drift from the table. */
	.on_cmds_num = ARRAY_SIZE(boe_himax8279d10p_on_cmds),
};

static const struct of_device_id panel_of_match[] = {
	{
		.compatible = "boe,himax8279d8p",
		.data = &boe_himax8279d8p_panel_desc,
	},
	{
		.compatible = "boe,himax8279d10p",
		.data = &boe_himax8279d10p_panel_desc,
	},
	{
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, panel_of_match);

/* Claim the three control GPIOs and register the DRM panel. */
static int panel_add(struct panel_info *pinfo)
{
	struct device *dev = &pinfo->link->dev;
	int ret;

	pinfo->pp18_gpio = devm_gpiod_get(dev, "pp18", GPIOD_OUT_HIGH);
	if (IS_ERR(pinfo->pp18_gpio)) {
		ret = PTR_ERR(pinfo->pp18_gpio);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get pp18 gpio: %d\n", ret);
		return ret;
	}

	pinfo->pp33_gpio = devm_gpiod_get(dev, "pp33", GPIOD_OUT_HIGH);
	if (IS_ERR(pinfo->pp33_gpio)) {
		ret = PTR_ERR(pinfo->pp33_gpio);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get pp33 gpio: %d\n", ret);
		return ret;
	}

	pinfo->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_HIGH);
	if (IS_ERR(pinfo->enable_gpio)) {
		ret = PTR_ERR(pinfo->enable_gpio);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get enable gpio: %d\n", ret);
		return ret;
	}

	drm_panel_init(&pinfo->base, dev, &panel_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	ret = drm_panel_of_backlight(&pinfo->base);
	if (ret)
		return ret;

	drm_panel_add(&pinfo->base);

	return 0;
}

static int panel_probe(struct mipi_dsi_device *dsi)
{
	struct panel_info *pinfo;
	const struct panel_desc *desc;
	int err;

	pinfo = devm_kzalloc(&dsi->dev, sizeof(*pinfo), GFP_KERNEL);
	if (!pinfo)
		return -ENOMEM;

	desc = of_device_get_match_data(&dsi->dev);
	/* Guard: original dereferenced desc without checking for NULL. */
	if (!desc)
		return -ENODEV;

	dsi->mode_flags = desc->mode_flags;
	dsi->format = desc->format;
	dsi->lanes = desc->lanes;
	pinfo->desc = desc;
	pinfo->link = dsi;

	mipi_dsi_set_drvdata(dsi, pinfo);

	err = panel_add(pinfo);
	if (err < 0)
		return err;

	err = mipi_dsi_attach(dsi);
	if (err < 0)
		drm_panel_remove(&pinfo->base);

	return err;
}

static void panel_remove(struct mipi_dsi_device *dsi)
{
	struct panel_info *pinfo = mipi_dsi_get_drvdata(dsi);
	int err;

	err = boe_panel_disable(&pinfo->base);
	if (err < 0)
		dev_err(&dsi->dev, "failed to disable panel: %d\n", err);

	err = boe_panel_unprepare(&pinfo->base);
	if (err < 0)
		dev_err(&dsi->dev, "failed to unprepare panel: %d\n", err);

	err = mipi_dsi_detach(dsi);
	if (err < 0)
		dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);

	drm_panel_remove(&pinfo->base);
}

static void panel_shutdown(struct mipi_dsi_device *dsi)
{
	struct panel_info *pinfo = mipi_dsi_get_drvdata(dsi);

	boe_panel_disable(&pinfo->base);
	boe_panel_unprepare(&pinfo->base);
}

static struct mipi_dsi_driver panel_driver = {
	.driver = {
		.name = "panel-boe-himax8279d",
		.of_match_table = panel_of_match,
	},
	.probe = panel_probe,
	.remove = panel_remove,
	.shutdown = panel_shutdown,
};
module_mipi_dsi_driver(panel_driver);

MODULE_AUTHOR("Jerry Han <[email protected]>");
MODULE_DESCRIPTION("Boe Himax8279d driver");
MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/panel/panel-boe-himax8279d.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Innolux/Chimei EJ030NA TFT LCD panel driver
 *
 * Copyright (C) 2020, Paul Cercueil <[email protected]>
 * Copyright (C) 2020, Christophe Branchereau <[email protected]>
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>

#include <drm/drm_modes.h>
#include <drm/drm_panel.h>

/* Static per-compatible description of a supported panel variant. */
struct ej030na_info {
	const struct drm_display_mode *display_modes;
	unsigned int num_modes;
	u16 width_mm, height_mm;	/* active area, millimetres */
	u32 bus_format, bus_flags;
};

/* Per-device driver state. */
struct ej030na {
	struct drm_panel panel;
	struct spi_device *spi;
	struct regmap *map;		/* SPI register access */

	const struct ej030na_info *panel_info;

	struct regulator *supply;
	struct gpio_desc *reset_gpio;	/* active high = panel held in reset */
};

static inline struct ej030na *to_ej030na(struct drm_panel *panel)
{
	return container_of(panel, struct ej030na, panel);
}

/*
 * Vendor init sequence written over SPI after reset release.
 * Register meanings are not publicly documented.
 */
static const struct reg_sequence ej030na_init_sequence[] = {
	{ 0x05, 0x1e },
	{ 0x05, 0x5c },
	{ 0x02, 0x14 },
	{ 0x03, 0x40 },
	{ 0x04, 0x07 },
	{ 0x06, 0x12 },
	{ 0x07, 0xd2 },
	{ 0x0c, 0x06 },
	{ 0x0d, 0x40 },
	{ 0x0e, 0x40 },
	{ 0x0f, 0x40 },
	{ 0x10, 0x40 },
	{ 0x11, 0x40 },
	{ 0x2f, 0x40 },
	{ 0x5a, 0x02 },

	{ 0x30, 0x07 },
	{ 0x31, 0x57 },
	{ 0x32, 0x53 },
	{ 0x33, 0x77 },
	{ 0x34, 0xb8 },
	{ 0x35, 0xbd },
	{ 0x36, 0xb8 },
	{ 0x37, 0xe7 },
	{ 0x38, 0x04 },
	{ 0x39, 0xff },

	{ 0x40, 0x0b },
	{ 0x41, 0xb8 },
	{ 0x42, 0xab },
	{ 0x43, 0xb9 },
	{ 0x44, 0x6a },
	{ 0x45, 0x56 },
	{ 0x46, 0x61 },
	{ 0x47, 0x08 },
	{ 0x48, 0x0f },
	{ 0x49, 0x0f },
};

/*
 * Power on: enable the supply, pulse reset, then program the init
 * sequence.  On init failure the supply is dropped again.
 */
static int ej030na_prepare(struct drm_panel *panel)
{
	struct ej030na *priv = to_ej030na(panel);
	struct device *dev = &priv->spi->dev;
	int err;

	err = regulator_enable(priv->supply);
	if (err) {
		dev_err(dev, "Failed to enable power supply: %d\n", err);
		return err;
	}

	/* Reset the chip */
	gpiod_set_value_cansleep(priv->reset_gpio, 1);
	usleep_range(50, 150);
	gpiod_set_value_cansleep(priv->reset_gpio, 0);
	usleep_range(50, 150);

	err = regmap_multi_reg_write(priv->map, ej030na_init_sequence,
				     ARRAY_SIZE(ej030na_init_sequence));
	if (err) {
		dev_err(dev, "Failed to init registers: %d\n", err);
		goto err_disable_regulator;
	}

	return 0;

err_disable_regulator:
	regulator_disable(priv->supply);
	return err;
}

/* Power off: hold the panel in reset, then cut the supply. */
static int ej030na_unprepare(struct drm_panel *panel)
{
	struct ej030na *priv = to_ej030na(panel);

	gpiod_set_value_cansleep(priv->reset_gpio, 1);
	regulator_disable(priv->supply);

	return 0;
}

static int ej030na_enable(struct drm_panel *panel)
{
	struct ej030na *priv = to_ej030na(panel);

	/* standby off */
	/* NOTE(review): regmap_write result is ignored here and in disable. */
	regmap_write(priv->map, 0x2b, 0x01);

	if (panel->backlight) {
		/* Wait for the picture to be ready before enabling backlight */
		msleep(120);
	}

	return 0;
}

static int ej030na_disable(struct drm_panel *panel)
{
	struct ej030na *priv = to_ej030na(panel);

	/* standby on */
	regmap_write(priv->map, 0x2b, 0x00);

	return 0;
}

/*
 * Report the fixed mode list from the match data, plus the physical
 * size and bus format for the connector.
 */
static int ej030na_get_modes(struct drm_panel *panel,
			     struct drm_connector *connector)
{
	struct ej030na *priv = to_ej030na(panel);
	const struct ej030na_info *panel_info = priv->panel_info;
	struct drm_display_mode *mode;
	unsigned int i;

	for (i = 0; i < panel_info->num_modes; i++) {
		mode = drm_mode_duplicate(connector->dev,
					  &panel_info->display_modes[i]);
		if (!mode)
			return -ENOMEM;

		drm_mode_set_name(mode);

		mode->type = DRM_MODE_TYPE_DRIVER;
		if (panel_info->num_modes == 1)
			mode->type |= DRM_MODE_TYPE_PREFERRED;

		drm_mode_probed_add(connector, mode);
	}

	connector->display_info.bpc = 8;
	connector->display_info.width_mm = panel_info->width_mm;
	connector->display_info.height_mm = panel_info->height_mm;

	drm_display_info_set_bus_formats(&connector->display_info,
					 &panel_info->bus_format, 1);
	connector->display_info.bus_flags = panel_info->bus_flags;

	return panel_info->num_modes;
}

static const struct drm_panel_funcs ej030na_funcs = {
	.prepare	= ej030na_prepare,
	.unprepare	= ej030na_unprepare,
	.enable		= ej030na_enable,
	.disable	= ej030na_disable,
	.get_modes	= ej030na_get_modes,
};

static const struct regmap_config ej030na_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0x5a,
};

static int ej030na_probe(struct spi_device *spi)
{
	struct device *dev = &spi->dev;
	struct ej030na *priv;
	int err;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->spi = spi;
	spi_set_drvdata(spi, priv);

	priv->map = devm_regmap_init_spi(spi, &ej030na_regmap_config);
	if (IS_ERR(priv->map)) {
		dev_err(dev, "Unable to init regmap\n");
		return PTR_ERR(priv->map);
	}

	priv->panel_info = of_device_get_match_data(dev);
	if (!priv->panel_info)
		return -EINVAL;

	priv->supply = devm_regulator_get(dev, "power");
	if (IS_ERR(priv->supply))
		return dev_err_probe(dev, PTR_ERR(priv->supply),
				     "Failed to get power supply\n");

	/* Requested asserted: panel stays in reset until prepare. */
	priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(priv->reset_gpio))
		return dev_err_probe(dev, PTR_ERR(priv->reset_gpio),
				     "Failed to get reset GPIO\n");

	drm_panel_init(&priv->panel, dev, &ej030na_funcs,
		       DRM_MODE_CONNECTOR_DPI);

	err = drm_panel_of_backlight(&priv->panel);
	if (err)
		return err;

	drm_panel_add(&priv->panel);

	return 0;
}

static void ej030na_remove(struct spi_device *spi)
{
	struct ej030na *priv = spi_get_drvdata(spi);

	/*
	 * NOTE(review): the panel is unregistered before it is disabled /
	 * unprepared; confirm this ordering is intentional.
	 */
	drm_panel_remove(&priv->panel);
	drm_panel_disable(&priv->panel);
	drm_panel_unprepare(&priv->panel);
}

static const struct drm_display_mode ej030na_modes[] = {
	{ /* 60 Hz */
		.clock = 14400,
		.hdisplay = 320,
		.hsync_start = 320 + 10,
		.hsync_end = 320 + 10 + 37,
		.htotal = 320 + 10 + 37 + 33,
		.vdisplay = 480,
		/*
		 * NOTE(review): vsync_end == vtotal here (102 + 9 + 9),
		 * unlike the 50 Hz entry below — confirm against datasheet.
		 */
		.vsync_start = 480 + 102,
		.vsync_end = 480 + 102 + 9 + 9,
		.vtotal = 480 + 102 + 9 + 9,
		.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
	},
	{ /* 50 Hz */
		.clock = 12000,
		.hdisplay = 320,
		.hsync_start = 320 + 10,
		.hsync_end = 320 + 10 + 37,
		.htotal = 320 + 10 + 37 + 33,
		.vdisplay = 480,
		.vsync_start = 480 + 102,
		.vsync_end = 480 + 102 + 9,
		.vtotal = 480 + 102 + 9 + 9,
		.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
	},
};

static const struct ej030na_info ej030na_info = {
	.display_modes = ej030na_modes,
	.num_modes = ARRAY_SIZE(ej030na_modes),
	.width_mm = 70,
	.height_mm = 51,
	.bus_format = MEDIA_BUS_FMT_RGB888_3X8_DELTA,
	.bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE | DRM_BUS_FLAG_DE_LOW,
};

static const struct of_device_id ej030na_of_match[] = {
	{ .compatible = "innolux,ej030na", .data = &ej030na_info },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ej030na_of_match);

static struct spi_driver ej030na_driver = {
	.driver = {
		.name = "panel-innolux-ej030na",
		.of_match_table = ej030na_of_match,
	},
	.probe = ej030na_probe,
	.remove = ej030na_remove,
};
module_spi_driver(ej030na_driver);

MODULE_AUTHOR("Paul Cercueil <[email protected]>");
MODULE_AUTHOR("Christophe Branchereau <[email protected]>");
MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/panel/panel-innolux-ej030na.c
/* * Copyright (C) 2013, NVIDIA Corporation. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sub license, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/iopoll.h> #include <linux/module.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/regulator/consumer.h> #include <video/display_timing.h> #include <video/of_display_timing.h> #include <video/videomode.h> #include <drm/display/drm_dp_aux_bus.h> #include <drm/display/drm_dp_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_device.h> #include <drm/drm_edid.h> #include <drm/drm_panel.h> /** * struct panel_delay - Describes delays for a simple panel. */ struct panel_delay { /** * @hpd_reliable: Time for HPD to be reliable * * The time (in milliseconds) that it takes after powering the panel * before the HPD signal is reliable. 
Ideally this is 0 but some panels, * board designs, or bad pulldown configs can cause a glitch here. * * NOTE: on some old panel data this number appears to be much too big. * Presumably some old panels simply didn't have HPD hooked up and put * the hpd_absent here because this field predates the * hpd_absent. While that works, it's non-ideal. */ unsigned int hpd_reliable; /** * @hpd_absent: Time to wait if HPD isn't hooked up. * * Add this to the prepare delay if we know Hot Plug Detect isn't used. * * This is T3-max on eDP timing diagrams or the delay from power on * until HPD is guaranteed to be asserted. */ unsigned int hpd_absent; /** * @prepare_to_enable: Time between prepare and enable. * * The minimum time, in milliseconds, that needs to have passed * between when prepare finished and enable may begin. If at * enable time less time has passed since prepare finished, * the driver waits for the remaining time. * * If a fixed enable delay is also specified, we'll start * counting before delaying for the fixed delay. * * If a fixed prepare delay is also specified, we won't start * counting until after the fixed delay. We can't overlap this * fixed delay with the min time because the fixed delay * doesn't happen at the end of the function if a HPD GPIO was * specified. * * In other words: * prepare() * ... * // do fixed prepare delay * // wait for HPD GPIO if applicable * // start counting for prepare_to_enable * * enable() * // do fixed enable delay * // enforce prepare_to_enable min time * * This is not specified in a standard way on eDP timing diagrams. * It is effectively the time from HPD going high till you can * turn on the backlight. */ unsigned int prepare_to_enable; /** * @enable: Time for the panel to display a valid frame. * * The time (in milliseconds) that it takes for the panel to * display the first valid frame after starting to receive * video data. 
* * This is (T6-min + max(T7-max, T8-min)) on eDP timing diagrams or * the delay after link training finishes until we can turn the * backlight on and see valid data. */ unsigned int enable; /** * @disable: Time for the panel to turn the display off. * * The time (in milliseconds) that it takes for the panel to * turn the display off (no content is visible). * * This is T9-min (delay from backlight off to end of valid video * data) on eDP timing diagrams. It is not common to set. */ unsigned int disable; /** * @unprepare: Time to power down completely. * * The time (in milliseconds) that it takes for the panel * to power itself down completely. * * This time is used to prevent a future "prepare" from * starting until at least this many milliseconds has passed. * If at prepare time less time has passed since unprepare * finished, the driver waits for the remaining time. * * This is T12-min on eDP timing diagrams. */ unsigned int unprepare; }; /** * struct panel_desc - Describes a simple panel. */ struct panel_desc { /** * @modes: Pointer to array of fixed modes appropriate for this panel. * * If only one mode then this can just be the address of the mode. * NOTE: cannot be used with "timings" and also if this is specified * then you cannot override the mode in the device tree. */ const struct drm_display_mode *modes; /** @num_modes: Number of elements in modes array. */ unsigned int num_modes; /** * @timings: Pointer to array of display timings * * NOTE: cannot be used with "modes" and also these will be used to * validate a device tree override if one is present. */ const struct display_timing *timings; /** @num_timings: Number of elements in timings array. */ unsigned int num_timings; /** @bpc: Bits per color. */ unsigned int bpc; /** @size: Structure containing the physical size of this panel. */ struct { /** * @size.width: Width (in mm) of the active display area. */ unsigned int width; /** * @size.height: Height (in mm) of the active display area. 
 */
		unsigned int height;
	} size;

	/** @delay: Structure containing various delay values for this panel. */
	struct panel_delay delay;
};

/**
 * struct edp_panel_entry - Maps panel ID to delay / panel name.
 */
struct edp_panel_entry {
	/** @panel_id: 32-bit ID for panel, encoded with drm_edid_encode_panel_id(). */
	u32 panel_id;

	/** @delay: The power sequencing delays needed for this panel. */
	const struct panel_delay *delay;

	/** @name: Name of this panel (for printing to logs). */
	const char *name;
};

/* Driver-private state for one eDP panel. */
struct panel_edp {
	struct drm_panel base;
	bool enabled;		/* display is on: enable() ran, disable() hasn't */
	bool no_hpd;		/* DT "no-hpd": HPD not usable, use worst-case delays */
	bool prepared;		/* power-up sequence completed (prepare() ran) */

	ktime_t prepared_time;		/* boottime stamp of last successful power-up */
	ktime_t unprepared_time;	/* boottime stamp of last power-down */

	const struct panel_desc *desc;

	struct regulator *supply;
	struct i2c_adapter *ddc;
	struct drm_dp_aux *aux;

	struct gpio_desc *enable_gpio;
	struct gpio_desc *hpd_gpio;

	/* EDID-matched table entry; NULL = hardcoded desc, ERR_PTR = unknown panel */
	const struct edp_panel_entry *detected_panel;

	struct edid *edid;

	struct drm_display_mode override_mode;

	enum drm_panel_orientation orientation;
};

static inline struct panel_edp *to_panel_edp(struct drm_panel *panel)
{
	return container_of(panel, struct panel_edp, base);
}

/*
 * Add modes built from the desc's display_timing entries to @connector.
 * Returns the number of modes added.
 */
static unsigned int panel_edp_get_timings_modes(struct panel_edp *panel,
						struct drm_connector *connector)
{
	struct drm_display_mode *mode;
	unsigned int i, num = 0;

	for (i = 0; i < panel->desc->num_timings; i++) {
		const struct display_timing *dt = &panel->desc->timings[i];
		struct videomode vm;

		videomode_from_timing(dt, &vm);
		mode = drm_mode_create(connector->dev);
		if (!mode) {
			dev_err(panel->base.dev, "failed to add mode %ux%u\n",
				dt->hactive.typ, dt->vactive.typ);
			continue;
		}

		drm_display_mode_from_videomode(&vm, mode);

		mode->type |= DRM_MODE_TYPE_DRIVER;

		/* A lone timing is the preferred one by definition */
		if (panel->desc->num_timings == 1)
			mode->type |= DRM_MODE_TYPE_PREFERRED;

		drm_mode_probed_add(connector, mode);
		num++;
	}

	return num;
}

/*
 * Add duplicates of the desc's fixed modes to @connector.
 * Returns the number of modes added.
 */
static unsigned int panel_edp_get_display_modes(struct panel_edp *panel,
						struct drm_connector *connector)
{
	struct drm_display_mode *mode;
	unsigned int i, num = 0;

	for (i = 0; i < panel->desc->num_modes; i++) {
		const struct drm_display_mode *m = &panel->desc->modes[i];

		mode = drm_mode_duplicate(connector->dev, m);
		if (!mode) {
			dev_err(panel->base.dev, "failed to add mode %ux%u@%u\n",
				m->hdisplay, m->vdisplay,
				drm_mode_vrefresh(m));
			continue;
		}

		mode->type |= DRM_MODE_TYPE_DRIVER;

		if (panel->desc->num_modes == 1)
			mode->type |= DRM_MODE_TYPE_PREFERRED;

		drm_mode_set_name(mode);

		drm_mode_probed_add(connector, mode);
		num++;
	}

	return num;
}

/*
 * Add the hardcoded (non-EDID) modes: a validated DT override first, then
 * the desc timings, then the desc fixed modes.  Also fills in bpc and the
 * physical size on the connector.  Returns the number of modes added.
 */
static int panel_edp_get_non_edid_modes(struct panel_edp *panel,
					struct drm_connector *connector)
{
	struct drm_display_mode *mode;
	bool has_override = panel->override_mode.type;
	unsigned int num = 0;

	if (!panel->desc)
		return 0;

	if (has_override) {
		mode = drm_mode_duplicate(connector->dev,
					  &panel->override_mode);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			num = 1;
		} else {
			dev_err(panel->base.dev, "failed to add override mode\n");
		}
	}

	/* Only add timings if override was not there or failed to validate */
	if (num == 0 && panel->desc->num_timings)
		num = panel_edp_get_timings_modes(panel, connector);

	/*
	 * Only add fixed modes if timings/override added no mode.
	 *
	 * We should only ever have either the display timings specified
	 * or a fixed mode. Anything else is rather bogus.
	 */
	WARN_ON(panel->desc->num_timings && panel->desc->num_modes);

	if (num == 0)
		num = panel_edp_get_display_modes(panel, connector);

	connector->display_info.bpc = panel->desc->bpc;
	connector->display_info.width_mm = panel->desc->size.width;
	connector->display_info.height_mm = panel->desc->size.height;

	return num;
}

/*
 * Sleep until at least @min_ms have elapsed since @start_ktime.
 * No-op if that point is already in the past (or @min_ms is 0).
 */
static void panel_edp_wait(ktime_t start_ktime, unsigned int min_ms)
{
	ktime_t now_ktime, min_ktime;

	if (!min_ms)
		return;

	min_ktime = ktime_add(start_ktime, ms_to_ktime(min_ms));
	now_ktime = ktime_get_boottime();

	if (ktime_before(now_ktime, min_ktime))
		/* +1 rounds up so we never sleep less than the remainder */
		msleep(ktime_to_ms(ktime_sub(min_ktime, now_ktime)) + 1);
}

/* drm_panel .disable hook: honor the panel's T9 "disable" delay. */
static int panel_edp_disable(struct drm_panel *panel)
{
	struct panel_edp *p = to_panel_edp(panel);

	if (!p->enabled)
		return 0;

	if (p->desc->delay.disable)
		msleep(p->desc->delay.disable);

	p->enabled = false;

	return 0;
}

/* Runtime-PM suspend: power the panel off and stamp the unprepare time. */
static int panel_edp_suspend(struct device *dev)
{
	struct panel_edp *p = dev_get_drvdata(dev);

	gpiod_set_value_cansleep(p->enable_gpio, 0);
	regulator_disable(p->supply);
	p->unprepared_time = ktime_get_boottime();

	return 0;
}

/* drm_panel .unprepare hook: drop the runtime-PM reference taken in prepare(). */
static int panel_edp_unprepare(struct drm_panel *panel)
{
	struct panel_edp *p = to_panel_edp(panel);
	int ret;

	/* Unpreparing when already unprepared is a no-op */
	if (!p->prepared)
		return 0;

	pm_runtime_mark_last_busy(panel->dev);
	ret = pm_runtime_put_autosuspend(panel->dev);
	if (ret < 0)
		return ret;
	p->prepared = false;

	return 0;
}

/* Request the optional "hpd" GPIO; p->hpd_gpio stays NULL if absent. */
static int panel_edp_get_hpd_gpio(struct device *dev, struct panel_edp *p)
{
	p->hpd_gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
	if (IS_ERR(p->hpd_gpio))
		return dev_err_probe(dev, PTR_ERR(p->hpd_gpio),
				     "failed to get 'hpd' GPIO\n");

	return 0;
}

/* True if this driver can itself observe HPD (GPIO or DP AUX callback). */
static bool panel_edp_can_read_hpd(struct panel_edp *p)
{
	return !p->no_hpd && (p->hpd_gpio || (p->aux && p->aux->wait_hpd_asserted));
}

/*
 * One attempt at powering the panel up: wait out the T12 "unprepare"
 * window, enable the regulator and the enable GPIO, then wait for HPD
 * (polling the GPIO or via the AUX channel) or just sleep the worst-case
 * delay.  Returns 0 on success, -ETIMEDOUT if HPD never asserted, or
 * another errno; on failure the panel is powered back off.
 */
static int panel_edp_prepare_once(struct panel_edp *p)
{
	struct device *dev = p->base.dev;
	unsigned int delay;
	int err;
	int hpd_asserted;
	unsigned long hpd_wait_us;

	panel_edp_wait(p->unprepared_time, p->desc->delay.unprepare);

	err = regulator_enable(p->supply);
	if (err < 0) {
		dev_err(dev, "failed to enable supply: %d\n", err);
		return err;
	}

	gpiod_set_value_cansleep(p->enable_gpio, 1);

	delay = p->desc->delay.hpd_reliable;
	if (p->no_hpd)
		delay = max(delay, p->desc->delay.hpd_absent);
	if (delay)
		msleep(delay);

	if (panel_edp_can_read_hpd(p)) {
		if (p->desc->delay.hpd_absent)
			hpd_wait_us = p->desc->delay.hpd_absent * 1000UL;
		else
			hpd_wait_us = 2000000;	/* 2 s fallback timeout */

		if (p->hpd_gpio) {
			err = readx_poll_timeout(gpiod_get_value_cansleep,
						 p->hpd_gpio, hpd_asserted,
						 hpd_asserted, 1000,
						 hpd_wait_us);
			if (hpd_asserted < 0)
				err = hpd_asserted;
		} else {
			err = p->aux->wait_hpd_asserted(p->aux, hpd_wait_us);
		}

		if (err) {
			if (err != -ETIMEDOUT)
				dev_err(dev,
					"error waiting for hpd GPIO: %d\n", err);
			goto error;
		}
	}

	p->prepared_time = ktime_get_boottime();

	return 0;

error:
	gpiod_set_value_cansleep(p->enable_gpio, 0);
	regulator_disable(p->supply);
	p->unprepared_time = ktime_get_boottime();

	return err;
}

/*
 * Some panels simply don't always come up and need to be power cycled to
 * work properly. We'll allow for a handful of retries.
 */
#define MAX_PANEL_PREPARE_TRIES		5

/* Runtime-PM resume: power the panel up, retrying on HPD timeout. */
static int panel_edp_resume(struct device *dev)
{
	struct panel_edp *p = dev_get_drvdata(dev);
	int ret;
	int try;

	for (try = 0; try < MAX_PANEL_PREPARE_TRIES; try++) {
		ret = panel_edp_prepare_once(p);
		if (ret != -ETIMEDOUT)
			break;
	}

	if (ret == -ETIMEDOUT)
		dev_err(dev, "Prepare timeout after %d tries\n", try);
	else if (try)
		dev_warn(dev, "Prepare needed %d retries\n", try);

	return ret;
}

/*
 * drm_panel .prepare hook: take a runtime-PM reference; the actual power
 * sequencing happens in the runtime-PM resume callback.
 */
static int panel_edp_prepare(struct drm_panel *panel)
{
	struct panel_edp *p = to_panel_edp(panel);
	int ret;

	/* Preparing when already prepared is a no-op */
	if (p->prepared)
		return 0;

	ret = pm_runtime_get_sync(panel->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(panel->dev);
		return ret;
	}

	p->prepared = true;

	return 0;
}

/* drm_panel .enable hook: wait out the post-power-up delays before backlight on. */
static int panel_edp_enable(struct drm_panel *panel)
{
	struct panel_edp *p = to_panel_edp(panel);
	unsigned int delay;

	if (p->enabled)
		return 0;

	delay = p->desc->delay.enable;

	/*
	 * If there is a "prepare_to_enable" delay then that's supposed to be
	 * the delay from HPD going high until we can turn the backlight on.
	 * However, we can only count this if HPD is readable by the panel
	 * driver.
	 *
	 * If we aren't handling the HPD pin ourselves then the best we
	 * can do is assume that HPD went high immediately before we were
	 * called (and link training took zero time). Note that "no-hpd"
	 * actually counts as handling HPD ourselves since we're doing the
	 * worst case delay (in prepare) ourselves.
	 *
	 * NOTE: if we ever end up in this "if" statement then we're
	 * guaranteed that the panel_edp_wait() call below will do no delay.
	 * It already handles that case, though, so we don't need any special
	 * code for it.
	 */
	if (p->desc->delay.prepare_to_enable &&
	    !panel_edp_can_read_hpd(p) && !p->no_hpd)
		delay = max(delay, p->desc->delay.prepare_to_enable);

	if (delay)
		msleep(delay);

	panel_edp_wait(p->prepared_time, p->desc->delay.prepare_to_enable);

	p->enabled = true;

	return 0;
}

/*
 * drm_panel .get_modes hook: prefer EDID modes (briefly powering the panel
 * via runtime PM to read them over DDC), fall back to hardcoded modes.
 */
static int panel_edp_get_modes(struct drm_panel *panel,
			       struct drm_connector *connector)
{
	struct panel_edp *p = to_panel_edp(panel);
	int num = 0;

	/* probe EDID if a DDC bus is available */
	if (p->ddc) {
		pm_runtime_get_sync(panel->dev);

		if (!p->edid)
			p->edid = drm_get_edid(connector, p->ddc);

		if (p->edid)
			num += drm_add_edid_modes(connector, p->edid);

		pm_runtime_mark_last_busy(panel->dev);
		pm_runtime_put_autosuspend(panel->dev);
	}

	/*
	 * Add hard-coded panel modes. Don't call this if there are no timings
	 * and no modes (the generic edp-panel case) because it will clobber
	 * the display_info that was already set by drm_add_edid_modes().
	 */
	if (p->desc->num_timings || p->desc->num_modes)
		num += panel_edp_get_non_edid_modes(p, connector);
	else if (!num)
		dev_warn(p->base.dev, "No display modes\n");

	/*
	 * TODO: Remove once all drm drivers call
	 * drm_connector_set_orientation_from_panel()
	 */
	drm_connector_set_panel_orientation(connector, p->orientation);

	return num;
}

/*
 * drm_panel .get_timings hook: copy out up to @num_timings of the desc's
 * timings; always returns the total number available.
 */
static int panel_edp_get_timings(struct drm_panel *panel,
				 unsigned int num_timings,
				 struct display_timing *timings)
{
	struct panel_edp *p = to_panel_edp(panel);
	unsigned int i;

	if (p->desc->num_timings < num_timings)
		num_timings = p->desc->num_timings;

	if (timings)
		for (i = 0; i < num_timings; i++)
			timings[i] = p->desc->timings[i];

	return p->desc->num_timings;
}

static enum drm_panel_orientation panel_edp_get_orientation(struct drm_panel *panel)
{
	struct panel_edp *p = to_panel_edp(panel);

	return p->orientation;
}

/* debugfs "detected_panel" file: report which EDID table entry matched. */
static int detected_panel_show(struct seq_file *s, void *data)
{
	struct drm_panel *panel = s->private;
	struct panel_edp *p = to_panel_edp(panel);

	if (IS_ERR(p->detected_panel))
		seq_puts(s, "UNKNOWN\n");
	else if (!p->detected_panel)
		seq_puts(s, "HARDCODED\n");
	else
		seq_printf(s, "%s\n", p->detected_panel->name);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(detected_panel);

static void panel_edp_debugfs_init(struct drm_panel *panel, struct dentry *root)
{
	debugfs_create_file("detected_panel", 0600, root, panel, &detected_panel_fops);
}

static const struct drm_panel_funcs panel_edp_funcs = {
	.disable = panel_edp_disable,
	.unprepare = panel_edp_unprepare,
	.prepare = panel_edp_prepare,
	.enable = panel_edp_enable,
	.get_modes = panel_edp_get_modes,
	.get_orientation = panel_edp_get_orientation,
	.get_timings = panel_edp_get_timings,
	.debugfs_init = panel_edp_debugfs_init,
};

/* True if @to_check's typical value lies within @bounds' min..max for @field. */
#define PANEL_EDP_BOUNDS_CHECK(to_check, bounds, field) \
	(to_check->field.typ >= bounds->field.min && \
	 to_check->field.typ <= bounds->field.max)

/*
 * Validate a "panel-timing" DT override @ot against the desc's timing
 * ranges and, if one matches, record it as panel->override_mode.
 */
static void panel_edp_parse_panel_timing_node(struct device *dev,
					      struct panel_edp *panel,
					      const struct display_timing *ot)
{
	const struct panel_desc *desc = panel->desc;
	struct videomode vm;
	unsigned int i;

	if (WARN_ON(desc->num_modes)) {
		dev_err(dev, "Reject override mode: panel has a fixed mode\n");
		return;
	}
	if (WARN_ON(!desc->num_timings)) {
		dev_err(dev, "Reject override mode: no timings specified\n");
		return;
	}

	for (i = 0; i < panel->desc->num_timings; i++) {
		const struct display_timing *dt = &panel->desc->timings[i];

		if (!PANEL_EDP_BOUNDS_CHECK(ot, dt, hactive) ||
		    !PANEL_EDP_BOUNDS_CHECK(ot, dt, hfront_porch) ||
		    !PANEL_EDP_BOUNDS_CHECK(ot, dt, hback_porch) ||
		    !PANEL_EDP_BOUNDS_CHECK(ot, dt, hsync_len) ||
		    !PANEL_EDP_BOUNDS_CHECK(ot, dt, vactive) ||
		    !PANEL_EDP_BOUNDS_CHECK(ot, dt, vfront_porch) ||
		    !PANEL_EDP_BOUNDS_CHECK(ot, dt, vback_porch) ||
		    !PANEL_EDP_BOUNDS_CHECK(ot, dt, vsync_len))
			continue;

		if (ot->flags != dt->flags)
			continue;

		videomode_from_timing(ot, &vm);
		drm_display_mode_from_videomode(&vm, &panel->override_mode);
		panel->override_mode.type |= DRM_MODE_TYPE_DRIVER |
					     DRM_MODE_TYPE_PREFERRED;
		break;
	}

	if (WARN_ON(!panel->override_mode.type))
		dev_err(dev, "Reject override mode: No
display_timing found\n");
}

static const struct edp_panel_entry *find_edp_panel(u32 panel_id);

/*
 * Probe path for the generic "edp-panel" compatible: identify the panel by
 * its EDID panel ID and pick the matching power-sequencing delays, falling
 * back to very conservative delays for unknown panels.
 */
static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel)
{
	struct panel_desc *desc;
	u32 panel_id;
	char vend[4];
	u16 product_id;
	u32 reliable_ms = 0;
	u32 absent_ms = 0;
	int ret;

	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;
	panel->desc = desc;

	/*
	 * Read the dts properties for the initial probe. These are used by
	 * the runtime resume code which will get called by the
	 * pm_runtime_get_sync() call below.
	 */
	of_property_read_u32(dev->of_node, "hpd-reliable-delay-ms", &reliable_ms);
	desc->delay.hpd_reliable = reliable_ms;
	of_property_read_u32(dev->of_node, "hpd-absent-delay-ms", &absent_ms);
	desc->delay.hpd_absent = absent_ms;

	/* Power the panel on so we can read the EDID */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "Couldn't power on panel to read EDID: %d\n", ret);
		goto exit;
	}

	panel_id = drm_edid_get_panel_id(panel->ddc);
	if (!panel_id) {
		dev_err(dev, "Couldn't identify panel via EDID\n");
		ret = -EIO;
		goto exit;
	}
	drm_edid_decode_panel_id(panel_id, vend, &product_id);

	panel->detected_panel = find_edp_panel(panel_id);

	/*
	 * We're using non-optimized timings and want it really obvious that
	 * someone needs to add an entry to the table, so we'll do a WARN_ON
	 * splat.
	 */
	if (WARN_ON(!panel->detected_panel)) {
		dev_warn(dev,
			 "Unknown panel %s %#06x, using conservative timings\n",
			 vend, product_id);

		/*
		 * It's highly likely that the panel will work if we use very
		 * conservative timings, so let's do that. We already know that
		 * the HPD-related delays must have worked since we got this
		 * far, so we really just need the "unprepare" / "enable"
		 * delays. We don't need "prepare_to_enable" since that
		 * overlaps the "enable" delay anyway.
		 *
		 * Nearly all panels have a "unprepare" delay of 500 ms though
		 * there are a few with 1000. Let's stick 2000 in just to be
		 * super conservative.
		 *
		 * An "enable" delay of 80 ms seems the most common, but we'll
		 * throw in 200 ms to be safe.
		 */
		desc->delay.unprepare = 2000;
		desc->delay.enable = 200;

		panel->detected_panel = ERR_PTR(-EINVAL);
	} else {
		dev_info(dev, "Detected %s %s (%#06x)\n",
			 vend, panel->detected_panel->name, product_id);

		/* Update the delay; everything else comes from EDID */
		desc->delay = *panel->detected_panel->delay;
	}

	ret = 0;
exit:
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return ret;
}

/*
 * Common probe: parse DT resources (HPD, regulator, enable GPIO, DDC bus,
 * orientation, timing override), set up runtime PM and register the panel.
 */
static int panel_edp_probe(struct device *dev, const struct panel_desc *desc,
			   struct drm_dp_aux *aux)
{
	struct panel_edp *panel;
	struct display_timing dt;
	struct device_node *ddc;
	int err;

	panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
	if (!panel)
		return -ENOMEM;

	panel->enabled = false;
	panel->prepared_time = 0;
	panel->desc = desc;
	panel->aux = aux;

	panel->no_hpd = of_property_read_bool(dev->of_node, "no-hpd");
	if (!panel->no_hpd) {
		err = panel_edp_get_hpd_gpio(dev, panel);
		if (err)
			return err;
	}

	panel->supply = devm_regulator_get(dev, "power");
	if (IS_ERR(panel->supply))
		return PTR_ERR(panel->supply);

	panel->enable_gpio = devm_gpiod_get_optional(dev, "enable",
						     GPIOD_OUT_LOW);
	if (IS_ERR(panel->enable_gpio))
		return dev_err_probe(dev, PTR_ERR(panel->enable_gpio),
				     "failed to request GPIO\n");

	err = of_drm_get_panel_orientation(dev->of_node, &panel->orientation);
	if (err) {
		dev_err(dev, "%pOF: failed to get orientation %d\n",
			dev->of_node, err);
		return err;
	}

	ddc = of_parse_phandle(dev->of_node, "ddc-i2c-bus", 0);
	if (ddc) {
		panel->ddc = of_find_i2c_adapter_by_node(ddc);
		of_node_put(ddc);

		if (!panel->ddc)
			return -EPROBE_DEFER;
	} else if (aux) {
		panel->ddc = &aux->ddc;
	}

	if (!of_get_display_timing(dev->of_node, "panel-timing", &dt))
		panel_edp_parse_panel_timing_node(dev, panel, &dt);

	dev_set_drvdata(dev, panel);

	drm_panel_init(&panel->base, dev, &panel_edp_funcs,
		       DRM_MODE_CONNECTOR_eDP);

	err = drm_panel_of_backlight(&panel->base);
	if (err)
		goto err_finished_ddc_init;

	/*
	 * We use runtime PM for prepare / unprepare since those power the panel
	 * on and off and those can be very slow operations. This is important
	 * to optimize powering the panel on briefly to read the EDID before
	 * fully enabling the panel.
	 */
	pm_runtime_enable(dev);
	pm_runtime_set_autosuspend_delay(dev, 1000);
	pm_runtime_use_autosuspend(dev);

	if (of_device_is_compatible(dev->of_node, "edp-panel")) {
		err = generic_edp_panel_probe(dev, panel);
		if (err) {
			dev_err_probe(dev, err,
				      "Couldn't detect panel nor find a fallback\n");
			goto err_finished_pm_runtime;
		}
		/* generic_edp_panel_probe() replaces desc in the panel */
		desc = panel->desc;
	} else if (desc->bpc != 6 && desc->bpc != 8 && desc->bpc != 10) {
		dev_warn(dev, "Expected bpc in {6,8,10} but got: %u\n", desc->bpc);
	}

	if (!panel->base.backlight && panel->aux) {
		pm_runtime_get_sync(dev);
		err = drm_panel_dp_aux_backlight(&panel->base, panel->aux);
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
		if (err)
			goto err_finished_pm_runtime;
	}

	drm_panel_add(&panel->base);

	return 0;

err_finished_pm_runtime:
	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_disable(dev);
err_finished_ddc_init:
	/* Only drop a DDC reference we took (not the AUX-provided adapter) */
	if (panel->ddc && (!panel->aux || panel->ddc != &panel->aux->ddc))
		put_device(&panel->ddc->dev);

	return err;
}

/* Tear down in reverse of probe; also force the panel off. */
static void panel_edp_remove(struct device *dev)
{
	struct panel_edp *panel = dev_get_drvdata(dev);

	drm_panel_remove(&panel->base);
	drm_panel_disable(&panel->base);
	drm_panel_unprepare(&panel->base);

	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_disable(dev);
	if (panel->ddc && (!panel->aux || panel->ddc != &panel->aux->ddc))
		put_device(&panel->ddc->dev);

	kfree(panel->edid);
	panel->edid = NULL;
}

/* Make sure the panel is off when the system shuts down. */
static void panel_edp_shutdown(struct device *dev)
{
	struct panel_edp *panel = dev_get_drvdata(dev);

	drm_panel_disable(&panel->base);
	drm_panel_unprepare(&panel->base);
}

static const struct display_timing auo_b101ean01_timing = {
	.pixelclock = { 65300000, 72500000, 75000000 },
	.hactive = { 1280, 1280, 1280 },
	.hfront_porch = { 18,
			 119, 119 },
	.hback_porch = { 21, 21, 21 },
	.hsync_len = { 32, 32, 32 },
	.vactive = { 800, 800, 800 },
	.vfront_porch = { 4, 4, 4 },
	.vback_porch = { 8, 8, 8 },
	.vsync_len = { 18, 20, 20 },
};

static const struct panel_desc auo_b101ean01 = {
	.timings = &auo_b101ean01_timing,
	.num_timings = 1,
	.bpc = 6,
	.size = {
		.width = 217,
		.height = 136,
	},
};

static const struct drm_display_mode auo_b116xak01_mode = {
	.clock = 69300,
	.hdisplay = 1366,
	.hsync_start = 1366 + 48,
	.hsync_end = 1366 + 48 + 32,
	.htotal = 1366 + 48 + 32 + 10,
	.vdisplay = 768,
	.vsync_start = 768 + 4,
	.vsync_end = 768 + 4 + 6,
	.vtotal = 768 + 4 + 6 + 15,
	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};

static const struct panel_desc auo_b116xak01 = {
	.modes = &auo_b116xak01_mode,
	.num_modes = 1,
	.bpc = 6,
	.size = {
		.width = 256,
		.height = 144,
	},
	.delay = {
		.hpd_absent = 200,
	},
};

static const struct drm_display_mode auo_b116xw03_mode = {
	.clock = 70589,
	.hdisplay = 1366,
	.hsync_start = 1366 + 40,
	.hsync_end = 1366 + 40 + 40,
	.htotal = 1366 + 40 + 40 + 32,
	.vdisplay = 768,
	.vsync_start = 768 + 10,
	.vsync_end = 768 + 10 + 12,
	.vtotal = 768 + 10 + 12 + 6,
	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};

static const struct panel_desc auo_b116xw03 = {
	.modes = &auo_b116xw03_mode,
	.num_modes = 1,
	.bpc = 6,
	.size = {
		.width = 256,
		.height = 144,
	},
	.delay = {
		.enable = 400,
	},
};

static const struct drm_display_mode auo_b133han05_mode = {
	.clock = 142600,
	.hdisplay = 1920,
	.hsync_start = 1920 + 58,
	.hsync_end = 1920 + 58 + 42,
	.htotal = 1920 + 58 + 42 + 60,
	.vdisplay = 1080,
	.vsync_start = 1080 + 3,
	.vsync_end = 1080 + 3 + 5,
	.vtotal = 1080 + 3 + 5 + 54,
};

static const struct panel_desc auo_b133han05 = {
	.modes = &auo_b133han05_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 293,
		.height = 165,
	},
	.delay = {
		.hpd_reliable = 100,
		.enable = 20,
		.unprepare = 50,
	},
};

static const struct drm_display_mode auo_b133htn01_mode = {
	.clock = 150660,
	.hdisplay = 1920,
	.hsync_start = 1920 + 172,
	.hsync_end = 1920 + 172 + 80,
	.htotal = 1920 + 172 + 80 + 60,
	.vdisplay = 1080,
	.vsync_start = 1080 + 25,
	.vsync_end = 1080 + 25 + 10,
	.vtotal = 1080 + 25 + 10 + 10,
};

static const struct panel_desc auo_b133htn01 = {
	.modes = &auo_b133htn01_mode,
	.num_modes = 1,
	.bpc = 6,
	.size = {
		.width = 293,
		.height = 165,
	},
	.delay = {
		.hpd_reliable = 105,
		.enable = 20,
		.unprepare = 50,
	},
};

static const struct drm_display_mode auo_b133xtn01_mode = {
	.clock = 69500,
	.hdisplay = 1366,
	.hsync_start = 1366 + 48,
	.hsync_end = 1366 + 48 + 32,
	.htotal = 1366 + 48 + 32 + 20,
	.vdisplay = 768,
	.vsync_start = 768 + 3,
	.vsync_end = 768 + 3 + 6,
	.vtotal = 768 + 3 + 6 + 13,
};

static const struct panel_desc auo_b133xtn01 = {
	.modes = &auo_b133xtn01_mode,
	.num_modes = 1,
	.bpc = 6,
	.size = {
		.width = 293,
		.height = 165,
	},
};

static const struct drm_display_mode auo_b140han06_mode = {
	.clock = 141000,
	.hdisplay = 1920,
	.hsync_start = 1920 + 16,
	.hsync_end = 1920 + 16 + 16,
	.htotal = 1920 + 16 + 16 + 152,
	.vdisplay = 1080,
	.vsync_start = 1080 + 3,
	.vsync_end = 1080 + 3 + 14,
	.vtotal = 1080 + 3 + 14 + 19,
};

static const struct panel_desc auo_b140han06 = {
	.modes = &auo_b140han06_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 309,
		.height = 174,
	},
	.delay = {
		.hpd_reliable = 100,
		.enable = 20,
		.unprepare = 50,
	},
};

static const struct drm_display_mode boe_nv101wxmn51_modes[] = {
	{
		.clock = 71900,
		.hdisplay = 1280,
		.hsync_start = 1280 + 48,
		.hsync_end = 1280 + 48 + 32,
		.htotal = 1280 + 48 + 32 + 80,
		.vdisplay = 800,
		.vsync_start = 800 + 3,
		.vsync_end = 800 + 3 + 5,
		.vtotal = 800 + 3 + 5 + 24,
	},
	{
		.clock = 57500,
		.hdisplay = 1280,
		.hsync_start = 1280 + 48,
		.hsync_end = 1280 + 48 + 32,
		.htotal = 1280 + 48 + 32 + 80,
		.vdisplay = 800,
		.vsync_start = 800 + 3,
		.vsync_end = 800 + 3 + 5,
		.vtotal = 800 + 3 + 5 + 24,
	},
};

static const struct panel_desc boe_nv101wxmn51 = {
	.modes = boe_nv101wxmn51_modes,
	.num_modes = ARRAY_SIZE(boe_nv101wxmn51_modes),
	.bpc = 8,
	.size = {
		.width = 217,
		.height = 136,
	},
	.delay = {
		/* TODO: should be hpd-absent and no-hpd should be set? */
		.hpd_reliable = 210,
		.enable = 50,
		.unprepare = 160,
	},
};

static const struct drm_display_mode boe_nv110wtm_n61_modes[] = {
	{
		.clock = 207800,
		.hdisplay = 2160,
		.hsync_start = 2160 + 48,
		.hsync_end = 2160 + 48 + 32,
		.htotal = 2160 + 48 + 32 + 100,
		.vdisplay = 1440,
		.vsync_start = 1440 + 3,
		.vsync_end = 1440 + 3 + 6,
		.vtotal = 1440 + 3 + 6 + 31,
		.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
	},
	{
		.clock = 138500,
		.hdisplay = 2160,
		.hsync_start = 2160 + 48,
		.hsync_end = 2160 + 48 + 32,
		.htotal = 2160 + 48 + 32 + 100,
		.vdisplay = 1440,
		.vsync_start = 1440 + 3,
		.vsync_end = 1440 + 3 + 6,
		.vtotal = 1440 + 3 + 6 + 31,
		.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
	},
};

static const struct panel_desc boe_nv110wtm_n61 = {
	.modes = boe_nv110wtm_n61_modes,
	.num_modes = ARRAY_SIZE(boe_nv110wtm_n61_modes),
	.bpc = 8,
	.size = {
		.width = 233,
		.height = 155,
	},
	.delay = {
		.hpd_absent = 200,
		.prepare_to_enable = 80,
		.enable = 50,
		.unprepare = 500,
	},
};

/* Also used for boe_nv133fhm_n62 */
static const struct drm_display_mode boe_nv133fhm_n61_modes = {
	.clock = 147840,
	.hdisplay = 1920,
	.hsync_start = 1920 + 48,
	.hsync_end = 1920 + 48 + 32,
	.htotal = 1920 + 48 + 32 + 200,
	.vdisplay = 1080,
	.vsync_start = 1080 + 3,
	.vsync_end = 1080 + 3 + 6,
	.vtotal = 1080 + 3 + 6 + 31,
	.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
};

/* Also used for boe_nv133fhm_n62 */
static const struct panel_desc boe_nv133fhm_n61 = {
	.modes = &boe_nv133fhm_n61_modes,
	.num_modes = 1,
	.bpc = 6,
	.size = {
		.width = 294,
		.height = 165,
	},
	.delay = {
		/*
		 * When power is first given to the panel there's a short
		 * spike on the HPD line. It was explained that this spike
		 * was until the TCON data download was complete. On
		 * one system this was measured at 8 ms. We'll put 15 ms
		 * in the prepare delay just to be safe. That means:
		 * - If HPD isn't hooked up you still have 200 ms delay.
		 * - If HPD is hooked up we won't try to look at it for the
		 *   first 15 ms.
		 */
		.hpd_reliable = 15,
		.hpd_absent = 200,
		.unprepare = 500,
	},
};

static const struct drm_display_mode boe_nv140fhmn49_modes[] = {
	{
		.clock = 148500,
		.hdisplay = 1920,
		.hsync_start = 1920 + 48,
		.hsync_end = 1920 + 48 + 32,
		.htotal = 2200,
		.vdisplay = 1080,
		.vsync_start = 1080 + 3,
		.vsync_end = 1080 + 3 + 5,
		.vtotal = 1125,
	},
};

static const struct panel_desc boe_nv140fhmn49 = {
	.modes = boe_nv140fhmn49_modes,
	.num_modes = ARRAY_SIZE(boe_nv140fhmn49_modes),
	.bpc = 6,
	.size = {
		.width = 309,
		.height = 174,
	},
	.delay = {
		/* TODO: should be hpd-absent and no-hpd should be set? */
		.hpd_reliable = 210,
		.enable = 50,
		.unprepare = 160,
	},
};

static const struct drm_display_mode innolux_n116bca_ea1_mode = {
	.clock = 76420,
	.hdisplay = 1366,
	.hsync_start = 1366 + 136,
	.hsync_end = 1366 + 136 + 30,
	.htotal = 1366 + 136 + 30 + 60,
	.vdisplay = 768,
	.vsync_start = 768 + 8,
	.vsync_end = 768 + 8 + 12,
	.vtotal = 768 + 8 + 12 + 12,
	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};

static const struct panel_desc innolux_n116bca_ea1 = {
	.modes = &innolux_n116bca_ea1_mode,
	.num_modes = 1,
	.bpc = 6,
	.size = {
		.width = 256,
		.height = 144,
	},
	.delay = {
		.hpd_absent = 200,
		.enable = 80,
		.disable = 50,
		.unprepare = 500,
	},
};

/*
 * Datasheet specifies that at 60 Hz refresh rate:
 * - total horizontal time: { 1506, 1592, 1716 }
 * - total vertical time: { 788, 800, 868 }
 *
 * ...but doesn't go into exactly how that should be split into a front
 * porch, back porch, or sync length. For now we'll leave a single setting
 * here which allows a bit of tweaking of the pixel clock at the expense of
 * refresh rate.
 */
static const struct display_timing innolux_n116bge_timing = {
	.pixelclock = { 72600000, 76420000, 80240000 },
	.hactive = { 1366, 1366, 1366 },
	.hfront_porch = { 136, 136, 136 },
	.hback_porch = { 60, 60, 60 },
	.hsync_len = { 30, 30, 30 },
	.vactive = { 768, 768, 768 },
	.vfront_porch = { 8, 8, 8 },
	.vback_porch = { 12, 12, 12 },
	.vsync_len = { 12, 12, 12 },
	.flags = DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_HSYNC_LOW,
};

static const struct panel_desc innolux_n116bge = {
	.timings = &innolux_n116bge_timing,
	.num_timings = 1,
	.bpc = 6,
	.size = {
		.width = 256,
		.height = 144,
	},
};

static const struct drm_display_mode innolux_n125hce_gn1_mode = {
	.clock = 162000,
	.hdisplay = 1920,
	.hsync_start = 1920 + 40,
	.hsync_end = 1920 + 40 + 40,
	.htotal = 1920 + 40 + 40 + 80,
	.vdisplay = 1080,
	.vsync_start = 1080 + 4,
	.vsync_end = 1080 + 4 + 4,
	.vtotal = 1080 + 4 + 4 + 24,
};

static const struct panel_desc innolux_n125hce_gn1 = {
	.modes = &innolux_n125hce_gn1_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 276,
		.height = 155,
	},
};

static const struct drm_display_mode innolux_p120zdg_bf1_mode = {
	.clock = 206016,
	.hdisplay = 2160,
	.hsync_start = 2160 + 48,
	.hsync_end = 2160 + 48 + 32,
	.htotal = 2160 + 48 + 32 + 80,
	.vdisplay = 1440,
	.vsync_start = 1440 + 3,
	.vsync_end = 1440 + 3 + 10,
	.vtotal = 1440 + 3 + 10 + 27,
	.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};

static const struct panel_desc innolux_p120zdg_bf1 = {
	.modes = &innolux_p120zdg_bf1_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 254,
		.height = 169,
	},
	.delay = {
		.hpd_absent = 200,
		.unprepare = 500,
	},
};

static const struct drm_display_mode ivo_m133nwf4_r0_mode = {
	.clock = 138778,
	.hdisplay = 1920,
	.hsync_start = 1920 + 24,
	.hsync_end = 1920 + 24 + 48,
	.htotal = 1920 + 24 + 48 + 88,
	.vdisplay = 1080,
	.vsync_start = 1080 + 3,
	.vsync_end = 1080 + 3 + 12,
	.vtotal = 1080 + 3 + 12 + 17,
	.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};

static const struct panel_desc ivo_m133nwf4_r0 = {
	.modes = &ivo_m133nwf4_r0_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 294,
		.height = 165,
	},
	.delay = {
		.hpd_absent = 200,
		.unprepare = 500,
	},
};

static const struct drm_display_mode kingdisplay_kd116n21_30nv_a010_mode = {
	.clock = 81000,
	.hdisplay = 1366,
	.hsync_start = 1366 + 40,
	.hsync_end = 1366 + 40 + 32,
	.htotal = 1366 + 40 + 32 + 62,
	.vdisplay = 768,
	.vsync_start = 768 + 5,
	.vsync_end = 768 + 5 + 5,
	.vtotal = 768 + 5 + 5 + 122,
	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};

static const struct panel_desc kingdisplay_kd116n21_30nv_a010 = {
	.modes = &kingdisplay_kd116n21_30nv_a010_mode,
	.num_modes = 1,
	.bpc = 6,
	.size = {
		.width = 256,
		.height = 144,
	},
	.delay = {
		.hpd_absent = 200,
	},
};

static const struct drm_display_mode lg_lp079qx1_sp0v_mode = {
	.clock = 200000,
	.hdisplay = 1536,
	.hsync_start = 1536 + 12,
	.hsync_end = 1536 + 12 + 16,
	.htotal = 1536 + 12 + 16 + 48,
	.vdisplay = 2048,
	.vsync_start = 2048 + 8,
	.vsync_end = 2048 + 8 + 4,
	.vtotal = 2048 + 8 + 4 + 8,
	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};

static const struct panel_desc lg_lp079qx1_sp0v = {
	.modes = &lg_lp079qx1_sp0v_mode,
	.num_modes = 1,
	.size = {
		.width = 129,
		.height = 171,
	},
};

static const struct drm_display_mode lg_lp097qx1_spa1_mode = {
	.clock = 205210,
	.hdisplay = 2048,
	.hsync_start = 2048 + 150,
	.hsync_end = 2048 + 150 + 5,
	.htotal = 2048 + 150 + 5 + 5,
	.vdisplay = 1536,
	.vsync_start = 1536 + 3,
	.vsync_end = 1536 + 3 + 1,
	.vtotal = 1536 + 3 + 1 + 9,
};

static const struct panel_desc lg_lp097qx1_spa1 = {
	.modes = &lg_lp097qx1_spa1_mode,
	.num_modes = 1,
	.size = {
		.width = 208,
		.height = 147,
	},
};

static const struct drm_display_mode lg_lp120up1_mode = {
	.clock = 162300,
	.hdisplay = 1920,
	.hsync_start = 1920 + 40,
	.hsync_end = 1920 + 40 + 40,
	.htotal = 1920 + 40 + 40 + 80,
	.vdisplay = 1280,
	.vsync_start = 1280 + 4,
	.vsync_end = 1280 + 4 + 4,
	.vtotal = 1280 + 4 + 4 + 12,
};

static const struct panel_desc lg_lp120up1 = {
	.modes = &lg_lp120up1_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 267,
		.height = 183,
	},
};

static const struct drm_display_mode lg_lp129qe_mode = {
	.clock = 285250,
	.hdisplay = 2560,
	.hsync_start = 2560 + 48,
	.hsync_end = 2560 + 48 + 32,
	.htotal = 2560 + 48 + 32 + 80,
	.vdisplay = 1700,
	.vsync_start = 1700 + 3,
	.vsync_end = 1700 + 3 + 10,
	.vtotal = 1700 + 3 + 10 + 36,
};

static const struct panel_desc lg_lp129qe = {
	.modes = &lg_lp129qe_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 272,
		.height = 181,
	},
};

static const struct drm_display_mode neweast_wjfh116008a_modes[] = {
	{
		.clock = 138500,
		.hdisplay = 1920,
		.hsync_start = 1920 + 48,
		.hsync_end = 1920 + 48 + 32,
		.htotal = 1920 + 48 + 32 + 80,
		.vdisplay = 1080,
		.vsync_start = 1080 + 3,
		.vsync_end = 1080 + 3 + 5,
		.vtotal = 1080 + 3 + 5 + 23,
		.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
	},
	{
		.clock = 110920,
		.hdisplay = 1920,
		.hsync_start = 1920 + 48,
		.hsync_end = 1920 + 48 + 32,
		.htotal = 1920 + 48 + 32 + 80,
		.vdisplay = 1080,
		.vsync_start = 1080 + 3,
		.vsync_end = 1080 + 3 + 5,
		.vtotal = 1080 + 3 + 5 + 23,
		.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
	}
};

static const struct panel_desc neweast_wjfh116008a = {
	.modes = neweast_wjfh116008a_modes,
	.num_modes = 2,
	.bpc = 6,
	.size = {
		.width = 260,
		.height = 150,
	},
	.delay = {
		.hpd_reliable = 110,
		.enable = 20,
		.unprepare = 500,
	},
};

static const struct drm_display_mode samsung_lsn122dl01_c01_mode = {
	.clock = 271560,
	.hdisplay = 2560,
	.hsync_start = 2560 + 48,
	.hsync_end = 2560 + 48 + 32,
	.htotal = 2560 + 48 + 32 + 80,
	.vdisplay = 1600,
	.vsync_start = 1600 + 2,
	.vsync_end = 1600 + 2 + 5,
	.vtotal = 1600 + 2 + 5 + 57,
};

static const struct panel_desc samsung_lsn122dl01_c01 = {
	.modes = &samsung_lsn122dl01_c01_mode,
	.num_modes = 1,
	.size = {
		.width = 263,
		.height = 164,
	},
};

static const struct drm_display_mode samsung_ltn140at29_301_mode = {
	.clock = 76300,
	.hdisplay = 1366,
	.hsync_start = 1366 + 64,
	.hsync_end = 1366 + 64 + 48,
	.htotal = 1366 + 64 + 48 + 128,
	.vdisplay = 768,
	.vsync_start = 768 + 2,
	.vsync_end = 768 + 2 + 5,
	.vtotal = 768 + 2 + 5 + 17,
};

static const struct panel_desc samsung_ltn140at29_301 = {
	.modes = &samsung_ltn140at29_301_mode,
	.num_modes = 1,
	.bpc = 6,
	.size = {
		.width = 320,
		.height = 187,
	},
};

static const struct drm_display_mode sharp_ld_d5116z01b_mode = {
	.clock = 168480,
	.hdisplay = 1920,
	.hsync_start = 1920 + 48,
	.hsync_end = 1920 + 48 + 32,
	.htotal = 1920 + 48 + 32 + 80,
	.vdisplay = 1280,
	.vsync_start = 1280 + 3,
	.vsync_end = 1280 + 3 + 10,
	.vtotal = 1280 + 3 + 10 + 57,
	.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};

static const struct panel_desc sharp_ld_d5116z01b = {
	.modes = &sharp_ld_d5116z01b_mode,
	.num_modes = 1,
	.bpc = 8,
	.size = {
		.width = 260,
		.height = 120,
	},
};

static const struct display_timing sharp_lq123p1jx31_timing = {
	.pixelclock = { 252750000, 252750000, 266604720 },
	.hactive = { 2400, 2400, 2400 },
	.hfront_porch = { 48, 48, 48 },
	.hback_porch = { 80, 80, 84 },
	.hsync_len = { 32, 32, 32 },
	.vactive = { 1600, 1600, 1600 },
	.vfront_porch = { 3, 3, 3 },
	.vback_porch = { 33, 33, 120 },
	.vsync_len = { 10, 10, 10 },
	.flags = DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_HSYNC_LOW,
};

static const struct panel_desc sharp_lq123p1jx31 = {
	.timings = &sharp_lq123p1jx31_timing,
	.num_timings = 1,
	.bpc = 8,
	.size = {
		.width = 259,
		.height = 173,
	},
	.delay = {
		.hpd_reliable = 110,
		.enable = 50,
		.unprepare = 550,
	},
};

static const struct drm_display_mode sharp_lq140m1jw46_mode[] = {
	{
		.clock = 346500,
		.hdisplay = 1920,
		.hsync_start = 1920 + 48,
		.hsync_end = 1920 + 48 + 32,
		.htotal = 1920 + 48 + 32 + 80,
		.vdisplay = 1080,
		.vsync_start = 1080 + 3,
		.vsync_end = 1080 + 3 + 5,
		.vtotal = 1080 + 3 + 5 + 69,
		.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
	},
	{
		.clock = 144370,
		.hdisplay = 1920,
		.hsync_start = 1920 + 48,
		.hsync_end = 1920 + 48 + 32,
		.htotal = 1920 + 48 + 32 + 80,
		.vdisplay = 1080,
		.vsync_start = 1080 + 3,
		.vsync_end = 1080 + 3 + 5,
		.vtotal = 1080 + 3 + 5 + 69,
		.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
	},
};

static const struct panel_desc sharp_lq140m1jw46 = {
	.modes = sharp_lq140m1jw46_mode,
	.num_modes = ARRAY_SIZE(sharp_lq140m1jw46_mode),
	.bpc = 8,
	.size = {
		.width = 309,
		.height = 174,
	},
	.delay = {
		.hpd_absent = 80,
		.enable = 50,
		.unprepare = 500,
	},
};

static const struct drm_display_mode starry_kr122ea0sra_mode = {
	.clock = 147000,
	.hdisplay = 1920,
	.hsync_start = 1920 + 16,
	.hsync_end = 1920 + 16 + 16,
	.htotal = 1920 + 16 + 16 + 32,
	.vdisplay = 1200,
	.vsync_start = 1200 + 15,
	.vsync_end = 1200 + 15 + 2,
	.vtotal = 1200 + 15 + 2 + 18,
	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};

static const struct panel_desc starry_kr122ea0sra = {
	.modes = &starry_kr122ea0sra_mode,
	.num_modes = 1,
	.size = {
		.width = 263,
		.height = 164,
	},
	.delay = {
		/* TODO: should be hpd-absent and no-hpd should be set? */
		.hpd_reliable = 10 + 200,
		.enable = 50,
		.unprepare = 10 + 500,
	},
};

static const struct of_device_id platform_of_match[] = {
	{
		/* Must be first */
		.compatible = "edp-panel",
	}, {
		.compatible = "auo,b101ean01",
		.data = &auo_b101ean01,
	}, {
		.compatible = "auo,b116xa01",
		.data = &auo_b116xak01,
	}, {
		.compatible = "auo,b116xw03",
		.data = &auo_b116xw03,
	}, {
		.compatible = "auo,b133han05",
		.data = &auo_b133han05,
	}, {
		.compatible = "auo,b133htn01",
		.data = &auo_b133htn01,
	}, {
		.compatible = "auo,b133xtn01",
		.data = &auo_b133xtn01,
	}, {
		.compatible = "auo,b140han06",
		.data = &auo_b140han06,
	}, {
		.compatible = "boe,nv101wxmn51",
		.data = &boe_nv101wxmn51,
	}, {
		.compatible = "boe,nv110wtm-n61",
		.data = &boe_nv110wtm_n61,
	}, {
		.compatible = "boe,nv133fhm-n61",
		.data = &boe_nv133fhm_n61,
	}, {
		.compatible = "boe,nv133fhm-n62",
		.data = &boe_nv133fhm_n61,
	}, {
		.compatible = "boe,nv140fhmn49",
		.data = &boe_nv140fhmn49,
	}, {
		.compatible = "innolux,n116bca-ea1",
		.data = &innolux_n116bca_ea1,
	}, {
		.compatible = "innolux,n116bge",
		.data = &innolux_n116bge,
	}, {
		.compatible = "innolux,n125hce-gn1",
		.data = &innolux_n125hce_gn1,
	}, {
		.compatible = "innolux,p120zdg-bf1",
		.data = &innolux_p120zdg_bf1,
	}, {
		.compatible = "ivo,m133nwf4-r0",
		.data = &ivo_m133nwf4_r0,
	}, {
		.compatible = "kingdisplay,kd116n21-30nv-a010",
		.data = &kingdisplay_kd116n21_30nv_a010,
	}, {
		.compatible = "lg,lp079qx1-sp0v",
		.data = &lg_lp079qx1_sp0v,
	}, {
		.compatible = "lg,lp097qx1-spa1",
		.data = &lg_lp097qx1_spa1,
	}, {
		.compatible = "lg,lp120up1",
		.data = &lg_lp120up1,
	}, {
		.compatible = "lg,lp129qe",
		.data = &lg_lp129qe,
	}, {
		.compatible = "neweast,wjfh116008a",
		.data = &neweast_wjfh116008a,
	}, {
		.compatible = "samsung,lsn122dl01-c01",
		.data = &samsung_lsn122dl01_c01,
	}, {
		.compatible = "samsung,ltn140at29-301",
		.data = &samsung_ltn140at29_301,
	}, {
		.compatible = "sharp,ld-d5116z01b",
		.data = &sharp_ld_d5116z01b,
	}, {
		.compatible = "sharp,lq123p1jx31",
		.data = &sharp_lq123p1jx31,
	}, {
		.compatible = "sharp,lq140m1jw46",
		.data = &sharp_lq140m1jw46,
	}, {
		.compatible = "starry,kr122ea0sra",
		.data = &starry_kr122ea0sra,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, platform_of_match);

/* Shared delay profiles, named hpd-absent_unprepare_<extras> in ms. */
static const struct panel_delay delay_200_500_p2e80 = {
	.hpd_absent = 200,
	.unprepare = 500,
	.prepare_to_enable = 80,
};

static const struct panel_delay delay_200_500_p2e100 = {
	.hpd_absent = 200,
	.unprepare = 500,
	.prepare_to_enable = 100,
};

static const struct panel_delay delay_200_500_e50 = {
	.hpd_absent = 200,
	.unprepare = 500,
	.enable = 50,
};

static const struct panel_delay delay_200_500_e80_d50 = {
	.hpd_absent = 200,
	.unprepare = 500,
	.enable = 80,
	.disable = 50,
};

static const struct panel_delay delay_100_500_e200 = {
	.hpd_absent = 100,
	.unprepare = 500,
	.enable = 200,
};

static const struct panel_delay delay_200_500_e200 = {
	.hpd_absent = 200,
	.unprepare = 500,
	.enable = 200,
};

#define EDP_PANEL_ENTRY(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name) \
{ \
	.name = _name, \
	.panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \
					     product_id), \
	.delay = _delay \
}

/*
 * This table is used to figure out
power sequencing delays for panels that * are detected by EDID. Entries here may point to entries in the * platform_of_match table (if a panel is listed in both places). * * Sort first by vendor, then by product ID. */ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('A', 'U', 'O', 0x1062, &delay_200_500_e50, "B120XAN01.0"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x145c, &delay_200_500_e50, "B116XAB01.4"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x1e9b, &delay_200_500_e50, "B133UAN02.1"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x1ea5, &delay_200_500_e50, "B116XAK01.6"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x582d, &delay_200_500_e50, "B133UAN01.0"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"), EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0786, &delay_200_500_p2e80, "NV116WHM-T01"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x07d1, &boe_nv133fhm_n61.delay, "NV133FHM-N61"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x082d, &boe_nv133fhm_n61.delay, "NV133FHM-N62"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x094b, &delay_200_500_e50, "NT116WHM-N21"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x095f, &delay_200_500_e50, "NE135FBM-N41 v8.1"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x098d, &boe_nv110wtm_n61.delay, "NV110WTM-N61"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x09dd, &delay_200_500_e50, "NT116WHM-N21"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1139, &delay_200_500_e80_d50, "N116BGE-EA2"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x114c, &innolux_n116bca_ea1.delay, "N116BCA-EA1"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1152, &delay_200_500_e80_d50, "N116BCN-EA1"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1153, &delay_200_500_e80_d50, "N116BGE-EA2"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1154, &delay_200_500_e80_d50, "N116BCA-EA2"), EDP_PANEL_ENTRY('C', 
'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d4, &delay_200_500_e80_d50, "N140HCA-EAC"), EDP_PANEL_ENTRY('I', 'V', 'O', 0x057d, &delay_200_500_e200, "R140NWF5 RH"), EDP_PANEL_ENTRY('I', 'V', 'O', 0x854a, &delay_200_500_p2e100, "M133NW4J"), EDP_PANEL_ENTRY('I', 'V', 'O', 0x854b, &delay_200_500_p2e100, "R133NW4K-R0"), EDP_PANEL_ENTRY('K', 'D', 'B', 0x0624, &kingdisplay_kd116n21_30nv_a010.delay, "116N21-30NV-A010"), EDP_PANEL_ENTRY('K', 'D', 'B', 0x1120, &delay_200_500_e80_d50, "116N29-30NK-C007"), EDP_PANEL_ENTRY('S', 'H', 'P', 0x1511, &delay_200_500_e50, "LQ140M1JW48"), EDP_PANEL_ENTRY('S', 'H', 'P', 0x1523, &sharp_lq140m1jw46.delay, "LQ140M1JW46"), EDP_PANEL_ENTRY('S', 'H', 'P', 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"), EDP_PANEL_ENTRY('S', 'T', 'A', 0x0100, &delay_100_500_e200, "2081116HHD028001-51D"), { /* sentinal */ } }; static const struct edp_panel_entry *find_edp_panel(u32 panel_id) { const struct edp_panel_entry *panel; if (!panel_id) return NULL; for (panel = edp_panels; panel->panel_id; panel++) if (panel->panel_id == panel_id) return panel; return NULL; } static int panel_edp_platform_probe(struct platform_device *pdev) { const struct of_device_id *id; /* Skip one since "edp-panel" is only supported on DP AUX bus */ id = of_match_node(platform_of_match + 1, pdev->dev.of_node); if (!id) return -ENODEV; return panel_edp_probe(&pdev->dev, id->data, NULL); } static void panel_edp_platform_remove(struct platform_device *pdev) { panel_edp_remove(&pdev->dev); } static void panel_edp_platform_shutdown(struct platform_device *pdev) { panel_edp_shutdown(&pdev->dev); } static const struct dev_pm_ops panel_edp_pm_ops = { SET_RUNTIME_PM_OPS(panel_edp_suspend, panel_edp_resume, NULL) SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; static struct platform_driver panel_edp_platform_driver = { .driver = { .name = "panel-edp", .of_match_table = platform_of_match, .pm = &panel_edp_pm_ops, }, 
.probe = panel_edp_platform_probe, .remove_new = panel_edp_platform_remove, .shutdown = panel_edp_platform_shutdown, }; static int panel_edp_dp_aux_ep_probe(struct dp_aux_ep_device *aux_ep) { const struct of_device_id *id; id = of_match_node(platform_of_match, aux_ep->dev.of_node); if (!id) return -ENODEV; return panel_edp_probe(&aux_ep->dev, id->data, aux_ep->aux); } static void panel_edp_dp_aux_ep_remove(struct dp_aux_ep_device *aux_ep) { panel_edp_remove(&aux_ep->dev); } static void panel_edp_dp_aux_ep_shutdown(struct dp_aux_ep_device *aux_ep) { panel_edp_shutdown(&aux_ep->dev); } static struct dp_aux_ep_driver panel_edp_dp_aux_ep_driver = { .driver = { .name = "panel-simple-dp-aux", .of_match_table = platform_of_match, /* Same as platform one! */ .pm = &panel_edp_pm_ops, }, .probe = panel_edp_dp_aux_ep_probe, .remove = panel_edp_dp_aux_ep_remove, .shutdown = panel_edp_dp_aux_ep_shutdown, }; static int __init panel_edp_init(void) { int err; err = platform_driver_register(&panel_edp_platform_driver); if (err < 0) return err; err = dp_aux_dp_driver_register(&panel_edp_dp_aux_ep_driver); if (err < 0) goto err_did_platform_register; return 0; err_did_platform_register: platform_driver_unregister(&panel_edp_platform_driver); return err; } module_init(panel_edp_init); static void __exit panel_edp_exit(void) { dp_aux_dp_driver_unregister(&panel_edp_dp_aux_ep_driver); platform_driver_unregister(&panel_edp_platform_driver); } module_exit(panel_edp_exit); MODULE_AUTHOR("Thierry Reding <[email protected]>"); MODULE_DESCRIPTION("DRM Driver for Simple eDP Panels"); MODULE_LICENSE("GPL and additional rights");
linux-master
drivers/gpu/drm/panel/panel-edp.c
// SPDX-License-Identifier: GPL-2.0 /* * Panel driver for the WideChips WS2401 480x800 DPI RGB panel, used in * the Samsung Mobile Display (SMD) LMS380KF01. * Found in the Samsung Galaxy Ace 2 GT-I8160 mobile phone. * Linus Walleij <[email protected]> * Inspired by code and know-how in the vendor driver by Gareth Phillips. */ #include <drm/drm_mipi_dbi.h> #include <drm/drm_modes.h> #include <drm/drm_panel.h> #include <linux/backlight.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/media-bus-format.h> #include <linux/module.h> #include <linux/regulator/consumer.h> #include <linux/spi/spi.h> #include <video/mipi_display.h> #define WS2401_RESCTL 0xb8 /* Resolution select control */ #define WS2401_PSMPS 0xbd /* SMPS positive control */ #define WS2401_NSMPS 0xbe /* SMPS negative control */ #define WS2401_SMPS 0xbf #define WS2401_BCMODE 0xc1 /* Backlight control mode */ #define WS2401_WRBLCTL 0xc3 /* Backlight control */ #define WS2401_WRDISBV 0xc4 /* Write manual brightness */ #define WS2401_WRCTRLD 0xc6 /* Write BL control */ #define WS2401_WRMIE 0xc7 /* Write MIE mode */ #define WS2401_READ_ID1 0xda /* Read panel ID 1 */ #define WS2401_READ_ID2 0xdb /* Read panel ID 2 */ #define WS2401_READ_ID3 0xdc /* Read panel ID 3 */ #define WS2401_GAMMA_R1 0xe7 /* Gamma red 1 */ #define WS2401_GAMMA_G1 0xe8 /* Gamma green 1 */ #define WS2401_GAMMA_B1 0xe9 /* Gamma blue 1 */ #define WS2401_GAMMA_R2 0xea /* Gamma red 2 */ #define WS2401_GAMMA_G2 0xeb /* Gamma green 2 */ #define WS2401_GAMMA_B2 0xec /* Gamma blue 2 */ #define WS2401_PASSWD1 0xf0 /* Password command for level 2 */ #define WS2401_DISCTL 0xf2 /* Display control */ #define WS2401_PWRCTL 0xf3 /* Power control */ #define WS2401_VCOMCTL 0xf4 /* VCOM control */ #define WS2401_SRCCTL 0xf5 /* Source control */ #define WS2401_PANELCTL 0xf6 /* Panel control */ static const u8 ws2401_dbi_read_commands[] = { WS2401_READ_ID1, WS2401_READ_ID2, 
WS2401_READ_ID3, 0, /* sentinel */ }; /** * struct ws2401 - state container for a panel controlled by the WS2401 * controller */ struct ws2401 { /** @dev: the container device */ struct device *dev; /** @dbi: the DBI bus abstraction handle */ struct mipi_dbi dbi; /** @panel: the DRM panel instance for this device */ struct drm_panel panel; /** @width: the width of this panel in mm */ u32 width; /** @height: the height of this panel in mm */ u32 height; /** @reset: reset GPIO line */ struct gpio_desc *reset; /** @regulators: VCCIO and VIO supply regulators */ struct regulator_bulk_data regulators[2]; /** @internal_bl: If using internal backlight */ bool internal_bl; }; static const struct drm_display_mode lms380kf01_480_800_mode = { /* * The vendor driver states that the "SMD panel" has a clock * frequency of 49920000 Hz / 2 = 24960000 Hz. */ .clock = 24960, .hdisplay = 480, .hsync_start = 480 + 8, .hsync_end = 480 + 8 + 10, .htotal = 480 + 8 + 10 + 8, .vdisplay = 800, .vsync_start = 800 + 8, .vsync_end = 800 + 8 + 2, .vtotal = 800 + 8 + 2 + 18, .width_mm = 50, .height_mm = 84, .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, }; static inline struct ws2401 *to_ws2401(struct drm_panel *panel) { return container_of(panel, struct ws2401, panel); } static void ws2401_read_mtp_id(struct ws2401 *ws) { struct mipi_dbi *dbi = &ws->dbi; u8 id1, id2, id3; int ret; ret = mipi_dbi_command_read(dbi, WS2401_READ_ID1, &id1); if (ret) { dev_err(ws->dev, "unable to read MTP ID 1\n"); return; } ret = mipi_dbi_command_read(dbi, WS2401_READ_ID2, &id2); if (ret) { dev_err(ws->dev, "unable to read MTP ID 2\n"); return; } ret = mipi_dbi_command_read(dbi, WS2401_READ_ID3, &id3); if (ret) { dev_err(ws->dev, "unable to read MTP ID 3\n"); return; } dev_info(ws->dev, "MTP ID: %02x %02x %02x\n", id1, id2, id3); } static int ws2401_power_on(struct ws2401 *ws) { struct mipi_dbi *dbi = &ws->dbi; int ret; /* Power up */ ret = regulator_bulk_enable(ARRAY_SIZE(ws->regulators), ws->regulators); 
if (ret) { dev_err(ws->dev, "failed to enable regulators: %d\n", ret); return ret; } msleep(10); /* Assert reset >=1 ms */ gpiod_set_value_cansleep(ws->reset, 1); usleep_range(1000, 5000); /* De-assert reset */ gpiod_set_value_cansleep(ws->reset, 0); /* Wait >= 10 ms */ msleep(10); dev_dbg(ws->dev, "de-asserted RESET\n"); /* * Exit sleep mode and initialize display - some hammering is * necessary. */ mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE); mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE); msleep(50); /* Magic to unlock level 2 control of the display */ mipi_dbi_command(dbi, WS2401_PASSWD1, 0x5a, 0x5a); /* Configure resolution to 480RGBx800 */ mipi_dbi_command(dbi, WS2401_RESCTL, 0x12); /* Set addressing mode Flip V(d0), Flip H(d1) RGB/BGR(d3) */ mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, 0x01); /* Set pixel format: 24 bpp */ mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT, 0x70); mipi_dbi_command(dbi, WS2401_SMPS, 0x00, 0x0f); mipi_dbi_command(dbi, WS2401_PSMPS, 0x06, 0x03, /* DDVDH: 4.6v */ 0x7e, 0x03, 0x12, 0x37); mipi_dbi_command(dbi, WS2401_NSMPS, 0x06, 0x03, /* DDVDH: -4.6v */ 0x7e, 0x02, 0x15, 0x37); mipi_dbi_command(dbi, WS2401_SMPS, 0x02, 0x0f); mipi_dbi_command(dbi, WS2401_PWRCTL, 0x10, 0xA9, 0x00, 0x01, 0x44, 0xb4, /* VGH:16.1v, VGL:-13.8v */ 0x50, /* GREFP:4.2v (default) */ 0x50, /* GREFN:-4.2v (default) */ 0x00, 0x44); /* VOUTL:-10v (default) */ mipi_dbi_command(dbi, WS2401_DISCTL, 0x01, 0x00, 0x00, 0x00, 0x14, 0x16); mipi_dbi_command(dbi, WS2401_VCOMCTL, 0x30, 0x53, 0x53); mipi_dbi_command(dbi, WS2401_SRCCTL, 0x03, 0x0C, 0x00, 0x00, 0x00, 0x01, /* 2 dot inversion */ 0x01, 0x06, 0x03); mipi_dbi_command(dbi, WS2401_PANELCTL, 0x14, 0x00, 0x80, 0x00); mipi_dbi_command(dbi, WS2401_WRMIE, 0x01); /* Set up gamma, probably these are P-gamma and N-gamma for each color */ mipi_dbi_command(dbi, WS2401_GAMMA_R1, 0x00, 0x5b, 0x42, 0x41, 0x3f, 0x42, 0x3d, 0x38, 0x2e, 0x2b, 0x2a, 0x27, 0x22, 0x27, 0x0f, 0x00, 0x00); mipi_dbi_command(dbi, 
WS2401_GAMMA_R2, 0x00, 0x5b, 0x42, 0x41, 0x3f, 0x42, 0x3d, 0x38, 0x2e, 0x2b, 0x2a, 0x27, 0x22, 0x27, 0x0f, 0x00, 0x00); mipi_dbi_command(dbi, WS2401_GAMMA_G1, 0x00, 0x59, 0x40, 0x3f, 0x3e, 0x41, 0x3d, 0x39, 0x2f, 0x2c, 0x2b, 0x29, 0x25, 0x29, 0x19, 0x08, 0x00); mipi_dbi_command(dbi, WS2401_GAMMA_G2, 0x00, 0x59, 0x40, 0x3f, 0x3e, 0x41, 0x3d, 0x39, 0x2f, 0x2c, 0x2b, 0x29, 0x25, 0x29, 0x19, 0x08, 0x00); mipi_dbi_command(dbi, WS2401_GAMMA_B1, 0x00, 0x57, 0x3b, 0x3a, 0x3b, 0x3f, 0x3b, 0x38, 0x27, 0x38, 0x2a, 0x26, 0x22, 0x34, 0x0c, 0x09, 0x00); mipi_dbi_command(dbi, WS2401_GAMMA_B2, 0x00, 0x57, 0x3b, 0x3a, 0x3b, 0x3f, 0x3b, 0x38, 0x27, 0x38, 0x2a, 0x26, 0x22, 0x34, 0x0c, 0x09, 0x00); if (ws->internal_bl) { mipi_dbi_command(dbi, WS2401_WRCTRLD, 0x2c); } else { mipi_dbi_command(dbi, WS2401_WRCTRLD, 0x00); /* * When not using internal backlight we do not need any further * L2 accesses to the panel so we close the door on our way out. * Otherwise we need to leave the L2 door open. */ mipi_dbi_command(dbi, WS2401_PASSWD1, 0xa5, 0xa5); } return 0; } static int ws2401_power_off(struct ws2401 *ws) { /* Go into RESET and disable regulators */ gpiod_set_value_cansleep(ws->reset, 1); return regulator_bulk_disable(ARRAY_SIZE(ws->regulators), ws->regulators); } static int ws2401_unprepare(struct drm_panel *panel) { struct ws2401 *ws = to_ws2401(panel); struct mipi_dbi *dbi = &ws->dbi; /* Make sure we disable backlight, if any */ if (ws->internal_bl) mipi_dbi_command(dbi, WS2401_WRCTRLD, 0x00); mipi_dbi_command(dbi, MIPI_DCS_ENTER_SLEEP_MODE); msleep(120); return ws2401_power_off(to_ws2401(panel)); } static int ws2401_disable(struct drm_panel *panel) { struct ws2401 *ws = to_ws2401(panel); struct mipi_dbi *dbi = &ws->dbi; mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF); msleep(25); return 0; } static int ws2401_prepare(struct drm_panel *panel) { return ws2401_power_on(to_ws2401(panel)); } static int ws2401_enable(struct drm_panel *panel) { struct ws2401 *ws = to_ws2401(panel); struct 
mipi_dbi *dbi = &ws->dbi; mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON); return 0; } /** * ws2401_get_modes() - return the mode * @panel: the panel to get the mode for * @connector: reference to the central DRM connector control structure */ static int ws2401_get_modes(struct drm_panel *panel, struct drm_connector *connector) { struct ws2401 *ws = to_ws2401(panel); struct drm_display_mode *mode; static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24; /* * We just support the LMS380KF01 so far, if we implement more panels * this mode, the following connector display_info settings and * probably the custom DCS sequences needs to selected based on what * the target panel needs. */ mode = drm_mode_duplicate(connector->dev, &lms380kf01_480_800_mode); if (!mode) { dev_err(ws->dev, "failed to add mode\n"); return -ENOMEM; } connector->display_info.bpc = 8; connector->display_info.width_mm = mode->width_mm; connector->display_info.height_mm = mode->height_mm; connector->display_info.bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE; drm_display_info_set_bus_formats(&connector->display_info, &bus_format, 1); drm_mode_set_name(mode); mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; drm_mode_probed_add(connector, mode); return 1; } static const struct drm_panel_funcs ws2401_drm_funcs = { .disable = ws2401_disable, .unprepare = ws2401_unprepare, .prepare = ws2401_prepare, .enable = ws2401_enable, .get_modes = ws2401_get_modes, }; static int ws2401_set_brightness(struct backlight_device *bl) { struct ws2401 *ws = bl_get_data(bl); struct mipi_dbi *dbi = &ws->dbi; u8 brightness = backlight_get_brightness(bl); if (backlight_is_blank(bl)) { mipi_dbi_command(dbi, WS2401_WRCTRLD, 0x00); } else { mipi_dbi_command(dbi, WS2401_WRCTRLD, 0x2c); mipi_dbi_command(dbi, WS2401_WRDISBV, brightness); } return 0; } static const struct backlight_ops ws2401_bl_ops = { .update_status = ws2401_set_brightness, }; static const struct backlight_properties ws2401_bl_props = { .type = 
BACKLIGHT_PLATFORM, .brightness = 120, .max_brightness = U8_MAX, }; static int ws2401_probe(struct spi_device *spi) { struct device *dev = &spi->dev; struct ws2401 *ws; int ret; ws = devm_kzalloc(dev, sizeof(*ws), GFP_KERNEL); if (!ws) return -ENOMEM; ws->dev = dev; /* * VCI is the analog voltage supply * VCCIO is the digital I/O voltage supply */ ws->regulators[0].supply = "vci"; ws->regulators[1].supply = "vccio"; ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ws->regulators), ws->regulators); if (ret) return dev_err_probe(dev, ret, "failed to get regulators\n"); ws->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(ws->reset)) { ret = PTR_ERR(ws->reset); return dev_err_probe(dev, ret, "no RESET GPIO\n"); } ret = mipi_dbi_spi_init(spi, &ws->dbi, NULL); if (ret) return dev_err_probe(dev, ret, "MIPI DBI init failed\n"); ws->dbi.read_commands = ws2401_dbi_read_commands; ws2401_power_on(ws); ws2401_read_mtp_id(ws); ws2401_power_off(ws); drm_panel_init(&ws->panel, dev, &ws2401_drm_funcs, DRM_MODE_CONNECTOR_DPI); ret = drm_panel_of_backlight(&ws->panel); if (ret) return dev_err_probe(dev, ret, "failed to get external backlight device\n"); if (!ws->panel.backlight) { dev_dbg(dev, "no external backlight, using internal backlight\n"); ws->panel.backlight = devm_backlight_device_register(dev, "ws2401", dev, ws, &ws2401_bl_ops, &ws2401_bl_props); if (IS_ERR(ws->panel.backlight)) return dev_err_probe(dev, PTR_ERR(ws->panel.backlight), "failed to register backlight device\n"); } else { dev_dbg(dev, "using external backlight\n"); } spi_set_drvdata(spi, ws); drm_panel_add(&ws->panel); dev_dbg(dev, "added panel\n"); return 0; } static void ws2401_remove(struct spi_device *spi) { struct ws2401 *ws = spi_get_drvdata(spi); drm_panel_remove(&ws->panel); } /* * Samsung LMS380KF01 is the one instance of this display controller that we * know about, but if more are found, the controller can be parameterized * here and used for other configurations. 
*/ static const struct of_device_id ws2401_match[] = { { .compatible = "samsung,lms380kf01", }, {}, }; MODULE_DEVICE_TABLE(of, ws2401_match); static const struct spi_device_id ws2401_ids[] = { { "lms380kf01" }, { }, }; MODULE_DEVICE_TABLE(spi, ws2401_ids); static struct spi_driver ws2401_driver = { .probe = ws2401_probe, .remove = ws2401_remove, .id_table = ws2401_ids, .driver = { .name = "ws2401-panel", .of_match_table = ws2401_match, }, }; module_spi_driver(ws2401_driver); MODULE_AUTHOR("Linus Walleij <[email protected]>"); MODULE_DESCRIPTION("Samsung WS2401 panel driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/panel/panel-widechips-ws2401.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Novatek NT39016 TFT LCD panel driver
 *
 * Copyright (C) 2017, Maarten ter Huurne <[email protected]>
 * Copyright (C) 2019, Paul Cercueil <[email protected]>
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>

#include <drm/drm_modes.h>
#include <drm/drm_panel.h>

/* Controller register map; values past NT39016_REG_VGAM56 are sparse */
enum nt39016_regs {
	NT39016_REG_SYSTEM,
	NT39016_REG_TIMING,
	NT39016_REG_OP,
	NT39016_REG_DATA_IN,
	NT39016_REG_SRC_TIMING_DELAY,
	NT39016_REG_GATE_TIMING_DELAY,
	NT39016_REG_RESERVED,
	NT39016_REG_INITIAL_FUNC,
	NT39016_REG_CONTRAST,
	NT39016_REG_BRIGHTNESS,
	NT39016_REG_HUE_SATURATION,
	NT39016_REG_RB_SUBCONTRAST,
	NT39016_REG_R_SUBBRIGHTNESS,
	NT39016_REG_B_SUBBRIGHTNESS,
	NT39016_REG_VCOMDC,
	NT39016_REG_VCOMAC,
	NT39016_REG_VGAM2,
	NT39016_REG_VGAM34,
	NT39016_REG_VGAM56,
	NT39016_REG_VCOMDC_TRIM = 0x1e,
	NT39016_REG_DISPLAY_MODE = 0x20,
};

/* Bits in NT39016_REG_SYSTEM */
#define NT39016_SYSTEM_RESET_N	BIT(0)
#define NT39016_SYSTEM_STANDBY	BIT(1)

/* Per-compatible panel description (modes, physical size, bus format) */
struct nt39016_panel_info {
	const struct drm_display_mode *display_modes;
	unsigned int num_modes;
	u16 width_mm, height_mm;
	u32 bus_format, bus_flags;
};

/* Driver state: DRM panel plus regmap/regulator/reset plumbing */
struct nt39016 {
	struct drm_panel drm_panel;
	struct regmap *map;
	struct regulator *supply;
	const struct nt39016_panel_info *panel_info;

	struct gpio_desc *reset_gpio;
};

static inline struct nt39016 *to_nt39016(struct drm_panel *panel)
{
	return container_of(panel, struct nt39016, drm_panel);
}

#define RV(REG, VAL) { .reg = (REG), .def = (VAL), .delay_us = 2 }
/* Full register init sequence written once per prepare */
static const struct reg_sequence nt39016_panel_regs[] = {
	RV(NT39016_REG_SYSTEM,            0x00),
	RV(NT39016_REG_TIMING,            0x00),
	RV(NT39016_REG_OP,                0x03),
	RV(NT39016_REG_DATA_IN,           0xCC),
	RV(NT39016_REG_SRC_TIMING_DELAY,  0x46),
	RV(NT39016_REG_GATE_TIMING_DELAY, 0x05),
	RV(NT39016_REG_RESERVED,          0x00),
	RV(NT39016_REG_INITIAL_FUNC,      0x00),
	RV(NT39016_REG_CONTRAST,          0x08),
	RV(NT39016_REG_BRIGHTNESS,        0x40),
	RV(NT39016_REG_HUE_SATURATION,    0x88),
	RV(NT39016_REG_RB_SUBCONTRAST,    0x88),
	RV(NT39016_REG_R_SUBBRIGHTNESS,   0x20),
	RV(NT39016_REG_B_SUBBRIGHTNESS,   0x20),
	RV(NT39016_REG_VCOMDC,            0x67),
	RV(NT39016_REG_VCOMAC,            0xA4),
	RV(NT39016_REG_VGAM2,             0x04),
	RV(NT39016_REG_VGAM34,            0x24),
	RV(NT39016_REG_VGAM56,            0x24),
	RV(NT39016_REG_DISPLAY_MODE,      0x00),
};

#undef RV

/* Holes in the register map that must never be accessed */
static const struct regmap_range nt39016_regmap_no_ranges[] = {
	regmap_reg_range(0x13, 0x1D),
	regmap_reg_range(0x1F, 0x1F),
};

static const struct regmap_access_table nt39016_regmap_access_table = {
	.no_ranges = nt39016_regmap_no_ranges,
	.n_no_ranges = ARRAY_SIZE(nt39016_regmap_no_ranges),
};

/* 6-bit register address + 2 pad bits + 8-bit value, write flag in bit 1 */
static const struct regmap_config nt39016_regmap_config = {
	.reg_bits = 6,
	.pad_bits = 2,
	.val_bits = 8,

	.max_register = NT39016_REG_DISPLAY_MODE,
	.wr_table = &nt39016_regmap_access_table,
	.write_flag_mask = 0x02,

	.cache_type = REGCACHE_FLAT,
};

/* Power up the supply, pulse reset, then program the init sequence */
static int nt39016_prepare(struct drm_panel *drm_panel)
{
	struct nt39016 *panel = to_nt39016(drm_panel);
	int err;

	err = regulator_enable(panel->supply);
	if (err) {
		dev_err(drm_panel->dev, "Failed to enable power supply: %d\n", err);
		return err;
	}

	/*
	 * Reset the NT39016.
	 * The documentation says the reset pulse should be at least 40 us to
	 * pass the glitch filter, but when testing I see some resets fail and
	 * some succeed when using a 70 us delay, so we use 100 us instead.
	 */
	gpiod_set_value_cansleep(panel->reset_gpio, 1);
	usleep_range(100, 1000);
	gpiod_set_value_cansleep(panel->reset_gpio, 0);
	udelay(2);

	/* Init all registers. */
	err = regmap_multi_reg_write(panel->map, nt39016_panel_regs,
				     ARRAY_SIZE(nt39016_panel_regs));
	if (err) {
		dev_err(drm_panel->dev, "Failed to init registers: %d\n", err);
		goto err_disable_regulator;
	}

	return 0;

err_disable_regulator:
	regulator_disable(panel->supply);
	return err;
}

/* Hold the panel in reset and cut its supply */
static int nt39016_unprepare(struct drm_panel *drm_panel)
{
	struct nt39016 *panel = to_nt39016(drm_panel);

	gpiod_set_value_cansleep(panel->reset_gpio, 1);
	regulator_disable(panel->supply);

	return 0;
}

/* Take the controller out of standby; optionally wait before backlight-on */
static int nt39016_enable(struct drm_panel *drm_panel)
{
	struct nt39016 *panel = to_nt39016(drm_panel);
	int ret;

	ret = regmap_write(panel->map, NT39016_REG_SYSTEM,
			   NT39016_SYSTEM_RESET_N | NT39016_SYSTEM_STANDBY);
	if (ret) {
		dev_err(drm_panel->dev, "Unable to enable panel: %d\n", ret);
		return ret;
	}

	if (drm_panel->backlight) {
		/* Wait for the picture to be ready before enabling backlight */
		msleep(150);
	}

	return 0;
}

/* Drop the STANDBY bit while keeping reset released */
static int nt39016_disable(struct drm_panel *drm_panel)
{
	struct nt39016 *panel = to_nt39016(drm_panel);
	int err;

	err = regmap_write(panel->map, NT39016_REG_SYSTEM,
			   NT39016_SYSTEM_RESET_N);
	if (err) {
		dev_err(drm_panel->dev, "Unable to disable panel: %d\n", err);
		return err;
	}

	return 0;
}

/* Duplicate the per-panel mode list into the connector */
static int nt39016_get_modes(struct drm_panel *drm_panel,
			     struct drm_connector *connector)
{
	struct nt39016 *panel = to_nt39016(drm_panel);
	const struct nt39016_panel_info *panel_info = panel->panel_info;
	struct drm_display_mode *mode;
	unsigned int i;

	for (i = 0; i < panel_info->num_modes; i++) {
		mode = drm_mode_duplicate(connector->dev,
					  &panel_info->display_modes[i]);
		if (!mode)
			return -ENOMEM;

		drm_mode_set_name(mode);

		mode->type = DRM_MODE_TYPE_DRIVER;
		if (panel_info->num_modes == 1)
			mode->type |= DRM_MODE_TYPE_PREFERRED;

		drm_mode_probed_add(connector, mode);
	}

	connector->display_info.bpc = 8;
	connector->display_info.width_mm = panel_info->width_mm;
	connector->display_info.height_mm = panel_info->height_mm;

	drm_display_info_set_bus_formats(&connector->display_info,
					 &panel_info->bus_format, 1);
	connector->display_info.bus_flags = panel_info->bus_flags;

	return panel_info->num_modes;
}

static const struct drm_panel_funcs nt39016_funcs = {
	.prepare	= nt39016_prepare,
	.unprepare	= nt39016_unprepare,
	.enable		= nt39016_enable,
	.disable	= nt39016_disable,
	.get_modes	= nt39016_get_modes,
};

static int nt39016_probe(struct spi_device *spi)
{
	struct device *dev = &spi->dev;
	struct nt39016 *panel;
	int err;

	panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
	if (!panel)
		return -ENOMEM;

	spi_set_drvdata(spi, panel);

	panel->panel_info = of_device_get_match_data(dev);
	if (!panel->panel_info)
		return -EINVAL;

	panel->supply = devm_regulator_get(dev, "power");
	if (IS_ERR(panel->supply))
		return dev_err_probe(dev, PTR_ERR(panel->supply),
				     "Failed to get power supply\n");

	panel->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(panel->reset_gpio))
		return dev_err_probe(dev, PTR_ERR(panel->reset_gpio),
				     "Failed to get reset GPIO\n");

	spi->bits_per_word = 8;
	spi->mode = SPI_MODE_3 | SPI_3WIRE;
	err = spi_setup(spi);
	if (err) {
		dev_err(dev, "Failed to setup SPI\n");
		return err;
	}

	panel->map = devm_regmap_init_spi(spi, &nt39016_regmap_config);
	if (IS_ERR(panel->map)) {
		dev_err(dev, "Failed to init regmap\n");
		return PTR_ERR(panel->map);
	}

	drm_panel_init(&panel->drm_panel, dev, &nt39016_funcs,
		       DRM_MODE_CONNECTOR_DPI);

	err = drm_panel_of_backlight(&panel->drm_panel);
	if (err)
		return dev_err_probe(dev, err, "Failed to get backlight handle\n");

	drm_panel_add(&panel->drm_panel);

	return 0;
}

static void nt39016_remove(struct spi_device *spi)
{
	struct nt39016 *panel = spi_get_drvdata(spi);

	drm_panel_remove(&panel->drm_panel);

	nt39016_disable(&panel->drm_panel);
	nt39016_unprepare(&panel->drm_panel);
}

static const struct drm_display_mode kd035g6_display_modes[] = {
	{	/* 60 Hz */
		.clock = 6000,
		.hdisplay = 320,
		.hsync_start = 320 + 10,
		.hsync_end = 320 + 10 + 50,
		.htotal = 320 + 10 + 50 + 20,
		.vdisplay = 240,
		.vsync_start = 240 + 5,
		.vsync_end = 240 + 5 + 1,
		.vtotal = 240 + 5 + 1 + 4,
		.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
	},
	{	/* 50 Hz */
		.clock = 5400,
		.hdisplay = 320,
		.hsync_start = 320 + 42,
		.hsync_end = 320 + 42 + 50,
		.htotal = 320 + 42 + 50 + 20,
		.vdisplay = 240,
		.vsync_start = 240 + 5,
		.vsync_end = 240 + 5 + 1,
		.vtotal = 240 + 5 + 1 + 4,
		.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
	},
};

static const struct nt39016_panel_info kd035g6_info = {
	.display_modes = kd035g6_display_modes,
	.num_modes = ARRAY_SIZE(kd035g6_display_modes),
	.width_mm = 71,
	.height_mm = 53,
	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
	.bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
};

static const struct of_device_id nt39016_of_match[] = {
	{ .compatible = "kingdisplay,kd035g6-54nt", .data = &kd035g6_info },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, nt39016_of_match);

static struct spi_driver nt39016_driver = {
	.driver = {
		.name = "nt39016",
		.of_match_table = nt39016_of_match,
	},
	.probe = nt39016_probe,
	.remove = nt39016_remove,
};

module_spi_driver(nt39016_driver);

MODULE_AUTHOR("Maarten ter Huurne <[email protected]>");
MODULE_AUTHOR("Paul Cercueil <[email protected]>");
MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/panel/panel-novatek-nt39016.c
// SPDX-License-Identifier: GPL-2.0-only /* * BOE BF060Y8M-AJ0 5.99" MIPI-DSI OLED Panel on SW43404 DriverIC * * Copyright (c) 2020 AngeloGioacchino Del Regno * <[email protected]> */ #include <linux/backlight.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/of.h> #include <linux/regulator/consumer.h> #include <video/mipi_display.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_modes.h> #include <drm/drm_panel.h> #define DCS_ALLOW_HBM_RANGE 0x0c #define DCS_DISALLOW_HBM_RANGE 0x08 enum boe_bf060y8m_aj0_supplies { BF060Y8M_VREG_VCC, BF060Y8M_VREG_VDDIO, BF060Y8M_VREG_VCI, BF060Y8M_VREG_EL_VDD, BF060Y8M_VREG_EL_VSS, BF060Y8M_VREG_MAX }; struct boe_bf060y8m_aj0 { struct drm_panel panel; struct mipi_dsi_device *dsi; struct regulator_bulk_data vregs[BF060Y8M_VREG_MAX]; struct gpio_desc *reset_gpio; bool prepared; }; static inline struct boe_bf060y8m_aj0 *to_boe_bf060y8m_aj0(struct drm_panel *panel) { return container_of(panel, struct boe_bf060y8m_aj0, panel); } static void boe_bf060y8m_aj0_reset(struct boe_bf060y8m_aj0 *boe) { gpiod_set_value_cansleep(boe->reset_gpio, 0); usleep_range(2000, 3000); gpiod_set_value_cansleep(boe->reset_gpio, 1); usleep_range(15000, 16000); gpiod_set_value_cansleep(boe->reset_gpio, 0); usleep_range(5000, 6000); } static int boe_bf060y8m_aj0_on(struct boe_bf060y8m_aj0 *boe) { struct mipi_dsi_device *dsi = boe->dsi; struct device *dev = &dsi->dev; int ret; mipi_dsi_dcs_write_seq(dsi, 0xb0, 0xa5, 0x00); mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0x4c); mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_3D_CONTROL, 0x10); mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, DCS_ALLOW_HBM_RANGE); mipi_dsi_dcs_write_seq(dsi, 0xf8, 0x00, 0x08, 0x10, 0x00, 0x22, 0x00, 0x00, 0x2d); ret = mipi_dsi_dcs_exit_sleep_mode(dsi); if (ret < 0) { dev_err(dev, "Failed to exit sleep mode: %d\n", ret); return ret; } msleep(30); mipi_dsi_dcs_write_seq(dsi, 0xb0, 0xa5, 0x00); mipi_dsi_dcs_write_seq(dsi, 0xc0, 0x08, 
0x48, 0x65, 0x33, 0x33, 0x33, 0x2a, 0x31, 0x39, 0x20, 0x09); mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x00, 0x00, 0x00, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f); mipi_dsi_dcs_write_seq(dsi, 0xe2, 0x20, 0x04, 0x10, 0x12, 0x92, 0x4f, 0x8f, 0x44, 0x84, 0x83, 0x83, 0x83, 0x5c, 0x5c, 0x5c); mipi_dsi_dcs_write_seq(dsi, 0xde, 0x01, 0x2c, 0x00, 0x77, 0x3e); msleep(30); ret = mipi_dsi_dcs_set_display_on(dsi); if (ret < 0) { dev_err(dev, "Failed to set display on: %d\n", ret); return ret; } msleep(50); return 0; } static int boe_bf060y8m_aj0_off(struct boe_bf060y8m_aj0 *boe) { struct mipi_dsi_device *dsi = boe->dsi; struct device *dev = &dsi->dev; int ret; /* OFF commands sent in HS mode */ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; ret = mipi_dsi_dcs_set_display_off(dsi); if (ret < 0) { dev_err(dev, "Failed to set display off: %d\n", ret); return ret; } msleep(20); ret = mipi_dsi_dcs_enter_sleep_mode(dsi); if (ret < 0) { dev_err(dev, "Failed to enter sleep mode: %d\n", ret); return ret; } usleep_range(1000, 2000); dsi->mode_flags |= MIPI_DSI_MODE_LPM; return 0; } static int boe_bf060y8m_aj0_prepare(struct drm_panel *panel) { struct boe_bf060y8m_aj0 *boe = to_boe_bf060y8m_aj0(panel); struct device *dev = &boe->dsi->dev; int ret; if (boe->prepared) return 0; /* * Enable EL Driving Voltage first - doing that at the beginning * or at the end of the power sequence doesn't matter, so enable * it here to avoid yet another usleep at the end. 
*/ ret = regulator_enable(boe->vregs[BF060Y8M_VREG_EL_VDD].consumer); if (ret) return ret; ret = regulator_enable(boe->vregs[BF060Y8M_VREG_EL_VSS].consumer); if (ret) goto err_elvss; ret = regulator_enable(boe->vregs[BF060Y8M_VREG_VCC].consumer); if (ret) goto err_vcc; usleep_range(1000, 2000); ret = regulator_enable(boe->vregs[BF060Y8M_VREG_VDDIO].consumer); if (ret) goto err_vddio; usleep_range(500, 1000); ret = regulator_enable(boe->vregs[BF060Y8M_VREG_VCI].consumer); if (ret) goto err_vci; usleep_range(2000, 3000); boe_bf060y8m_aj0_reset(boe); ret = boe_bf060y8m_aj0_on(boe); if (ret < 0) { dev_err(dev, "Failed to initialize panel: %d\n", ret); gpiod_set_value_cansleep(boe->reset_gpio, 1); return ret; } boe->prepared = true; return 0; err_vci: regulator_disable(boe->vregs[BF060Y8M_VREG_VDDIO].consumer); err_vddio: regulator_disable(boe->vregs[BF060Y8M_VREG_VCC].consumer); err_vcc: regulator_disable(boe->vregs[BF060Y8M_VREG_EL_VSS].consumer); err_elvss: regulator_disable(boe->vregs[BF060Y8M_VREG_EL_VDD].consumer); return ret; } static int boe_bf060y8m_aj0_unprepare(struct drm_panel *panel) { struct boe_bf060y8m_aj0 *boe = to_boe_bf060y8m_aj0(panel); struct device *dev = &boe->dsi->dev; int ret; if (!boe->prepared) return 0; ret = boe_bf060y8m_aj0_off(boe); if (ret < 0) dev_err(dev, "Failed to un-initialize panel: %d\n", ret); gpiod_set_value_cansleep(boe->reset_gpio, 1); ret = regulator_bulk_disable(ARRAY_SIZE(boe->vregs), boe->vregs); boe->prepared = false; return 0; } static const struct drm_display_mode boe_bf060y8m_aj0_mode = { .clock = 165268, .hdisplay = 1080, .hsync_start = 1080 + 36, .hsync_end = 1080 + 36 + 24, .htotal = 1080 + 36 + 24 + 96, .vdisplay = 2160, .vsync_start = 2160 + 16, .vsync_end = 2160 + 16 + 1, .vtotal = 2160 + 16 + 1 + 15, .width_mm = 68, /* 68.04 mm */ .height_mm = 136, /* 136.08 mm */ }; static int boe_bf060y8m_aj0_get_modes(struct drm_panel *panel, struct drm_connector *connector) { struct drm_display_mode *mode; mode = 
drm_mode_duplicate(connector->dev, &boe_bf060y8m_aj0_mode); if (!mode) return -ENOMEM; drm_mode_set_name(mode); mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; connector->display_info.width_mm = mode->width_mm; connector->display_info.height_mm = mode->height_mm; drm_mode_probed_add(connector, mode); return 1; } static const struct drm_panel_funcs boe_bf060y8m_aj0_panel_funcs = { .prepare = boe_bf060y8m_aj0_prepare, .unprepare = boe_bf060y8m_aj0_unprepare, .get_modes = boe_bf060y8m_aj0_get_modes, }; static int boe_bf060y8m_aj0_bl_update_status(struct backlight_device *bl) { struct mipi_dsi_device *dsi = bl_get_data(bl); u16 brightness = backlight_get_brightness(bl); int ret; ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness); if (ret < 0) return ret; return 0; } static int boe_bf060y8m_aj0_bl_get_brightness(struct backlight_device *bl) { struct mipi_dsi_device *dsi = bl_get_data(bl); u16 brightness; int ret; ret = mipi_dsi_dcs_get_display_brightness(dsi, &brightness); if (ret < 0) return ret; return brightness & 0xff; } static const struct backlight_ops boe_bf060y8m_aj0_bl_ops = { .update_status = boe_bf060y8m_aj0_bl_update_status, .get_brightness = boe_bf060y8m_aj0_bl_get_brightness, }; static struct backlight_device * boe_bf060y8m_aj0_create_backlight(struct mipi_dsi_device *dsi) { struct device *dev = &dsi->dev; const struct backlight_properties props = { .type = BACKLIGHT_RAW, .brightness = 127, .max_brightness = 255, .scale = BACKLIGHT_SCALE_NON_LINEAR, }; return devm_backlight_device_register(dev, dev_name(dev), dev, dsi, &boe_bf060y8m_aj0_bl_ops, &props); } static int boe_bf060y8m_aj0_init_vregs(struct boe_bf060y8m_aj0 *boe, struct device *dev) { struct regulator *vreg; int ret; boe->vregs[BF060Y8M_VREG_VCC].supply = "vcc"; boe->vregs[BF060Y8M_VREG_VDDIO].supply = "vddio"; boe->vregs[BF060Y8M_VREG_VCI].supply = "vci"; boe->vregs[BF060Y8M_VREG_EL_VDD].supply = "elvdd"; boe->vregs[BF060Y8M_VREG_EL_VSS].supply = "elvss"; ret = 
devm_regulator_bulk_get(dev, ARRAY_SIZE(boe->vregs), boe->vregs); if (ret < 0) { dev_err(dev, "Failed to get regulators: %d\n", ret); return ret; } vreg = boe->vregs[BF060Y8M_VREG_VCC].consumer; ret = regulator_is_supported_voltage(vreg, 2700000, 3600000); if (!ret) return ret; vreg = boe->vregs[BF060Y8M_VREG_VDDIO].consumer; ret = regulator_is_supported_voltage(vreg, 1620000, 1980000); if (!ret) return ret; vreg = boe->vregs[BF060Y8M_VREG_VCI].consumer; ret = regulator_is_supported_voltage(vreg, 2600000, 3600000); if (!ret) return ret; vreg = boe->vregs[BF060Y8M_VREG_EL_VDD].consumer; ret = regulator_is_supported_voltage(vreg, 4400000, 4800000); if (!ret) return ret; /* ELVSS is negative: -5.00V to -1.40V */ vreg = boe->vregs[BF060Y8M_VREG_EL_VSS].consumer; ret = regulator_is_supported_voltage(vreg, 1400000, 5000000); if (!ret) return ret; /* * Set min/max rated current, known only for VCI and VDDIO and, * in case of failure, just go on gracefully, as this step is not * guaranteed to succeed on all regulator HW but do a debug print * to inform the developer during debugging. * In any case, these two supplies are also optional, so they may * be fixed-regulator which, at the time of writing, does not * support fake current limiting. 
*/ vreg = boe->vregs[BF060Y8M_VREG_VDDIO].consumer; ret = regulator_set_current_limit(vreg, 1500, 2500); if (ret) dev_dbg(dev, "Current limit cannot be set on %s: %d\n", boe->vregs[1].supply, ret); vreg = boe->vregs[BF060Y8M_VREG_VCI].consumer; ret = regulator_set_current_limit(vreg, 20000, 40000); if (ret) dev_dbg(dev, "Current limit cannot be set on %s: %d\n", boe->vregs[2].supply, ret); return 0; } static int boe_bf060y8m_aj0_probe(struct mipi_dsi_device *dsi) { struct device *dev = &dsi->dev; struct boe_bf060y8m_aj0 *boe; int ret; boe = devm_kzalloc(dev, sizeof(*boe), GFP_KERNEL); if (!boe) return -ENOMEM; ret = boe_bf060y8m_aj0_init_vregs(boe, dev); if (ret) return dev_err_probe(dev, ret, "Failed to initialize supplies.\n"); boe->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS); if (IS_ERR(boe->reset_gpio)) return dev_err_probe(dev, PTR_ERR(boe->reset_gpio), "Failed to get reset-gpios\n"); boe->dsi = dsi; mipi_dsi_set_drvdata(dsi, boe); dsi->lanes = 4; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM; drm_panel_init(&boe->panel, dev, &boe_bf060y8m_aj0_panel_funcs, DRM_MODE_CONNECTOR_DSI); boe->panel.backlight = boe_bf060y8m_aj0_create_backlight(dsi); if (IS_ERR(boe->panel.backlight)) return dev_err_probe(dev, PTR_ERR(boe->panel.backlight), "Failed to create backlight\n"); drm_panel_add(&boe->panel); ret = mipi_dsi_attach(dsi); if (ret < 0) { dev_err(dev, "Failed to attach to DSI host: %d\n", ret); return ret; } return 0; } static void boe_bf060y8m_aj0_remove(struct mipi_dsi_device *dsi) { struct boe_bf060y8m_aj0 *boe = mipi_dsi_get_drvdata(dsi); int ret; ret = mipi_dsi_detach(dsi); if (ret < 0) dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret); drm_panel_remove(&boe->panel); } static const struct of_device_id boe_bf060y8m_aj0_of_match[] = { { .compatible = "boe,bf060y8m-aj0" }, { /* sentinel */ } 
}; MODULE_DEVICE_TABLE(of, boe_bf060y8m_aj0_of_match); static struct mipi_dsi_driver boe_bf060y8m_aj0_driver = { .probe = boe_bf060y8m_aj0_probe, .remove = boe_bf060y8m_aj0_remove, .driver = { .name = "panel-sw43404-boe-fhd-amoled", .of_match_table = boe_bf060y8m_aj0_of_match, }, }; module_mipi_dsi_driver(boe_bf060y8m_aj0_driver); MODULE_AUTHOR("AngeloGioacchino Del Regno <[email protected]>"); MODULE_DESCRIPTION("BOE BF060Y8M-AJ0 MIPI-DSI OLED panel"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (c) 2019 Radxa Limited * Copyright (c) 2022 Edgeble AI Technologies Pvt. Ltd. * * Author: * - Jagan Teki <[email protected]> * - Stephen Chen <[email protected]> */ #include <drm/drm_mipi_dsi.h> #include <drm/drm_modes.h> #include <drm/drm_panel.h> #include <drm/drm_print.h> #include <linux/gpio/consumer.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/of.h> #include <linux/regulator/consumer.h> #define JD9365DA_INIT_CMD_LEN 2 struct jadard_init_cmd { u8 data[JD9365DA_INIT_CMD_LEN]; }; struct jadard_panel_desc { const struct drm_display_mode mode; unsigned int lanes; enum mipi_dsi_pixel_format format; const struct jadard_init_cmd *init_cmds; u32 num_init_cmds; }; struct jadard { struct drm_panel panel; struct mipi_dsi_device *dsi; const struct jadard_panel_desc *desc; struct regulator *vdd; struct regulator *vccio; struct gpio_desc *reset; }; static inline struct jadard *panel_to_jadard(struct drm_panel *panel) { return container_of(panel, struct jadard, panel); } static int jadard_enable(struct drm_panel *panel) { struct device *dev = panel->dev; struct jadard *jadard = panel_to_jadard(panel); const struct jadard_panel_desc *desc = jadard->desc; struct mipi_dsi_device *dsi = jadard->dsi; unsigned int i; int err; msleep(10); for (i = 0; i < desc->num_init_cmds; i++) { const struct jadard_init_cmd *cmd = &desc->init_cmds[i]; err = mipi_dsi_dcs_write_buffer(dsi, cmd->data, JD9365DA_INIT_CMD_LEN); if (err < 0) return err; } msleep(120); err = mipi_dsi_dcs_exit_sleep_mode(dsi); if (err < 0) DRM_DEV_ERROR(dev, "failed to exit sleep mode ret = %d\n", err); err = mipi_dsi_dcs_set_display_on(dsi); if (err < 0) DRM_DEV_ERROR(dev, "failed to set display on ret = %d\n", err); return 0; } static int jadard_disable(struct drm_panel *panel) { struct device *dev = panel->dev; struct jadard *jadard = panel_to_jadard(panel); int ret; ret = mipi_dsi_dcs_set_display_off(jadard->dsi); if (ret < 0) 
DRM_DEV_ERROR(dev, "failed to set display off: %d\n", ret); ret = mipi_dsi_dcs_enter_sleep_mode(jadard->dsi); if (ret < 0) DRM_DEV_ERROR(dev, "failed to enter sleep mode: %d\n", ret); return 0; } static int jadard_prepare(struct drm_panel *panel) { struct jadard *jadard = panel_to_jadard(panel); int ret; ret = regulator_enable(jadard->vccio); if (ret) return ret; ret = regulator_enable(jadard->vdd); if (ret) return ret; gpiod_set_value(jadard->reset, 1); msleep(5); gpiod_set_value(jadard->reset, 0); msleep(10); gpiod_set_value(jadard->reset, 1); msleep(120); return 0; } static int jadard_unprepare(struct drm_panel *panel) { struct jadard *jadard = panel_to_jadard(panel); gpiod_set_value(jadard->reset, 1); msleep(120); regulator_disable(jadard->vdd); regulator_disable(jadard->vccio); return 0; } static int jadard_get_modes(struct drm_panel *panel, struct drm_connector *connector) { struct jadard *jadard = panel_to_jadard(panel); const struct drm_display_mode *desc_mode = &jadard->desc->mode; struct drm_display_mode *mode; mode = drm_mode_duplicate(connector->dev, desc_mode); if (!mode) { DRM_DEV_ERROR(&jadard->dsi->dev, "failed to add mode %ux%ux@%u\n", desc_mode->hdisplay, desc_mode->vdisplay, drm_mode_vrefresh(desc_mode)); return -ENOMEM; } drm_mode_set_name(mode); drm_mode_probed_add(connector, mode); connector->display_info.width_mm = mode->width_mm; connector->display_info.height_mm = mode->height_mm; return 1; } static const struct drm_panel_funcs jadard_funcs = { .disable = jadard_disable, .unprepare = jadard_unprepare, .prepare = jadard_prepare, .enable = jadard_enable, .get_modes = jadard_get_modes, }; static const struct jadard_init_cmd radxa_display_8hd_ad002_init_cmds[] = { { .data = { 0xE0, 0x00 } }, { .data = { 0xE1, 0x93 } }, { .data = { 0xE2, 0x65 } }, { .data = { 0xE3, 0xF8 } }, { .data = { 0x80, 0x03 } }, { .data = { 0xE0, 0x01 } }, { .data = { 0x00, 0x00 } }, { .data = { 0x01, 0x7E } }, { .data = { 0x03, 0x00 } }, { .data = { 0x04, 0x65 } }, { 
.data = { 0x0C, 0x74 } }, { .data = { 0x17, 0x00 } }, { .data = { 0x18, 0xB7 } }, { .data = { 0x19, 0x00 } }, { .data = { 0x1A, 0x00 } }, { .data = { 0x1B, 0xB7 } }, { .data = { 0x1C, 0x00 } }, { .data = { 0x24, 0xFE } }, { .data = { 0x37, 0x19 } }, { .data = { 0x38, 0x05 } }, { .data = { 0x39, 0x00 } }, { .data = { 0x3A, 0x01 } }, { .data = { 0x3B, 0x01 } }, { .data = { 0x3C, 0x70 } }, { .data = { 0x3D, 0xFF } }, { .data = { 0x3E, 0xFF } }, { .data = { 0x3F, 0xFF } }, { .data = { 0x40, 0x06 } }, { .data = { 0x41, 0xA0 } }, { .data = { 0x43, 0x1E } }, { .data = { 0x44, 0x0F } }, { .data = { 0x45, 0x28 } }, { .data = { 0x4B, 0x04 } }, { .data = { 0x55, 0x02 } }, { .data = { 0x56, 0x01 } }, { .data = { 0x57, 0xA9 } }, { .data = { 0x58, 0x0A } }, { .data = { 0x59, 0x0A } }, { .data = { 0x5A, 0x37 } }, { .data = { 0x5B, 0x19 } }, { .data = { 0x5D, 0x78 } }, { .data = { 0x5E, 0x63 } }, { .data = { 0x5F, 0x54 } }, { .data = { 0x60, 0x49 } }, { .data = { 0x61, 0x45 } }, { .data = { 0x62, 0x38 } }, { .data = { 0x63, 0x3D } }, { .data = { 0x64, 0x28 } }, { .data = { 0x65, 0x43 } }, { .data = { 0x66, 0x41 } }, { .data = { 0x67, 0x43 } }, { .data = { 0x68, 0x62 } }, { .data = { 0x69, 0x50 } }, { .data = { 0x6A, 0x57 } }, { .data = { 0x6B, 0x49 } }, { .data = { 0x6C, 0x44 } }, { .data = { 0x6D, 0x37 } }, { .data = { 0x6E, 0x23 } }, { .data = { 0x6F, 0x10 } }, { .data = { 0x70, 0x78 } }, { .data = { 0x71, 0x63 } }, { .data = { 0x72, 0x54 } }, { .data = { 0x73, 0x49 } }, { .data = { 0x74, 0x45 } }, { .data = { 0x75, 0x38 } }, { .data = { 0x76, 0x3D } }, { .data = { 0x77, 0x28 } }, { .data = { 0x78, 0x43 } }, { .data = { 0x79, 0x41 } }, { .data = { 0x7A, 0x43 } }, { .data = { 0x7B, 0x62 } }, { .data = { 0x7C, 0x50 } }, { .data = { 0x7D, 0x57 } }, { .data = { 0x7E, 0x49 } }, { .data = { 0x7F, 0x44 } }, { .data = { 0x80, 0x37 } }, { .data = { 0x81, 0x23 } }, { .data = { 0x82, 0x10 } }, { .data = { 0xE0, 0x02 } }, { .data = { 0x00, 0x47 } }, { .data = { 0x01, 0x47 } }, { .data = { 
0x02, 0x45 } }, { .data = { 0x03, 0x45 } }, { .data = { 0x04, 0x4B } }, { .data = { 0x05, 0x4B } }, { .data = { 0x06, 0x49 } }, { .data = { 0x07, 0x49 } }, { .data = { 0x08, 0x41 } }, { .data = { 0x09, 0x1F } }, { .data = { 0x0A, 0x1F } }, { .data = { 0x0B, 0x1F } }, { .data = { 0x0C, 0x1F } }, { .data = { 0x0D, 0x1F } }, { .data = { 0x0E, 0x1F } }, { .data = { 0x0F, 0x5F } }, { .data = { 0x10, 0x5F } }, { .data = { 0x11, 0x57 } }, { .data = { 0x12, 0x77 } }, { .data = { 0x13, 0x35 } }, { .data = { 0x14, 0x1F } }, { .data = { 0x15, 0x1F } }, { .data = { 0x16, 0x46 } }, { .data = { 0x17, 0x46 } }, { .data = { 0x18, 0x44 } }, { .data = { 0x19, 0x44 } }, { .data = { 0x1A, 0x4A } }, { .data = { 0x1B, 0x4A } }, { .data = { 0x1C, 0x48 } }, { .data = { 0x1D, 0x48 } }, { .data = { 0x1E, 0x40 } }, { .data = { 0x1F, 0x1F } }, { .data = { 0x20, 0x1F } }, { .data = { 0x21, 0x1F } }, { .data = { 0x22, 0x1F } }, { .data = { 0x23, 0x1F } }, { .data = { 0x24, 0x1F } }, { .data = { 0x25, 0x5F } }, { .data = { 0x26, 0x5F } }, { .data = { 0x27, 0x57 } }, { .data = { 0x28, 0x77 } }, { .data = { 0x29, 0x35 } }, { .data = { 0x2A, 0x1F } }, { .data = { 0x2B, 0x1F } }, { .data = { 0x58, 0x40 } }, { .data = { 0x59, 0x00 } }, { .data = { 0x5A, 0x00 } }, { .data = { 0x5B, 0x10 } }, { .data = { 0x5C, 0x06 } }, { .data = { 0x5D, 0x40 } }, { .data = { 0x5E, 0x01 } }, { .data = { 0x5F, 0x02 } }, { .data = { 0x60, 0x30 } }, { .data = { 0x61, 0x01 } }, { .data = { 0x62, 0x02 } }, { .data = { 0x63, 0x03 } }, { .data = { 0x64, 0x6B } }, { .data = { 0x65, 0x05 } }, { .data = { 0x66, 0x0C } }, { .data = { 0x67, 0x73 } }, { .data = { 0x68, 0x09 } }, { .data = { 0x69, 0x03 } }, { .data = { 0x6A, 0x56 } }, { .data = { 0x6B, 0x08 } }, { .data = { 0x6C, 0x00 } }, { .data = { 0x6D, 0x04 } }, { .data = { 0x6E, 0x04 } }, { .data = { 0x6F, 0x88 } }, { .data = { 0x70, 0x00 } }, { .data = { 0x71, 0x00 } }, { .data = { 0x72, 0x06 } }, { .data = { 0x73, 0x7B } }, { .data = { 0x74, 0x00 } }, { .data = { 0x75, 0xF8 
} }, { .data = { 0x76, 0x00 } }, { .data = { 0x77, 0xD5 } }, { .data = { 0x78, 0x2E } }, { .data = { 0x79, 0x12 } }, { .data = { 0x7A, 0x03 } }, { .data = { 0x7B, 0x00 } }, { .data = { 0x7C, 0x00 } }, { .data = { 0x7D, 0x03 } }, { .data = { 0x7E, 0x7B } }, { .data = { 0xE0, 0x04 } }, { .data = { 0x00, 0x0E } }, { .data = { 0x02, 0xB3 } }, { .data = { 0x09, 0x60 } }, { .data = { 0x0E, 0x2A } }, { .data = { 0x36, 0x59 } }, { .data = { 0xE0, 0x00 } }, }; static const struct jadard_panel_desc radxa_display_8hd_ad002_desc = { .mode = { .clock = 70000, .hdisplay = 800, .hsync_start = 800 + 40, .hsync_end = 800 + 40 + 18, .htotal = 800 + 40 + 18 + 20, .vdisplay = 1280, .vsync_start = 1280 + 20, .vsync_end = 1280 + 20 + 4, .vtotal = 1280 + 20 + 4 + 20, .width_mm = 127, .height_mm = 199, .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, }, .lanes = 4, .format = MIPI_DSI_FMT_RGB888, .init_cmds = radxa_display_8hd_ad002_init_cmds, .num_init_cmds = ARRAY_SIZE(radxa_display_8hd_ad002_init_cmds), }; static const struct jadard_init_cmd cz101b4001_init_cmds[] = { { .data = { 0xE0, 0x00 } }, { .data = { 0xE1, 0x93 } }, { .data = { 0xE2, 0x65 } }, { .data = { 0xE3, 0xF8 } }, { .data = { 0x80, 0x03 } }, { .data = { 0xE0, 0x01 } }, { .data = { 0x00, 0x00 } }, { .data = { 0x01, 0x3B } }, { .data = { 0x0C, 0x74 } }, { .data = { 0x17, 0x00 } }, { .data = { 0x18, 0xAF } }, { .data = { 0x19, 0x00 } }, { .data = { 0x1A, 0x00 } }, { .data = { 0x1B, 0xAF } }, { .data = { 0x1C, 0x00 } }, { .data = { 0x35, 0x26 } }, { .data = { 0x37, 0x09 } }, { .data = { 0x38, 0x04 } }, { .data = { 0x39, 0x00 } }, { .data = { 0x3A, 0x01 } }, { .data = { 0x3C, 0x78 } }, { .data = { 0x3D, 0xFF } }, { .data = { 0x3E, 0xFF } }, { .data = { 0x3F, 0x7F } }, { .data = { 0x40, 0x06 } }, { .data = { 0x41, 0xA0 } }, { .data = { 0x42, 0x81 } }, { .data = { 0x43, 0x14 } }, { .data = { 0x44, 0x23 } }, { .data = { 0x45, 0x28 } }, { .data = { 0x55, 0x02 } }, { .data = { 0x57, 0x69 } }, { .data = { 0x59, 0x0A } }, { 
.data = { 0x5A, 0x2A } }, { .data = { 0x5B, 0x17 } }, { .data = { 0x5D, 0x7F } }, { .data = { 0x5E, 0x6B } }, { .data = { 0x5F, 0x5C } }, { .data = { 0x60, 0x4F } }, { .data = { 0x61, 0x4D } }, { .data = { 0x62, 0x3F } }, { .data = { 0x63, 0x42 } }, { .data = { 0x64, 0x2B } }, { .data = { 0x65, 0x44 } }, { .data = { 0x66, 0x43 } }, { .data = { 0x67, 0x43 } }, { .data = { 0x68, 0x63 } }, { .data = { 0x69, 0x52 } }, { .data = { 0x6A, 0x5A } }, { .data = { 0x6B, 0x4F } }, { .data = { 0x6C, 0x4E } }, { .data = { 0x6D, 0x20 } }, { .data = { 0x6E, 0x0F } }, { .data = { 0x6F, 0x00 } }, { .data = { 0x70, 0x7F } }, { .data = { 0x71, 0x6B } }, { .data = { 0x72, 0x5C } }, { .data = { 0x73, 0x4F } }, { .data = { 0x74, 0x4D } }, { .data = { 0x75, 0x3F } }, { .data = { 0x76, 0x42 } }, { .data = { 0x77, 0x2B } }, { .data = { 0x78, 0x44 } }, { .data = { 0x79, 0x43 } }, { .data = { 0x7A, 0x43 } }, { .data = { 0x7B, 0x63 } }, { .data = { 0x7C, 0x52 } }, { .data = { 0x7D, 0x5A } }, { .data = { 0x7E, 0x4F } }, { .data = { 0x7F, 0x4E } }, { .data = { 0x80, 0x20 } }, { .data = { 0x81, 0x0F } }, { .data = { 0x82, 0x00 } }, { .data = { 0xE0, 0x02 } }, { .data = { 0x00, 0x02 } }, { .data = { 0x01, 0x02 } }, { .data = { 0x02, 0x00 } }, { .data = { 0x03, 0x00 } }, { .data = { 0x04, 0x1E } }, { .data = { 0x05, 0x1E } }, { .data = { 0x06, 0x1F } }, { .data = { 0x07, 0x1F } }, { .data = { 0x08, 0x1F } }, { .data = { 0x09, 0x17 } }, { .data = { 0x0A, 0x17 } }, { .data = { 0x0B, 0x37 } }, { .data = { 0x0C, 0x37 } }, { .data = { 0x0D, 0x47 } }, { .data = { 0x0E, 0x47 } }, { .data = { 0x0F, 0x45 } }, { .data = { 0x10, 0x45 } }, { .data = { 0x11, 0x4B } }, { .data = { 0x12, 0x4B } }, { .data = { 0x13, 0x49 } }, { .data = { 0x14, 0x49 } }, { .data = { 0x15, 0x1F } }, { .data = { 0x16, 0x01 } }, { .data = { 0x17, 0x01 } }, { .data = { 0x18, 0x00 } }, { .data = { 0x19, 0x00 } }, { .data = { 0x1A, 0x1E } }, { .data = { 0x1B, 0x1E } }, { .data = { 0x1C, 0x1F } }, { .data = { 0x1D, 0x1F } }, { .data = { 
0x1E, 0x1F } }, { .data = { 0x1F, 0x17 } }, { .data = { 0x20, 0x17 } }, { .data = { 0x21, 0x37 } }, { .data = { 0x22, 0x37 } }, { .data = { 0x23, 0x46 } }, { .data = { 0x24, 0x46 } }, { .data = { 0x25, 0x44 } }, { .data = { 0x26, 0x44 } }, { .data = { 0x27, 0x4A } }, { .data = { 0x28, 0x4A } }, { .data = { 0x29, 0x48 } }, { .data = { 0x2A, 0x48 } }, { .data = { 0x2B, 0x1F } }, { .data = { 0x2C, 0x01 } }, { .data = { 0x2D, 0x01 } }, { .data = { 0x2E, 0x00 } }, { .data = { 0x2F, 0x00 } }, { .data = { 0x30, 0x1F } }, { .data = { 0x31, 0x1F } }, { .data = { 0x32, 0x1E } }, { .data = { 0x33, 0x1E } }, { .data = { 0x34, 0x1F } }, { .data = { 0x35, 0x17 } }, { .data = { 0x36, 0x17 } }, { .data = { 0x37, 0x37 } }, { .data = { 0x38, 0x37 } }, { .data = { 0x39, 0x08 } }, { .data = { 0x3A, 0x08 } }, { .data = { 0x3B, 0x0A } }, { .data = { 0x3C, 0x0A } }, { .data = { 0x3D, 0x04 } }, { .data = { 0x3E, 0x04 } }, { .data = { 0x3F, 0x06 } }, { .data = { 0x40, 0x06 } }, { .data = { 0x41, 0x1F } }, { .data = { 0x42, 0x02 } }, { .data = { 0x43, 0x02 } }, { .data = { 0x44, 0x00 } }, { .data = { 0x45, 0x00 } }, { .data = { 0x46, 0x1F } }, { .data = { 0x47, 0x1F } }, { .data = { 0x48, 0x1E } }, { .data = { 0x49, 0x1E } }, { .data = { 0x4A, 0x1F } }, { .data = { 0x4B, 0x17 } }, { .data = { 0x4C, 0x17 } }, { .data = { 0x4D, 0x37 } }, { .data = { 0x4E, 0x37 } }, { .data = { 0x4F, 0x09 } }, { .data = { 0x50, 0x09 } }, { .data = { 0x51, 0x0B } }, { .data = { 0x52, 0x0B } }, { .data = { 0x53, 0x05 } }, { .data = { 0x54, 0x05 } }, { .data = { 0x55, 0x07 } }, { .data = { 0x56, 0x07 } }, { .data = { 0x57, 0x1F } }, { .data = { 0x58, 0x40 } }, { .data = { 0x5B, 0x30 } }, { .data = { 0x5C, 0x16 } }, { .data = { 0x5D, 0x34 } }, { .data = { 0x5E, 0x05 } }, { .data = { 0x5F, 0x02 } }, { .data = { 0x63, 0x00 } }, { .data = { 0x64, 0x6A } }, { .data = { 0x67, 0x73 } }, { .data = { 0x68, 0x1D } }, { .data = { 0x69, 0x08 } }, { .data = { 0x6A, 0x6A } }, { .data = { 0x6B, 0x08 } }, { .data = { 0x6C, 0x00 
} }, { .data = { 0x6D, 0x00 } }, { .data = { 0x6E, 0x00 } }, { .data = { 0x6F, 0x88 } }, { .data = { 0x75, 0xFF } }, { .data = { 0x77, 0xDD } }, { .data = { 0x78, 0x3F } }, { .data = { 0x79, 0x15 } }, { .data = { 0x7A, 0x17 } }, { .data = { 0x7D, 0x14 } }, { .data = { 0x7E, 0x82 } }, { .data = { 0xE0, 0x04 } }, { .data = { 0x00, 0x0E } }, { .data = { 0x02, 0xB3 } }, { .data = { 0x09, 0x61 } }, { .data = { 0x0E, 0x48 } }, { .data = { 0xE0, 0x00 } }, { .data = { 0xE6, 0x02 } }, { .data = { 0xE7, 0x0C } }, }; static const struct jadard_panel_desc cz101b4001_desc = { .mode = { .clock = 70000, .hdisplay = 800, .hsync_start = 800 + 40, .hsync_end = 800 + 40 + 18, .htotal = 800 + 40 + 18 + 20, .vdisplay = 1280, .vsync_start = 1280 + 20, .vsync_end = 1280 + 20 + 4, .vtotal = 1280 + 20 + 4 + 20, .width_mm = 62, .height_mm = 110, .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, }, .lanes = 4, .format = MIPI_DSI_FMT_RGB888, .init_cmds = cz101b4001_init_cmds, .num_init_cmds = ARRAY_SIZE(cz101b4001_init_cmds), }; static int jadard_dsi_probe(struct mipi_dsi_device *dsi) { struct device *dev = &dsi->dev; const struct jadard_panel_desc *desc; struct jadard *jadard; int ret; jadard = devm_kzalloc(&dsi->dev, sizeof(*jadard), GFP_KERNEL); if (!jadard) return -ENOMEM; desc = of_device_get_match_data(dev); dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_NO_EOT_PACKET; dsi->format = desc->format; dsi->lanes = desc->lanes; jadard->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(jadard->reset)) { DRM_DEV_ERROR(&dsi->dev, "failed to get our reset GPIO\n"); return PTR_ERR(jadard->reset); } jadard->vdd = devm_regulator_get(dev, "vdd"); if (IS_ERR(jadard->vdd)) { DRM_DEV_ERROR(&dsi->dev, "failed to get vdd regulator\n"); return PTR_ERR(jadard->vdd); } jadard->vccio = devm_regulator_get(dev, "vccio"); if (IS_ERR(jadard->vccio)) { DRM_DEV_ERROR(&dsi->dev, "failed to get vccio regulator\n"); return PTR_ERR(jadard->vccio); } 
drm_panel_init(&jadard->panel, dev, &jadard_funcs, DRM_MODE_CONNECTOR_DSI); ret = drm_panel_of_backlight(&jadard->panel); if (ret) return ret; drm_panel_add(&jadard->panel); mipi_dsi_set_drvdata(dsi, jadard); jadard->dsi = dsi; jadard->desc = desc; ret = mipi_dsi_attach(dsi); if (ret < 0) drm_panel_remove(&jadard->panel); return ret; } static void jadard_dsi_remove(struct mipi_dsi_device *dsi) { struct jadard *jadard = mipi_dsi_get_drvdata(dsi); mipi_dsi_detach(dsi); drm_panel_remove(&jadard->panel); } static const struct of_device_id jadard_of_match[] = { { .compatible = "chongzhou,cz101b4001", .data = &cz101b4001_desc }, { .compatible = "radxa,display-10hd-ad001", .data = &cz101b4001_desc }, { .compatible = "radxa,display-8hd-ad002", .data = &radxa_display_8hd_ad002_desc }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, jadard_of_match); static struct mipi_dsi_driver jadard_driver = { .probe = jadard_dsi_probe, .remove = jadard_dsi_remove, .driver = { .name = "jadard-jd9365da", .of_match_table = jadard_of_match, }, }; module_mipi_dsi_driver(jadard_driver); MODULE_AUTHOR("Jagan Teki <[email protected]>"); MODULE_AUTHOR("Stephen Chen <[email protected]>"); MODULE_DESCRIPTION("Jadard JD9365DA-H3 WXGA DSI panel"); MODULE_LICENSE("GPL");
linux-master
drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd
 */

#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>

#include <video/mipi_display.h>

#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>

/* One entry of a panel power-on init sequence, sent as a DSI generic write. */
struct panel_init_cmd {
	size_t len;		/* number of bytes in @data */
	const char *data;	/* raw bytes: register followed by payload */
};

/* Build a panel_init_cmd from a byte list; the length is derived from it. */
#define _INIT_CMD(...) { \
	.len = sizeof((char[]){__VA_ARGS__}), \
	.data = (char[]){__VA_ARGS__} }

/* Static description of one supported panel model. */
struct panel_desc {
	const struct drm_display_mode *mode;	/* fixed native mode */
	unsigned int bpc;			/* bits per color component */
	struct {
		unsigned int width;		/* physical width in mm */
		unsigned int height;		/* physical height in mm */
	} size;

	unsigned long flags;			/* MIPI_DSI_MODE_* host flags */
	enum mipi_dsi_pixel_format format;
	const struct panel_init_cmd *init_cmds;	/* optional, {}-terminated */
	unsigned int lanes;
	const char * const *supply_names;	/* regulators to bulk-enable */
	unsigned int num_supplies;
	unsigned int sleep_mode_delay;		/* ms to wait after sleep-in */
	unsigned int power_down_delay;		/* ms to wait before rail-off */
};

/* Per-device driver state. */
struct innolux_panel {
	struct drm_panel base;
	struct mipi_dsi_device *link;		/* the DSI peripheral we drive */
	const struct panel_desc *desc;

	struct regulator_bulk_data *supplies;
	struct gpio_desc *enable_gpio;		/* optional, active high */

	bool prepared;				/* power-up/init sequence done */
	bool enabled;
};

static inline struct innolux_panel *to_innolux_panel(struct drm_panel *panel)
{
	return container_of(panel, struct innolux_panel, base);
}

/*
 * Disable hook: pure bookkeeping — the real power-down work happens in
 * unprepare.
 */
static int innolux_panel_disable(struct drm_panel *panel)
{
	struct innolux_panel *innolux = to_innolux_panel(panel);

	if (!innolux->enabled)
		return 0;

	innolux->enabled = false;

	return 0;
}

/*
 * Unprepare hook: display off, sleep-in, de-assert enable, then drop the
 * supplies, honoring the per-model settle delays.
 */
static int innolux_panel_unprepare(struct drm_panel *panel)
{
	struct innolux_panel *innolux = to_innolux_panel(panel);
	int err;

	if (!innolux->prepared)
		return 0;

	/* Display-off failure is logged but does not abort the power-down. */
	err = mipi_dsi_dcs_set_display_off(innolux->link);
	if (err < 0)
		dev_err(panel->dev, "failed to set display off: %d\n",
			err);

	/*
	 * NOTE(review): on sleep-in failure we return with the enable GPIO
	 * still asserted and the supplies still on.
	 */
	err = mipi_dsi_dcs_enter_sleep_mode(innolux->link);
	if (err < 0) {
		dev_err(panel->dev, "failed to enter sleep mode: %d\n",
			err);
		return err;
	}

	/* p097pfg: T15 — let the panel settle in sleep mode */
	if (innolux->desc->sleep_mode_delay)
		msleep(innolux->desc->sleep_mode_delay);

	gpiod_set_value_cansleep(innolux->enable_gpio, 0);

	if (innolux->desc->power_down_delay)
		msleep(innolux->desc->power_down_delay);

	err = regulator_bulk_disable(innolux->desc->num_supplies,
				     innolux->supplies);
	if (err < 0)
		return err;

	innolux->prepared = false;

	return 0;
}

/*
 * Prepare hook: bring up the supplies, pulse the enable line, play the
 * optional per-model init sequence, then sleep-out and display-on.
 */
static int innolux_panel_prepare(struct drm_panel *panel)
{
	struct innolux_panel *innolux = to_innolux_panel(panel);
	int err;

	if (innolux->prepared)
		return 0;

	/* Hold the panel disabled while the rails come up. */
	gpiod_set_value_cansleep(innolux->enable_gpio, 0);

	err = regulator_bulk_enable(innolux->desc->num_supplies,
				    innolux->supplies);
	if (err < 0)
		return err;

	/* p079zca: t2 (20ms), p097pfg: t4 (15ms) */
	usleep_range(20000, 21000);

	gpiod_set_value_cansleep(innolux->enable_gpio, 1);

	/* p079zca: t4, p097pfg: t5 */
	usleep_range(20000, 21000);

	if (innolux->desc->init_cmds) {
		const struct panel_init_cmd *cmds =
					innolux->desc->init_cmds;
		unsigned int i;

		/* The table is terminated by an entry with len == 0. */
		for (i = 0; cmds[i].len != 0; i++) {
			const struct panel_init_cmd *cmd = &cmds[i];

			err = mipi_dsi_generic_write(innolux->link, cmd->data,
						     cmd->len);
			if (err < 0) {
				dev_err(panel->dev, "failed to write command %u\n",
					i);
				goto poweroff;
			}

			/*
			 * Included by random guessing, because without this
			 * (or at least, some delay), the panel sometimes
			 * didn't appear to pick up the command sequence.
			 */
			err = mipi_dsi_dcs_nop(innolux->link);
			if (err < 0) {
				dev_err(panel->dev, "failed to send DCS nop: %d\n",
					err);
				goto poweroff;
			}
		}
	}

	err = mipi_dsi_dcs_exit_sleep_mode(innolux->link);
	if (err < 0) {
		dev_err(panel->dev, "failed to exit sleep mode: %d\n",
			err);
		goto poweroff;
	}

	/* T6: 120ms - 1000ms*/
	msleep(120);

	err = mipi_dsi_dcs_set_display_on(innolux->link);
	if (err < 0) {
		dev_err(panel->dev, "failed to set display on: %d\n",
			err);
		goto poweroff;
	}

	/* T7: 5ms */
	usleep_range(5000, 6000);

	innolux->prepared = true;

	return 0;

poweroff:
	/* Unwind the partial bring-up before reporting the error. */
	gpiod_set_value_cansleep(innolux->enable_gpio, 0);
	regulator_bulk_disable(innolux->desc->num_supplies, innolux->supplies);

	return err;
}

/* Enable hook: bookkeeping only, mirroring innolux_panel_disable(). */
static int innolux_panel_enable(struct drm_panel *panel)
{
	struct innolux_panel *innolux = to_innolux_panel(panel);

	if (innolux->enabled)
		return 0;

	innolux->enabled = true;

	return 0;
}

static const char * const innolux_p079zca_supply_names[] = {
	"power",
};

/* 768x1024 @ ~60 Hz native mode of the P079ZCA. */
static const struct drm_display_mode innolux_p079zca_mode = {
	.clock = 56900,
	.hdisplay = 768,
	.hsync_start = 768 + 40,
	.hsync_end = 768 + 40 + 40,
	.htotal = 768 + 40 + 40 + 40,
	.vdisplay = 1024,
	.vsync_start = 1024 + 20,
	.vsync_end = 1024 + 20 + 4,
	.vtotal = 1024 + 20 + 4 + 20,
};

static const struct panel_desc innolux_p079zca_panel_desc = {
	.mode = &innolux_p079zca_mode,
	.bpc = 8,
	.size = {
		.width = 120,
		.height = 160,
	},
	.flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
		 MIPI_DSI_MODE_LPM,
	.format = MIPI_DSI_FMT_RGB888,
	.lanes = 4,
	.supply_names = innolux_p079zca_supply_names,
	.num_supplies = ARRAY_SIZE(innolux_p079zca_supply_names),
	.power_down_delay = 80, /* T8: 80ms - 1000ms */
};

static const char * const innolux_p097pfg_supply_names[] = {
	"avdd",
	"avee",
};

/* 1536x2048 native mode of the P097PFG. */
static const struct drm_display_mode innolux_p097pfg_mode = {
	.clock = 229000,
	.hdisplay = 1536,
	.hsync_start = 1536 + 100,
	.hsync_end = 1536 + 100 + 24,
	.htotal = 1536 + 100 + 24 + 100,
	.vdisplay = 2048,
	.vsync_start = 2048 + 100,
	.vsync_end = 2048 + 100 + 2,
	.vtotal = 2048 + 100 + 2 + 18,
};

/*
 * Display manufacturer failed to provide init sequencing according to
 * https://chromium-review.googlesource.com/c/chromiumos/third_party/coreboot/+/892065/
 * so the init sequence stems from a register dump of a working panel.
 */
static const struct panel_init_cmd innolux_p097pfg_init_cmds[] = {
	/* page 0 */
	_INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x00),
	_INIT_CMD(0xB1, 0xE8, 0x11),
	_INIT_CMD(0xB2, 0x25, 0x02),
	_INIT_CMD(0xB5, 0x08, 0x00),
	_INIT_CMD(0xBC, 0x0F, 0x00),
	_INIT_CMD(0xB8, 0x03, 0x06, 0x00, 0x00),
	_INIT_CMD(0xBD, 0x01, 0x90, 0x14, 0x14),
	_INIT_CMD(0x6F, 0x01),
	_INIT_CMD(0xC0, 0x03),
	_INIT_CMD(0x6F, 0x02),
	_INIT_CMD(0xC1, 0x0D),
	_INIT_CMD(0xD9, 0x01, 0x09, 0x70),
	_INIT_CMD(0xC5, 0x12, 0x21, 0x00),
	_INIT_CMD(0xBB, 0x93, 0x93),

	/* page 1 */
	_INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x01),
	_INIT_CMD(0xB3, 0x3C, 0x3C),
	_INIT_CMD(0xB4, 0x0F, 0x0F),
	_INIT_CMD(0xB9, 0x45, 0x45),
	_INIT_CMD(0xBA, 0x14, 0x14),
	_INIT_CMD(0xCA, 0x02),
	_INIT_CMD(0xCE, 0x04),
	_INIT_CMD(0xC3, 0x9B, 0x9B),
	_INIT_CMD(0xD8, 0xC0, 0x03),
	_INIT_CMD(0xBC, 0x82, 0x01),
	_INIT_CMD(0xBD, 0x9E, 0x01),

	/* page 2 */
	_INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x02),
	_INIT_CMD(0xB0, 0x82),
	_INIT_CMD(0xD1, 0x00, 0x00, 0x00, 0x3E, 0x00, 0x82, 0x00, 0xA5,
		  0x00, 0xC1, 0x00, 0xEA, 0x01, 0x0D, 0x01, 0x40),
	_INIT_CMD(0xD2, 0x01, 0x6A, 0x01, 0xA8, 0x01, 0xDC, 0x02, 0x29,
		  0x02, 0x67, 0x02, 0x68, 0x02, 0xA8, 0x02, 0xF0),
	_INIT_CMD(0xD3, 0x03, 0x19, 0x03, 0x49, 0x03, 0x67, 0x03, 0x8C,
		  0x03, 0xA6, 0x03, 0xC7, 0x03, 0xDE, 0x03, 0xEC),
	_INIT_CMD(0xD4, 0x03, 0xFF, 0x03, 0xFF),
	_INIT_CMD(0xE0, 0x00, 0x00, 0x00, 0x86, 0x00, 0xC5, 0x00, 0xE5,
		  0x00, 0xFF, 0x01, 0x26, 0x01, 0x45, 0x01, 0x75),
	_INIT_CMD(0xE1, 0x01, 0x9C, 0x01, 0xD5, 0x02, 0x05, 0x02, 0x4D,
		  0x02, 0x86, 0x02, 0x87, 0x02, 0xC3, 0x03, 0x03),
	_INIT_CMD(0xE2, 0x03, 0x2A, 0x03, 0x56, 0x03, 0x72, 0x03, 0x94,
		  0x03, 0xAC, 0x03, 0xCB, 0x03, 0xE0, 0x03, 0xED),
	_INIT_CMD(0xE3, 0x03, 0xFF, 0x03, 0xFF),

	/* page 3 */
	_INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x03),
	_INIT_CMD(0xB0, 0x00, 0x00, 0x00, 0x00),
	_INIT_CMD(0xB1, 0x00, 0x00, 0x00, 0x00),
	_INIT_CMD(0xB2, 0x00, 0x00, 0x06, 0x04, 0x01, 0x40, 0x85),
	_INIT_CMD(0xB3, 0x10, 0x07, 0xFC, 0x04, 0x01, 0x40, 0x80),
	_INIT_CMD(0xB6, 0xF0, 0x08, 0x00, 0x04, 0x00, 0x00, 0x00,
		  0x01, 0x40, 0x80),
	_INIT_CMD(0xBA, 0xC5, 0x07, 0x00, 0x04, 0x11, 0x25, 0x8C),
	_INIT_CMD(0xBB, 0xC5, 0x07, 0x00, 0x03, 0x11, 0x25, 0x8C),
	_INIT_CMD(0xC0, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x80, 0x80),
	_INIT_CMD(0xC1, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x80, 0x80),
	_INIT_CMD(0xC4, 0x00, 0x00),
	_INIT_CMD(0xEF, 0x41),

	/* page 4 */
	_INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x04),
	_INIT_CMD(0xEC, 0x4C),

	/* page 5 */
	_INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x05),
	_INIT_CMD(0xB0, 0x13, 0x03, 0x03, 0x01),
	_INIT_CMD(0xB1, 0x30, 0x00),
	_INIT_CMD(0xB2, 0x02, 0x02, 0x00),
	_INIT_CMD(0xB3, 0x82, 0x23, 0x82, 0x9D),
	_INIT_CMD(0xB4, 0xC5, 0x75, 0x24, 0x57),
	_INIT_CMD(0xB5, 0x00, 0xD4, 0x72, 0x11, 0x11, 0xAB, 0x0A),
	_INIT_CMD(0xB6, 0x00, 0x00, 0xD5, 0x72, 0x24, 0x56),
	_INIT_CMD(0xB7, 0x5C, 0xDC, 0x5C, 0x5C),
	_INIT_CMD(0xB9, 0x0C, 0x00, 0x00, 0x01, 0x00),
	_INIT_CMD(0xC0, 0x75, 0x11, 0x11, 0x54, 0x05),
	_INIT_CMD(0xC6, 0x00, 0x00, 0x00, 0x00),
	_INIT_CMD(0xD0, 0x00, 0x48, 0x08, 0x00, 0x00),
	_INIT_CMD(0xD1, 0x00, 0x48, 0x09, 0x00, 0x00),

	/* page 6 */
	_INIT_CMD(0xF0, 0x55, 0xAA, 0x52, 0x08, 0x06),
	_INIT_CMD(0xB0, 0x02, 0x32, 0x32, 0x08, 0x2F),
	_INIT_CMD(0xB1, 0x2E, 0x15, 0x14, 0x13, 0x12),
	_INIT_CMD(0xB2, 0x11, 0x10, 0x00, 0x3D, 0x3D),
	_INIT_CMD(0xB3, 0x3D, 0x3D, 0x3D, 0x3D, 0x3D),
	_INIT_CMD(0xB4, 0x3D, 0x32),
	_INIT_CMD(0xB5, 0x03, 0x32, 0x32, 0x09, 0x2F),
	_INIT_CMD(0xB6, 0x2E, 0x1B, 0x1A, 0x19, 0x18),
	_INIT_CMD(0xB7, 0x17, 0x16, 0x01, 0x3D, 0x3D),
	_INIT_CMD(0xB8, 0x3D, 0x3D, 0x3D, 0x3D, 0x3D),
	_INIT_CMD(0xB9, 0x3D, 0x32),
	_INIT_CMD(0xC0, 0x01, 0x32, 0x32, 0x09, 0x2F),
	_INIT_CMD(0xC1, 0x2E, 0x1A, 0x1B, 0x16, 0x17),
	_INIT_CMD(0xC2, 0x18, 0x19, 0x03, 0x3D, 0x3D),
	_INIT_CMD(0xC3, 0x3D, 0x3D, 0x3D, 0x3D, 0x3D),
	_INIT_CMD(0xC4, 0x3D, 0x32),
	_INIT_CMD(0xC5, 0x00, 0x32, 0x32, 0x08, 0x2F),
	_INIT_CMD(0xC6, 0x2E, 0x14, 0x15, 0x10, 0x11),
	_INIT_CMD(0xC7, 0x12, 0x13, 0x02, 0x3D, 0x3D),
	_INIT_CMD(0xC8, 0x3D, 0x3D, 0x3D, 0x3D, 0x3D),
	_INIT_CMD(0xC9, 0x3D, 0x32),

	{},	/* terminator (len == 0) */
};

static const struct panel_desc innolux_p097pfg_panel_desc = {
	.mode = &innolux_p097pfg_mode,
	.bpc = 8,
	.size = {
		.width = 147,
		.height = 196,
	},
	.flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
		 MIPI_DSI_MODE_LPM,
	.format = MIPI_DSI_FMT_RGB888,
	.init_cmds = innolux_p097pfg_init_cmds,
	.lanes = 4,
	.supply_names = innolux_p097pfg_supply_names,
	.num_supplies = ARRAY_SIZE(innolux_p097pfg_supply_names),
	.sleep_mode_delay = 100, /* T15 */
};

/* Report the single fixed mode plus the physical size/bpc. Returns 1. */
static int innolux_panel_get_modes(struct drm_panel *panel,
				   struct drm_connector *connector)
{
	struct innolux_panel *innolux = to_innolux_panel(panel);
	const struct drm_display_mode *m = innolux->desc->mode;
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, m);
	if (!mode) {
		dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
			m->hdisplay, m->vdisplay, drm_mode_vrefresh(m));
		return -ENOMEM;
	}

	drm_mode_set_name(mode);

	drm_mode_probed_add(connector, mode);

	connector->display_info.width_mm = innolux->desc->size.width;
	connector->display_info.height_mm = innolux->desc->size.height;
	connector->display_info.bpc = innolux->desc->bpc;

	return 1;
}

static const struct drm_panel_funcs innolux_panel_funcs = {
	.disable = innolux_panel_disable,
	.unprepare = innolux_panel_unprepare,
	.prepare = innolux_panel_prepare,
	.enable = innolux_panel_enable,
	.get_modes = innolux_panel_get_modes,
};

static const struct of_device_id innolux_of_match[] = {
	{ .compatible = "innolux,p079zca",
	  .data = &innolux_p079zca_panel_desc
	},
	{ .compatible = "innolux,p097pfg",
	  .data = &innolux_p097pfg_panel_desc
	},
	{ }
};
MODULE_DEVICE_TABLE(of, innolux_of_match);

/*
 * Allocate and register the panel: supplies, optional enable GPIO,
 * backlight, then drm_panel_add(). Called from probe.
 */
static int innolux_panel_add(struct mipi_dsi_device *dsi,
			     const struct panel_desc *desc)
{
	struct innolux_panel *innolux;
	struct device *dev = &dsi->dev;
	int err, i;

	innolux = devm_kzalloc(dev, sizeof(*innolux), GFP_KERNEL);
	if (!innolux)
		return -ENOMEM;

	innolux->desc = desc;

	innolux->supplies = devm_kcalloc(dev, desc->num_supplies,
					 sizeof(*innolux->supplies),
					 GFP_KERNEL);
	if (!innolux->supplies)
		return -ENOMEM;

	for (i = 0; i < desc->num_supplies; i++)
		innolux->supplies[i].supply = desc->supply_names[i];

	err = devm_regulator_bulk_get(dev, desc->num_supplies,
				      innolux->supplies);
	if (err < 0)
		return err;

	/*
	 * The enable GPIO is optional: any error fetching it (NOTE(review):
	 * including -EPROBE_DEFER) is only logged at debug level and the
	 * GPIO is then treated as absent.
	 */
	innolux->enable_gpio = devm_gpiod_get_optional(dev, "enable",
						       GPIOD_OUT_HIGH);
	if (IS_ERR(innolux->enable_gpio)) {
		err = PTR_ERR(innolux->enable_gpio);
		dev_dbg(dev, "failed to get enable gpio: %d\n", err);
		innolux->enable_gpio = NULL;
	}

	drm_panel_init(&innolux->base, dev, &innolux_panel_funcs,
		       DRM_MODE_CONNECTOR_DSI);
	err = drm_panel_of_backlight(&innolux->base);
	if (err)
		return err;

	drm_panel_add(&innolux->base);

	mipi_dsi_set_drvdata(dsi, innolux);
	innolux->link = dsi;

	return 0;
}

/* Counterpart of innolux_panel_add(). */
static void innolux_panel_del(struct innolux_panel *innolux)
{
	drm_panel_remove(&innolux->base);
}

/*
 * Probe: configure the DSI device from the matched model description,
 * register the panel and attach to the DSI host.
 */
static int innolux_panel_probe(struct mipi_dsi_device *dsi)
{
	const struct panel_desc *desc;
	struct innolux_panel *innolux;
	int err;

	desc = of_device_get_match_data(&dsi->dev);
	dsi->mode_flags = desc->flags;
	dsi->format = desc->format;
	dsi->lanes = desc->lanes;

	err = innolux_panel_add(dsi, desc);
	if (err < 0)
		return err;

	err = mipi_dsi_attach(dsi);
	if (err < 0) {
		/* Roll back the registration done by innolux_panel_add(). */
		innolux = mipi_dsi_get_drvdata(dsi);
		innolux_panel_del(innolux);
		return err;
	}

	return 0;
}

/* Remove: power the panel down, detach from the host and unregister. */
static void innolux_panel_remove(struct mipi_dsi_device *dsi)
{
	struct innolux_panel *innolux = mipi_dsi_get_drvdata(dsi);
	int err;

	err = drm_panel_unprepare(&innolux->base);
	if (err < 0)
		dev_err(&dsi->dev, "failed to unprepare panel: %d\n", err);

	err = drm_panel_disable(&innolux->base);
	if (err < 0)
		dev_err(&dsi->dev, "failed to disable panel: %d\n", err);

	err = mipi_dsi_detach(dsi);
	if (err < 0)
		dev_err(&dsi->dev, "failed to detach from DSI host: %d\n",
			err);

	innolux_panel_del(innolux);
}
static void innolux_panel_shutdown(struct mipi_dsi_device *dsi) { struct innolux_panel *innolux = mipi_dsi_get_drvdata(dsi); drm_panel_unprepare(&innolux->base); drm_panel_disable(&innolux->base); } static struct mipi_dsi_driver innolux_panel_driver = { .driver = { .name = "panel-innolux-p079zca", .of_match_table = innolux_of_match, }, .probe = innolux_panel_probe, .remove = innolux_panel_remove, .shutdown = innolux_panel_shutdown, }; module_mipi_dsi_driver(innolux_panel_driver); MODULE_AUTHOR("Chris Zhong <[email protected]>"); MODULE_AUTHOR("Lin Huang <[email protected]>"); MODULE_DESCRIPTION("Innolux P079ZCA panel driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/panel/panel-innolux-p079zca.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2020 BayLibre, SAS * Author: Neil Armstrong <[email protected]> */ #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/of.h> #include <linux/regulator/consumer.h> #include <video/mipi_display.h> #include <drm/drm_crtc.h> #include <drm/drm_device.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_modes.h> #include <drm/drm_panel.h> struct tdo_tl070wsh30_panel { struct drm_panel base; struct mipi_dsi_device *link; struct regulator *supply; struct gpio_desc *reset_gpio; bool prepared; }; static inline struct tdo_tl070wsh30_panel *to_tdo_tl070wsh30_panel(struct drm_panel *panel) { return container_of(panel, struct tdo_tl070wsh30_panel, base); } static int tdo_tl070wsh30_panel_prepare(struct drm_panel *panel) { struct tdo_tl070wsh30_panel *tdo_tl070wsh30 = to_tdo_tl070wsh30_panel(panel); int err; if (tdo_tl070wsh30->prepared) return 0; err = regulator_enable(tdo_tl070wsh30->supply); if (err < 0) return err; usleep_range(10000, 11000); gpiod_set_value_cansleep(tdo_tl070wsh30->reset_gpio, 1); usleep_range(10000, 11000); gpiod_set_value_cansleep(tdo_tl070wsh30->reset_gpio, 0); msleep(200); err = mipi_dsi_dcs_exit_sleep_mode(tdo_tl070wsh30->link); if (err < 0) { dev_err(panel->dev, "failed to exit sleep mode: %d\n", err); regulator_disable(tdo_tl070wsh30->supply); return err; } msleep(200); err = mipi_dsi_dcs_set_display_on(tdo_tl070wsh30->link); if (err < 0) { dev_err(panel->dev, "failed to set display on: %d\n", err); regulator_disable(tdo_tl070wsh30->supply); return err; } msleep(20); tdo_tl070wsh30->prepared = true; return 0; } static int tdo_tl070wsh30_panel_unprepare(struct drm_panel *panel) { struct tdo_tl070wsh30_panel *tdo_tl070wsh30 = to_tdo_tl070wsh30_panel(panel); int err; if (!tdo_tl070wsh30->prepared) return 0; err = mipi_dsi_dcs_set_display_off(tdo_tl070wsh30->link); if (err < 0) dev_err(panel->dev, "failed to set display off: %d\n", err); 
usleep_range(10000, 11000); err = mipi_dsi_dcs_enter_sleep_mode(tdo_tl070wsh30->link); if (err < 0) { dev_err(panel->dev, "failed to enter sleep mode: %d\n", err); return err; } usleep_range(10000, 11000); regulator_disable(tdo_tl070wsh30->supply); tdo_tl070wsh30->prepared = false; return 0; } static const struct drm_display_mode default_mode = { .clock = 47250, .hdisplay = 1024, .hsync_start = 1024 + 46, .hsync_end = 1024 + 46 + 80, .htotal = 1024 + 46 + 80 + 100, .vdisplay = 600, .vsync_start = 600 + 5, .vsync_end = 600 + 5 + 5, .vtotal = 600 + 5 + 5 + 20, .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, }; static int tdo_tl070wsh30_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector) { struct drm_display_mode *mode; mode = drm_mode_duplicate(connector->dev, &default_mode); if (!mode) { dev_err(panel->dev, "failed to add mode %ux%u@%u\n", default_mode.hdisplay, default_mode.vdisplay, drm_mode_vrefresh(&default_mode)); return -ENOMEM; } drm_mode_set_name(mode); drm_mode_probed_add(connector, mode); connector->display_info.width_mm = 154; connector->display_info.height_mm = 85; connector->display_info.bpc = 8; return 1; } static const struct drm_panel_funcs tdo_tl070wsh30_panel_funcs = { .unprepare = tdo_tl070wsh30_panel_unprepare, .prepare = tdo_tl070wsh30_panel_prepare, .get_modes = tdo_tl070wsh30_panel_get_modes, }; static const struct of_device_id tdo_tl070wsh30_of_match[] = { { .compatible = "tdo,tl070wsh30", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, tdo_tl070wsh30_of_match); static int tdo_tl070wsh30_panel_add(struct tdo_tl070wsh30_panel *tdo_tl070wsh30) { struct device *dev = &tdo_tl070wsh30->link->dev; int err; tdo_tl070wsh30->supply = devm_regulator_get(dev, "power"); if (IS_ERR(tdo_tl070wsh30->supply)) return PTR_ERR(tdo_tl070wsh30->supply); tdo_tl070wsh30->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(tdo_tl070wsh30->reset_gpio)) { err = PTR_ERR(tdo_tl070wsh30->reset_gpio); dev_dbg(dev, "failed to 
get reset gpio: %d\n", err); return err; } drm_panel_init(&tdo_tl070wsh30->base, &tdo_tl070wsh30->link->dev, &tdo_tl070wsh30_panel_funcs, DRM_MODE_CONNECTOR_DSI); err = drm_panel_of_backlight(&tdo_tl070wsh30->base); if (err) return err; drm_panel_add(&tdo_tl070wsh30->base); return 0; } static int tdo_tl070wsh30_panel_probe(struct mipi_dsi_device *dsi) { struct tdo_tl070wsh30_panel *tdo_tl070wsh30; int err; dsi->lanes = 4; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_LPM; tdo_tl070wsh30 = devm_kzalloc(&dsi->dev, sizeof(*tdo_tl070wsh30), GFP_KERNEL); if (!tdo_tl070wsh30) return -ENOMEM; mipi_dsi_set_drvdata(dsi, tdo_tl070wsh30); tdo_tl070wsh30->link = dsi; err = tdo_tl070wsh30_panel_add(tdo_tl070wsh30); if (err < 0) return err; return mipi_dsi_attach(dsi); } static void tdo_tl070wsh30_panel_remove(struct mipi_dsi_device *dsi) { struct tdo_tl070wsh30_panel *tdo_tl070wsh30 = mipi_dsi_get_drvdata(dsi); int err; err = mipi_dsi_detach(dsi); if (err < 0) dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err); drm_panel_remove(&tdo_tl070wsh30->base); drm_panel_disable(&tdo_tl070wsh30->base); drm_panel_unprepare(&tdo_tl070wsh30->base); } static void tdo_tl070wsh30_panel_shutdown(struct mipi_dsi_device *dsi) { struct tdo_tl070wsh30_panel *tdo_tl070wsh30 = mipi_dsi_get_drvdata(dsi); drm_panel_disable(&tdo_tl070wsh30->base); drm_panel_unprepare(&tdo_tl070wsh30->base); } static struct mipi_dsi_driver tdo_tl070wsh30_panel_driver = { .driver = { .name = "panel-tdo-tl070wsh30", .of_match_table = tdo_tl070wsh30_of_match, }, .probe = tdo_tl070wsh30_panel_probe, .remove = tdo_tl070wsh30_panel_remove, .shutdown = tdo_tl070wsh30_panel_shutdown, }; module_mipi_dsi_driver(tdo_tl070wsh30_panel_driver); MODULE_AUTHOR("Neil Armstrong <[email protected]>"); MODULE_DESCRIPTION("TDO TL070WSH30 panel driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/gpu/drm/panel/panel-tdo-tl070wsh30.c