python_code (stringlengths 0-1.8M) | repo_name (stringclasses, 7 values) | file_path (stringlengths 5-99) |
---|---|---|
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2018-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "kfd_device_queue_manager.h"
#include "navi10_enum.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
static int update_qpd_v10(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd);
void device_queue_manager_init_v10(
struct device_queue_manager_asic_ops *asic_ops)
{
asic_ops->update_qpd = update_qpd_v10;
asic_ops->init_sdma_vm = init_sdma_vm_v10;
asic_ops->mqd_manager_init = mqd_manager_init_v10;
}
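/* SH_MEM_BASES packs the top 16 bits of the 64-bit LDS and scratch aperture
 * addresses: SHARED_BASE in the upper half of the register and PRIVATE_BASE
 * in the lower half. The hardware derives the full aperture bases from
 * these bits.
 */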
static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)
{
uint32_t shared_base = pdd->lds_base >> 48;
uint32_t private_base = pdd->scratch_base >> 48;
return (shared_base << SH_MEM_BASES__SHARED_BASE__SHIFT) |
private_base;
}
static int update_qpd_v10(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct kfd_process_device *pdd;
pdd = qpd_to_pdd(qpd);
/* check if sh_mem_config register already configured */
if (qpd->sh_mem_config == 0) {
qpd->sh_mem_config =
(SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
(3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
qpd->sh_mem_ape1_limit = 0;
qpd->sh_mem_ape1_base = 0;
}
qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);
pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases);
return 0;
}
static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd)
{
/* No longer needed on SDMA v4 and later */
q->properties.sdma_vm_addr = 0;
}
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2014-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers.h"
#include "kfd_pm4_opcodes.h"
#define PM4_COUNT_ZERO (((1 << 15) - 1) << 16)
/* Initialize a kernel queue, including allocations of GART memory
* needed for the queue.
*/
static bool kq_initialize(struct kernel_queue *kq, struct kfd_node *dev,
enum kfd_queue_type type, unsigned int queue_size)
{
struct queue_properties prop;
int retval;
union PM4_MES_TYPE_3_HEADER nop;
if (WARN_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ))
return false;
pr_debug("Initializing queue type %d size %d\n", KFD_QUEUE_TYPE_HIQ,
queue_size);
memset(&prop, 0, sizeof(prop));
memset(&nop, 0, sizeof(nop));
nop.opcode = IT_NOP;
nop.type = PM4_TYPE_3;
nop.u32all |= PM4_COUNT_ZERO;
kq->dev = dev;
kq->nop_packet = nop.u32all;
switch (type) {
case KFD_QUEUE_TYPE_DIQ:
kq->mqd_mgr = dev->dqm->mqd_mgrs[KFD_MQD_TYPE_DIQ];
break;
case KFD_QUEUE_TYPE_HIQ:
kq->mqd_mgr = dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
break;
default:
pr_err("Invalid queue type %d\n", type);
return false;
}
if (!kq->mqd_mgr)
return false;
prop.doorbell_ptr = kfd_get_kernel_doorbell(dev->kfd, &prop.doorbell_off);
if (!prop.doorbell_ptr) {
pr_err("Failed to initialize doorbell");
goto err_get_kernel_doorbell;
}
retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq);
if (retval != 0) {
pr_err("Failed to init pq queues size %d\n", queue_size);
goto err_pq_allocate_vidmem;
}
kq->pq_kernel_addr = kq->pq->cpu_ptr;
kq->pq_gpu_addr = kq->pq->gpu_addr;
/* For CIK family ASICs, kq->eop_mem is not needed */
if (dev->adev->asic_type > CHIP_MULLINS) {
retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
if (retval != 0)
goto err_eop_allocate_vidmem;
kq->eop_gpu_addr = kq->eop_mem->gpu_addr;
kq->eop_kernel_addr = kq->eop_mem->cpu_ptr;
memset(kq->eop_kernel_addr, 0, PAGE_SIZE);
}
retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->rptr_kernel),
&kq->rptr_mem);
if (retval != 0)
goto err_rptr_allocate_vidmem;
kq->rptr_kernel = kq->rptr_mem->cpu_ptr;
kq->rptr_gpu_addr = kq->rptr_mem->gpu_addr;
retval = kfd_gtt_sa_allocate(dev, dev->kfd->device_info.doorbell_size,
&kq->wptr_mem);
if (retval != 0)
goto err_wptr_allocate_vidmem;
kq->wptr_kernel = kq->wptr_mem->cpu_ptr;
kq->wptr_gpu_addr = kq->wptr_mem->gpu_addr;
memset(kq->pq_kernel_addr, 0, queue_size);
memset(kq->rptr_kernel, 0, sizeof(*kq->rptr_kernel));
memset(kq->wptr_kernel, 0, sizeof(*kq->wptr_kernel));
prop.queue_size = queue_size;
prop.is_interop = false;
prop.is_gws = false;
prop.priority = 1;
prop.queue_percent = 100;
prop.type = type;
prop.vmid = 0;
prop.queue_address = kq->pq_gpu_addr;
prop.read_ptr = (uint32_t *) kq->rptr_gpu_addr;
prop.write_ptr = (uint32_t *) kq->wptr_gpu_addr;
prop.eop_ring_buffer_address = kq->eop_gpu_addr;
prop.eop_ring_buffer_size = PAGE_SIZE;
if (init_queue(&kq->queue, &prop) != 0)
goto err_init_queue;
kq->queue->device = dev;
kq->queue->process = kfd_get_process(current);
kq->queue->mqd_mem_obj = kq->mqd_mgr->allocate_mqd(kq->mqd_mgr->dev,
&kq->queue->properties);
if (!kq->queue->mqd_mem_obj)
goto err_allocate_mqd;
kq->mqd_mgr->init_mqd(kq->mqd_mgr, &kq->queue->mqd,
kq->queue->mqd_mem_obj,
&kq->queue->gart_mqd_addr,
&kq->queue->properties);
/* assign HIQ to HQD */
if (type == KFD_QUEUE_TYPE_HIQ) {
pr_debug("Assigning hiq to hqd\n");
kq->queue->pipe = KFD_CIK_HIQ_PIPE;
kq->queue->queue = KFD_CIK_HIQ_QUEUE;
kq->mqd_mgr->load_mqd(kq->mqd_mgr, kq->queue->mqd,
kq->queue->pipe, kq->queue->queue,
&kq->queue->properties, NULL);
} else {
/* allocate fence for DIQ */
retval = kfd_gtt_sa_allocate(dev, sizeof(uint32_t),
&kq->fence_mem_obj);
if (retval != 0)
goto err_alloc_fence;
kq->fence_kernel_address = kq->fence_mem_obj->cpu_ptr;
kq->fence_gpu_addr = kq->fence_mem_obj->gpu_addr;
}
print_queue(kq->queue);
return true;
err_alloc_fence:
kq->mqd_mgr->free_mqd(kq->mqd_mgr, kq->queue->mqd, kq->queue->mqd_mem_obj);
err_allocate_mqd:
uninit_queue(kq->queue);
err_init_queue:
kfd_gtt_sa_free(dev, kq->wptr_mem);
err_wptr_allocate_vidmem:
kfd_gtt_sa_free(dev, kq->rptr_mem);
err_rptr_allocate_vidmem:
kfd_gtt_sa_free(dev, kq->eop_mem);
err_eop_allocate_vidmem:
kfd_gtt_sa_free(dev, kq->pq);
err_pq_allocate_vidmem:
kfd_release_kernel_doorbell(dev->kfd, prop.doorbell_ptr);
err_get_kernel_doorbell:
return false;
}
/* Uninitialize a kernel queue and free all its memory usages. */
static void kq_uninitialize(struct kernel_queue *kq, bool hanging)
{
if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ && !hanging)
kq->mqd_mgr->destroy_mqd(kq->mqd_mgr,
kq->queue->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
KFD_UNMAP_LATENCY_MS,
kq->queue->pipe,
kq->queue->queue);
else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ)
kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj);
kq->mqd_mgr->free_mqd(kq->mqd_mgr, kq->queue->mqd,
kq->queue->mqd_mem_obj);
kfd_gtt_sa_free(kq->dev, kq->rptr_mem);
kfd_gtt_sa_free(kq->dev, kq->wptr_mem);
/* For CIK family ASICs, kq->eop_mem is NULL; kfd_gtt_sa_free()
* is able to handle NULL properly.
*/
kfd_gtt_sa_free(kq->dev, kq->eop_mem);
kfd_gtt_sa_free(kq->dev, kq->pq);
kfd_release_kernel_doorbell(kq->dev->kfd,
kq->queue->properties.doorbell_ptr);
uninit_queue(kq->queue);
}
int kq_acquire_packet_buffer(struct kernel_queue *kq,
size_t packet_size_in_dwords, unsigned int **buffer_ptr)
{
size_t available_size;
size_t queue_size_dwords;
uint32_t wptr, rptr;
uint64_t wptr64;
unsigned int *queue_address;
/* When rptr == wptr, the buffer is empty.
* When rptr == wptr + 1, the buffer is full.
* It is always rptr that advances to the position of wptr, rather than
* the opposite. So we can only use up to queue_size_dwords - 1 dwords.
*/
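/* For example, with queue_size_dwords = 1024, rptr = 10 and wptr = 8,
 * available_size = (10 + 1024 - 1 - 8) % 1024 = 1 dword.
 */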
rptr = *kq->rptr_kernel;
wptr = kq->pending_wptr;
wptr64 = kq->pending_wptr64;
queue_address = (unsigned int *)kq->pq_kernel_addr;
queue_size_dwords = kq->queue->properties.queue_size / 4;
pr_debug("rptr: %d\n", rptr);
pr_debug("wptr: %d\n", wptr);
pr_debug("queue_address 0x%p\n", queue_address);
available_size = (rptr + queue_size_dwords - 1 - wptr) %
queue_size_dwords;
if (packet_size_in_dwords > available_size) {
/*
* make sure calling functions know
* kq_acquire_packet_buffer() failed
*/
goto err_no_space;
}
if (wptr + packet_size_in_dwords >= queue_size_dwords) {
/* make sure after rolling back to position 0, there is
* still enough space.
*/
if (packet_size_in_dwords >= rptr)
goto err_no_space;
/* fill nops, roll back and start at position 0 */
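/* Each NOP is a single-dword type-3 header, so the tail of the ring up to
 * the wrap point is padded and the new packet can then be written
 * contiguously starting at offset 0.
 */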
while (wptr > 0) {
queue_address[wptr] = kq->nop_packet;
wptr = (wptr + 1) % queue_size_dwords;
wptr64++;
}
}
*buffer_ptr = &queue_address[wptr];
kq->pending_wptr = wptr + packet_size_in_dwords;
kq->pending_wptr64 = wptr64 + packet_size_in_dwords;
return 0;
err_no_space:
*buffer_ptr = NULL;
return -ENOMEM;
}
void kq_submit_packet(struct kernel_queue *kq)
{
#ifdef DEBUG
int i;
for (i = *kq->wptr_kernel; i < kq->pending_wptr; i++) {
pr_debug("0x%2X ", kq->pq_kernel_addr[i]);
if (i % 15 == 0)
pr_debug("\n");
}
pr_debug("\n");
#endif
if (kq->dev->kfd->device_info.doorbell_size == 8) {
*kq->wptr64_kernel = kq->pending_wptr64;
write_kernel_doorbell64(kq->queue->properties.doorbell_ptr,
kq->pending_wptr64);
} else {
*kq->wptr_kernel = kq->pending_wptr;
write_kernel_doorbell(kq->queue->properties.doorbell_ptr,
kq->pending_wptr);
}
}
void kq_rollback_packet(struct kernel_queue *kq)
{
if (kq->dev->kfd->device_info.doorbell_size == 8) {
kq->pending_wptr64 = *kq->wptr64_kernel;
kq->pending_wptr = *kq->wptr_kernel %
(kq->queue->properties.queue_size / 4);
} else {
kq->pending_wptr = *kq->wptr_kernel;
}
}
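/* Illustrative sketch (not from this file) of how a caller such as the packet
 * manager is expected to combine the three functions above; build_pm4_packet()
 * is a hypothetical helper:
 *
 *	uint32_t *buf;
 *
 *	if (kq_acquire_packet_buffer(kq, size_in_dwords, &buf))
 *		return -ENOMEM;			// ring has no space left
 *	if (build_pm4_packet(buf))		// hypothetical packet builder
 *		kq_rollback_packet(kq);		// discard the reservation
 *	else
 *		kq_submit_packet(kq);		// update wptr and ring the doorbell
 */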
struct kernel_queue *kernel_queue_init(struct kfd_node *dev,
enum kfd_queue_type type)
{
struct kernel_queue *kq;
kq = kzalloc(sizeof(*kq), GFP_KERNEL);
if (!kq)
return NULL;
if (kq_initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE))
return kq;
pr_err("Failed to init kernel queue\n");
kfree(kq);
return NULL;
}
void kernel_queue_uninit(struct kernel_queue *kq, bool hanging)
{
kq_uninitialize(kq, hanging);
kfree(kq);
}
/* FIXME: Can this test be removed? */
static __attribute__((unused)) void test_kq(struct kfd_node *dev)
{
struct kernel_queue *kq;
uint32_t *buffer, i;
int retval;
pr_err("Starting kernel queue test\n");
kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ);
if (unlikely(!kq)) {
pr_err(" Failed to initialize HIQ\n");
pr_err("Kernel queue test failed\n");
return;
}
retval = kq_acquire_packet_buffer(kq, 5, &buffer);
if (unlikely(retval != 0)) {
pr_err(" Failed to acquire packet buffer\n");
pr_err("Kernel queue test failed\n");
return;
}
for (i = 0; i < 5; i++)
buffer[i] = kq->nop_packet;
kq_submit_packet(kq);
pr_err("Ending kernel queue test\n");
}
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2014-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "kfd_device_queue_manager.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "oss/oss_3_0_sh_mask.h"
static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
uint64_t alternate_aperture_size);
static int update_qpd_vi(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static void init_sdma_vm(struct device_queue_manager *dqm,
struct queue *q,
struct qcm_process_device *qpd);
void device_queue_manager_init_vi(
struct device_queue_manager_asic_ops *asic_ops)
{
asic_ops->set_cache_memory_policy = set_cache_memory_policy_vi;
asic_ops->update_qpd = update_qpd_vi;
asic_ops->init_sdma_vm = init_sdma_vm;
asic_ops->mqd_manager_init = mqd_manager_init_vi;
}
static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
{
/* In 64-bit mode, we can only control the top 3 bits of the LDS,
* scratch and GPUVM apertures.
* The hardware fills in the remaining 59 bits according to the
* following pattern:
* LDS: X0000000'00000000 - X0000001'00000000 (4GB)
* Scratch: X0000001'00000000 - X0000002'00000000 (4GB)
* GPUVM: Y0010000'00000000 - Y0020000'00000000 (1TB)
*
* (where X/Y is the configurable nybble with the low-bit 0)
*
* LDS and scratch will have the same top nybble programmed in the
* top 3 bits of SH_MEM_BASES.PRIVATE_BASE.
* GPUVM can have a different top nybble programmed in the
* top 3 bits of SH_MEM_BASES.SHARED_BASE.
* We don't bother to support different top nybbles
* for LDS/Scratch and GPUVM.
*/
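/* For example, top_address_nybble = 6 gives (6 << 12) = 0x6000 for
 * PRIVATE_BASE and the same value shifted into SHARED_BASE, i.e.
 * SH_MEM_BASES = 0x60006000, placing LDS at 0x60000000'00000000 and
 * scratch 4GB above it.
 */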
WARN_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
top_address_nybble == 0);
return top_address_nybble << 12 |
(top_address_nybble << 12) <<
SH_MEM_BASES__SHARED_BASE__SHIFT;
}
static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
uint64_t alternate_aperture_size)
{
uint32_t default_mtype;
uint32_t ape1_mtype;
default_mtype = (default_policy == cache_policy_coherent) ?
MTYPE_UC :
MTYPE_NC;
ape1_mtype = (alternate_policy == cache_policy_coherent) ?
MTYPE_UC :
MTYPE_NC;
qpd->sh_mem_config =
SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
default_mtype << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
ape1_mtype << SH_MEM_CONFIG__APE1_MTYPE__SHIFT;
return true;
}
static int update_qpd_vi(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct kfd_process_device *pdd;
unsigned int temp;
pdd = qpd_to_pdd(qpd);
/* check if sh_mem_config register already configured */
if (qpd->sh_mem_config == 0) {
qpd->sh_mem_config =
SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
MTYPE_UC <<
SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
MTYPE_UC <<
SH_MEM_CONFIG__APE1_MTYPE__SHIFT;
qpd->sh_mem_ape1_limit = 0;
qpd->sh_mem_ape1_base = 0;
}
/* On dGPU we're always in GPUVM64 addressing mode with 64-bit
* aperture addresses.
*/
temp = get_sh_mem_bases_nybble_64(pdd);
qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
pr_debug("sh_mem_bases nybble: 0x%X and register 0x%X\n",
temp, qpd->sh_mem_bases);
return 0;
}
static void init_sdma_vm(struct device_queue_manager *dqm,
struct queue *q,
struct qcm_process_device *qpd)
{
/* On dGPU we're always in GPUVM64 addressing mode with 64-bit
* aperture addresses.
*/
q->properties.sdma_vm_addr =
((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;
}
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2014-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/mm_types.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/mman.h>
#include <linux/memory.h>
#include "kfd_priv.h"
#include "kfd_events.h"
#include <linux/device.h>
/*
* Wrapper around wait_queue_entry_t
*/
struct kfd_event_waiter {
wait_queue_entry_t wait;
struct kfd_event *event; /* Event to wait for */
bool activated; /* Becomes true when event is signaled */
bool event_age_enabled; /* set to true when last_event_age is non-zero */
};
/*
* Each signal event needs a 64-bit signal slot where the signaler will write
* a 1 before sending an interrupt. (This is needed because some interrupts
* do not contain enough spare data bits to identify an event.)
* We get whole pages and map them to the process VA.
* Individual signal events use their event_id as slot index.
*/
struct kfd_signal_page {
uint64_t *kernel_address;
uint64_t __user *user_address;
bool need_to_free_pages;
};
static uint64_t *page_slots(struct kfd_signal_page *page)
{
return page->kernel_address;
}
static struct kfd_signal_page *allocate_signal_page(struct kfd_process *p)
{
void *backing_store;
struct kfd_signal_page *page;
page = kzalloc(sizeof(*page), GFP_KERNEL);
if (!page)
return NULL;
backing_store = (void *) __get_free_pages(GFP_KERNEL,
get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
if (!backing_store)
goto fail_alloc_signal_store;
/* Initialize all events to unsignaled */
memset(backing_store, (uint8_t) UNSIGNALED_EVENT_SLOT,
KFD_SIGNAL_EVENT_LIMIT * 8);
page->kernel_address = backing_store;
page->need_to_free_pages = true;
pr_debug("Allocated new event signal page at %p, for process %p\n",
page, p);
return page;
fail_alloc_signal_store:
kfree(page);
return NULL;
}
static int allocate_event_notification_slot(struct kfd_process *p,
struct kfd_event *ev,
const int *restore_id)
{
int id;
if (!p->signal_page) {
p->signal_page = allocate_signal_page(p);
if (!p->signal_page)
return -ENOMEM;
/* Oldest user mode expects 256 event slots */
p->signal_mapped_size = 256*8;
}
if (restore_id) {
id = idr_alloc(&p->event_idr, ev, *restore_id, *restore_id + 1,
GFP_KERNEL);
} else {
/*
* Compatibility with old user mode: Only use signal slots
* user mode has mapped, may be less than
* KFD_SIGNAL_EVENT_LIMIT. This also allows future increase
* of the event limit without breaking user mode.
*/
id = idr_alloc(&p->event_idr, ev, 0, p->signal_mapped_size / 8,
GFP_KERNEL);
}
if (id < 0)
return id;
ev->event_id = id;
page_slots(p->signal_page)[id] = UNSIGNALED_EVENT_SLOT;
return 0;
}
/*
* Assumes that p->event_mutex or rcu_readlock is held and of course that p is
* not going away.
*/
static struct kfd_event *lookup_event_by_id(struct kfd_process *p, uint32_t id)
{
return idr_find(&p->event_idr, id);
}
/**
* lookup_signaled_event_by_partial_id - Lookup signaled event from partial ID
* @p: Pointer to struct kfd_process
* @id: ID to look up
* @bits: Number of valid bits in @id
*
* Finds the first signaled event with a matching partial ID. If no
* matching signaled event is found, returns NULL. In that case the
* caller should assume that the partial ID is invalid and do an
* exhaustive search of all signaled events.
*
* If multiple events with the same partial ID signal at the same
* time, they will be found one interrupt at a time, not necessarily
* in the same order the interrupts occurred. As long as the number of
* interrupts is correct, all signaled events will be seen by the
* driver.
*/
static struct kfd_event *lookup_signaled_event_by_partial_id(
struct kfd_process *p, uint32_t id, uint32_t bits)
{
struct kfd_event *ev;
if (!p->signal_page || id >= KFD_SIGNAL_EVENT_LIMIT)
return NULL;
/* Fast path for the common case that @id is not a partial ID
* and we only need a single lookup.
*/
if (bits > 31 || (1U << bits) >= KFD_SIGNAL_EVENT_LIMIT) {
if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
return NULL;
return idr_find(&p->event_idr, id);
}
/* General case for partial IDs: Iterate over all matching IDs
* and find the first one that has signaled.
*/
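/* For example, with bits == 8 the candidate IDs are id, id + 256,
 * id + 512, ... up to KFD_SIGNAL_EVENT_LIMIT.
 */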
for (ev = NULL; id < KFD_SIGNAL_EVENT_LIMIT && !ev; id += 1U << bits) {
if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
continue;
ev = idr_find(&p->event_idr, id);
}
return ev;
}
static int create_signal_event(struct file *devkfd, struct kfd_process *p,
struct kfd_event *ev, const int *restore_id)
{
int ret;
if (p->signal_mapped_size &&
p->signal_event_count == p->signal_mapped_size / 8) {
if (!p->signal_event_limit_reached) {
pr_debug("Signal event wasn't created because limit was reached\n");
p->signal_event_limit_reached = true;
}
return -ENOSPC;
}
ret = allocate_event_notification_slot(p, ev, restore_id);
if (ret) {
pr_warn("Signal event wasn't created because out of kernel memory\n");
return ret;
}
p->signal_event_count++;
ev->user_signal_address = &p->signal_page->user_address[ev->event_id];
pr_debug("Signal event number %zu created with id %d, address %p\n",
p->signal_event_count, ev->event_id,
ev->user_signal_address);
return 0;
}
static int create_other_event(struct kfd_process *p, struct kfd_event *ev, const int *restore_id)
{
int id;
if (restore_id)
id = idr_alloc(&p->event_idr, ev, *restore_id, *restore_id + 1,
GFP_KERNEL);
else
/* Cast KFD_LAST_NONSIGNAL_EVENT_ID to uint32_t. This allows an
* intentional integer overflow to -1 without a compiler
* warning. idr_alloc treats a negative value as "maximum
* signed integer".
*/
id = idr_alloc(&p->event_idr, ev, KFD_FIRST_NONSIGNAL_EVENT_ID,
(uint32_t)KFD_LAST_NONSIGNAL_EVENT_ID + 1,
GFP_KERNEL);
if (id < 0)
return id;
ev->event_id = id;
return 0;
}
int kfd_event_init_process(struct kfd_process *p)
{
int id;
mutex_init(&p->event_mutex);
idr_init(&p->event_idr);
p->signal_page = NULL;
p->signal_event_count = 1;
/* Allocate event ID 0. It is used for a fast path to ignore bogus events
* that are sent by the CP without a context ID
*/
id = idr_alloc(&p->event_idr, NULL, 0, 1, GFP_KERNEL);
if (id < 0) {
idr_destroy(&p->event_idr);
mutex_destroy(&p->event_mutex);
return id;
}
return 0;
}
static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
{
struct kfd_event_waiter *waiter;
/* Wake up pending waiters. They will return failure */
spin_lock(&ev->lock);
list_for_each_entry(waiter, &ev->wq.head, wait.entry)
WRITE_ONCE(waiter->event, NULL);
wake_up_all(&ev->wq);
spin_unlock(&ev->lock);
if (ev->type == KFD_EVENT_TYPE_SIGNAL ||
ev->type == KFD_EVENT_TYPE_DEBUG)
p->signal_event_count--;
idr_remove(&p->event_idr, ev->event_id);
kfree_rcu(ev, rcu);
}
static void destroy_events(struct kfd_process *p)
{
struct kfd_event *ev;
uint32_t id;
idr_for_each_entry(&p->event_idr, ev, id)
if (ev)
destroy_event(p, ev);
idr_destroy(&p->event_idr);
mutex_destroy(&p->event_mutex);
}
/*
* We assume that the process is being destroyed and there is no need to
* unmap the pages or keep bookkeeping data in order.
*/
static void shutdown_signal_page(struct kfd_process *p)
{
struct kfd_signal_page *page = p->signal_page;
if (page) {
if (page->need_to_free_pages)
free_pages((unsigned long)page->kernel_address,
get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
kfree(page);
}
}
void kfd_event_free_process(struct kfd_process *p)
{
destroy_events(p);
shutdown_signal_page(p);
}
static bool event_can_be_gpu_signaled(const struct kfd_event *ev)
{
return ev->type == KFD_EVENT_TYPE_SIGNAL ||
ev->type == KFD_EVENT_TYPE_DEBUG;
}
static bool event_can_be_cpu_signaled(const struct kfd_event *ev)
{
return ev->type == KFD_EVENT_TYPE_SIGNAL;
}
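/* Adopt an externally allocated signal page (a GTT BO mapped into the kernel
 * by kfd_kmap_event_page()). need_to_free_pages is left false so that
 * shutdown_signal_page() will not free memory this module does not own.
 */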
static int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
uint64_t size, uint64_t user_handle)
{
struct kfd_signal_page *page;
if (p->signal_page)
return -EBUSY;
page = kzalloc(sizeof(*page), GFP_KERNEL);
if (!page)
return -ENOMEM;
/* Initialize all events to unsignaled */
memset(kernel_address, (uint8_t) UNSIGNALED_EVENT_SLOT,
KFD_SIGNAL_EVENT_LIMIT * 8);
page->kernel_address = kernel_address;
p->signal_page = page;
p->signal_mapped_size = size;
p->signal_handle = user_handle;
return 0;
}
int kfd_kmap_event_page(struct kfd_process *p, uint64_t event_page_offset)
{
struct kfd_node *kfd;
struct kfd_process_device *pdd;
void *mem, *kern_addr;
uint64_t size;
int err = 0;
if (p->signal_page) {
pr_err("Event page is already set\n");
return -EINVAL;
}
pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(event_page_offset));
if (!pdd) {
pr_err("Getting device by id failed in %s\n", __func__);
return -EINVAL;
}
kfd = pdd->dev;
pdd = kfd_bind_process_to_device(kfd, p);
if (IS_ERR(pdd))
return PTR_ERR(pdd);
mem = kfd_process_device_translate_handle(pdd,
GET_IDR_HANDLE(event_page_offset));
if (!mem) {
pr_err("Can't find BO, offset is 0x%llx\n", event_page_offset);
return -EINVAL;
}
err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(mem, &kern_addr, &size);
if (err) {
pr_err("Failed to map event page to kernel\n");
return err;
}
err = kfd_event_page_set(p, kern_addr, size, event_page_offset);
if (err) {
pr_err("Failed to set event page\n");
amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
return err;
}
return err;
}
int kfd_event_create(struct file *devkfd, struct kfd_process *p,
uint32_t event_type, bool auto_reset, uint32_t node_id,
uint32_t *event_id, uint32_t *event_trigger_data,
uint64_t *event_page_offset, uint32_t *event_slot_index)
{
int ret = 0;
struct kfd_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);
if (!ev)
return -ENOMEM;
ev->type = event_type;
ev->auto_reset = auto_reset;
ev->signaled = false;
spin_lock_init(&ev->lock);
init_waitqueue_head(&ev->wq);
*event_page_offset = 0;
mutex_lock(&p->event_mutex);
switch (event_type) {
case KFD_EVENT_TYPE_SIGNAL:
case KFD_EVENT_TYPE_DEBUG:
ret = create_signal_event(devkfd, p, ev, NULL);
if (!ret) {
*event_page_offset = KFD_MMAP_TYPE_EVENTS;
*event_slot_index = ev->event_id;
}
break;
default:
ret = create_other_event(p, ev, NULL);
break;
}
if (!ret) {
*event_id = ev->event_id;
*event_trigger_data = ev->event_id;
ev->event_age = 1;
} else {
kfree(ev);
}
mutex_unlock(&p->event_mutex);
return ret;
}
int kfd_criu_restore_event(struct file *devkfd,
struct kfd_process *p,
uint8_t __user *user_priv_ptr,
uint64_t *priv_data_offset,
uint64_t max_priv_data_size)
{
struct kfd_criu_event_priv_data *ev_priv;
struct kfd_event *ev = NULL;
int ret = 0;
ev_priv = kmalloc(sizeof(*ev_priv), GFP_KERNEL);
if (!ev_priv)
return -ENOMEM;
ev = kzalloc(sizeof(*ev), GFP_KERNEL);
if (!ev) {
ret = -ENOMEM;
goto exit;
}
if (*priv_data_offset + sizeof(*ev_priv) > max_priv_data_size) {
ret = -EINVAL;
goto exit;
}
ret = copy_from_user(ev_priv, user_priv_ptr + *priv_data_offset, sizeof(*ev_priv));
if (ret) {
ret = -EFAULT;
goto exit;
}
*priv_data_offset += sizeof(*ev_priv);
if (ev_priv->user_handle) {
ret = kfd_kmap_event_page(p, ev_priv->user_handle);
if (ret)
goto exit;
}
ev->type = ev_priv->type;
ev->auto_reset = ev_priv->auto_reset;
ev->signaled = ev_priv->signaled;
spin_lock_init(&ev->lock);
init_waitqueue_head(&ev->wq);
mutex_lock(&p->event_mutex);
switch (ev->type) {
case KFD_EVENT_TYPE_SIGNAL:
case KFD_EVENT_TYPE_DEBUG:
ret = create_signal_event(devkfd, p, ev, &ev_priv->event_id);
break;
case KFD_EVENT_TYPE_MEMORY:
memcpy(&ev->memory_exception_data,
&ev_priv->memory_exception_data,
sizeof(struct kfd_hsa_memory_exception_data));
ret = create_other_event(p, ev, &ev_priv->event_id);
break;
case KFD_EVENT_TYPE_HW_EXCEPTION:
memcpy(&ev->hw_exception_data,
&ev_priv->hw_exception_data,
sizeof(struct kfd_hsa_hw_exception_data));
ret = create_other_event(p, ev, &ev_priv->event_id);
break;
}
mutex_unlock(&p->event_mutex);
exit:
if (ret)
kfree(ev);
kfree(ev_priv);
return ret;
}
int kfd_criu_checkpoint_events(struct kfd_process *p,
uint8_t __user *user_priv_data,
uint64_t *priv_data_offset)
{
struct kfd_criu_event_priv_data *ev_privs;
int i = 0;
int ret = 0;
struct kfd_event *ev;
uint32_t ev_id;
uint32_t num_events = kfd_get_num_events(p);
if (!num_events)
return 0;
ev_privs = kvzalloc(num_events * sizeof(*ev_privs), GFP_KERNEL);
if (!ev_privs)
return -ENOMEM;
idr_for_each_entry(&p->event_idr, ev, ev_id) {
struct kfd_criu_event_priv_data *ev_priv;
/*
* Currently all events have the same size of private_data, but the
* ioctls and the CRIU plugin support private_data of variable sizes.
*/
ev_priv = &ev_privs[i];
ev_priv->object_type = KFD_CRIU_OBJECT_TYPE_EVENT;
/* We store the user_handle with the first event */
if (i == 0 && p->signal_page)
ev_priv->user_handle = p->signal_handle;
ev_priv->event_id = ev->event_id;
ev_priv->auto_reset = ev->auto_reset;
ev_priv->type = ev->type;
ev_priv->signaled = ev->signaled;
if (ev_priv->type == KFD_EVENT_TYPE_MEMORY)
memcpy(&ev_priv->memory_exception_data,
&ev->memory_exception_data,
sizeof(struct kfd_hsa_memory_exception_data));
else if (ev_priv->type == KFD_EVENT_TYPE_HW_EXCEPTION)
memcpy(&ev_priv->hw_exception_data,
&ev->hw_exception_data,
sizeof(struct kfd_hsa_hw_exception_data));
pr_debug("Checkpointed event[%d] id = 0x%08x auto_reset = %x type = %x signaled = %x\n",
i,
ev_priv->event_id,
ev_priv->auto_reset,
ev_priv->type,
ev_priv->signaled);
i++;
}
ret = copy_to_user(user_priv_data + *priv_data_offset,
ev_privs, num_events * sizeof(*ev_privs));
if (ret) {
pr_err("Failed to copy events priv to user\n");
ret = -EFAULT;
}
*priv_data_offset += num_events * sizeof(*ev_privs);
kvfree(ev_privs);
return ret;
}
int kfd_get_num_events(struct kfd_process *p)
{
struct kfd_event *ev;
uint32_t id;
u32 num_events = 0;
idr_for_each_entry(&p->event_idr, ev, id)
num_events++;
return num_events;
}
/* Assumes that p is current. */
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id)
{
struct kfd_event *ev;
int ret = 0;
mutex_lock(&p->event_mutex);
ev = lookup_event_by_id(p, event_id);
if (ev)
destroy_event(p, ev);
else
ret = -EINVAL;
mutex_unlock(&p->event_mutex);
return ret;
}
static void set_event(struct kfd_event *ev)
{
struct kfd_event_waiter *waiter;
/* Auto reset if the list is non-empty and we're waking
* someone. waitqueue_active is safe here because we're
* protected by the ev->lock, which is also held when
* updating the wait queues in kfd_wait_on_events.
*/
ev->signaled = !ev->auto_reset || !waitqueue_active(&ev->wq);
if (!(++ev->event_age)) {
/* Never wrap back to reserved/default event age 0/1 */
ev->event_age = 2;
WARN_ONCE(1, "event_age wrap back!");
}
list_for_each_entry(waiter, &ev->wq.head, wait.entry)
WRITE_ONCE(waiter->activated, true);
wake_up_all(&ev->wq);
}
/* Assumes that p is current. */
int kfd_set_event(struct kfd_process *p, uint32_t event_id)
{
int ret = 0;
struct kfd_event *ev;
rcu_read_lock();
ev = lookup_event_by_id(p, event_id);
if (!ev) {
ret = -EINVAL;
goto unlock_rcu;
}
spin_lock(&ev->lock);
if (event_can_be_cpu_signaled(ev))
set_event(ev);
else
ret = -EINVAL;
spin_unlock(&ev->lock);
unlock_rcu:
rcu_read_unlock();
return ret;
}
static void reset_event(struct kfd_event *ev)
{
ev->signaled = false;
}
/* Assumes that p is current. */
int kfd_reset_event(struct kfd_process *p, uint32_t event_id)
{
int ret = 0;
struct kfd_event *ev;
rcu_read_lock();
ev = lookup_event_by_id(p, event_id);
if (!ev) {
ret = -EINVAL;
goto unlock_rcu;
}
spin_lock(&ev->lock);
if (event_can_be_cpu_signaled(ev))
reset_event(ev);
else
ret = -EINVAL;
spin_unlock(&ev->lock);
unlock_rcu:
rcu_read_unlock();
return ret;
}
static void acknowledge_signal(struct kfd_process *p, struct kfd_event *ev)
{
WRITE_ONCE(page_slots(p->signal_page)[ev->event_id], UNSIGNALED_EVENT_SLOT);
}
static void set_event_from_interrupt(struct kfd_process *p,
struct kfd_event *ev)
{
if (ev && event_can_be_gpu_signaled(ev)) {
acknowledge_signal(p, ev);
spin_lock(&ev->lock);
set_event(ev);
spin_unlock(&ev->lock);
}
}
void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
uint32_t valid_id_bits)
{
struct kfd_event *ev = NULL;
/*
* Because we are called from arbitrary context (workqueue) as opposed
* to process context, kfd_process could attempt to exit while we are
* running so the lookup function increments the process ref count.
*/
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
if (!p)
return; /* Presumably process exited. */
rcu_read_lock();
if (valid_id_bits)
ev = lookup_signaled_event_by_partial_id(p, partial_id,
valid_id_bits);
if (ev) {
set_event_from_interrupt(p, ev);
} else if (p->signal_page) {
/*
* Partial ID lookup failed. Assume that the event ID
* in the interrupt payload was invalid and do an
* exhaustive search of signaled events.
*/
uint64_t *slots = page_slots(p->signal_page);
uint32_t id;
if (valid_id_bits)
pr_debug_ratelimited("Partial ID invalid: %u (%u valid bits)\n",
partial_id, valid_id_bits);
if (p->signal_event_count < KFD_SIGNAL_EVENT_LIMIT / 64) {
/* With relatively few events, it's faster to
* iterate over the event IDR
*/
idr_for_each_entry(&p->event_idr, ev, id) {
if (id >= KFD_SIGNAL_EVENT_LIMIT)
break;
if (READ_ONCE(slots[id]) != UNSIGNALED_EVENT_SLOT)
set_event_from_interrupt(p, ev);
}
} else {
/* With relatively many events, it's faster to
* iterate over the signal slots and lookup
* only signaled events from the IDR.
*/
for (id = 1; id < KFD_SIGNAL_EVENT_LIMIT; id++)
if (READ_ONCE(slots[id]) != UNSIGNALED_EVENT_SLOT) {
ev = lookup_event_by_id(p, id);
set_event_from_interrupt(p, ev);
}
}
}
rcu_read_unlock();
kfd_unref_process(p);
}
static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
{
struct kfd_event_waiter *event_waiters;
uint32_t i;
event_waiters = kcalloc(num_events, sizeof(struct kfd_event_waiter),
GFP_KERNEL);
if (!event_waiters)
return NULL;
for (i = 0; i < num_events; i++)
init_wait(&event_waiters[i].wait);
return event_waiters;
}
static int init_event_waiter(struct kfd_process *p,
struct kfd_event_waiter *waiter,
struct kfd_event_data *event_data)
{
struct kfd_event *ev = lookup_event_by_id(p, event_data->event_id);
if (!ev)
return -EINVAL;
spin_lock(&ev->lock);
waiter->event = ev;
waiter->activated = ev->signaled;
ev->signaled = ev->signaled && !ev->auto_reset;
/* last_event_age = 0 is reserved for backward compatibility */
if (waiter->event->type == KFD_EVENT_TYPE_SIGNAL &&
event_data->signal_event_data.last_event_age) {
waiter->event_age_enabled = true;
if (ev->event_age != event_data->signal_event_data.last_event_age)
waiter->activated = true;
}
if (!waiter->activated)
add_wait_queue(&ev->wq, &waiter->wait);
spin_unlock(&ev->lock);
return 0;
}
/* test_event_condition - Test condition of events being waited for
* @all: Return completion only if all events have signaled
* @num_events: Number of events to wait for
* @event_waiters: Array of event waiters, one per event
*
* Returns KFD_IOC_WAIT_RESULT_COMPLETE if all (or one) event(s) have
* signaled. Returns KFD_IOC_WAIT_RESULT_TIMEOUT if no (or not all)
* events have signaled. Returns KFD_IOC_WAIT_RESULT_FAIL if any of
* the events have been destroyed.
*/
static uint32_t test_event_condition(bool all, uint32_t num_events,
struct kfd_event_waiter *event_waiters)
{
uint32_t i;
uint32_t activated_count = 0;
for (i = 0; i < num_events; i++) {
if (!READ_ONCE(event_waiters[i].event))
return KFD_IOC_WAIT_RESULT_FAIL;
if (READ_ONCE(event_waiters[i].activated)) {
if (!all)
return KFD_IOC_WAIT_RESULT_COMPLETE;
activated_count++;
}
}
return activated_count == num_events ?
KFD_IOC_WAIT_RESULT_COMPLETE : KFD_IOC_WAIT_RESULT_TIMEOUT;
}
/*
* Copy event specific data, if defined.
* Currently only memory exception events have additional data to copy to user
*/
static int copy_signaled_event_data(uint32_t num_events,
struct kfd_event_waiter *event_waiters,
struct kfd_event_data __user *data)
{
void *src;
void __user *dst;
struct kfd_event_waiter *waiter;
struct kfd_event *event;
uint32_t i, size = 0;
for (i = 0; i < num_events; i++) {
waiter = &event_waiters[i];
event = waiter->event;
if (!event)
return -EINVAL; /* event was destroyed */
if (waiter->activated) {
if (event->type == KFD_EVENT_TYPE_MEMORY) {
dst = &data[i].memory_exception_data;
src = &event->memory_exception_data;
size = sizeof(struct kfd_hsa_memory_exception_data);
} else if (event->type == KFD_EVENT_TYPE_SIGNAL &&
waiter->event_age_enabled) {
dst = &data[i].signal_event_data.last_event_age;
src = &event->event_age;
size = sizeof(u64);
}
if (size && copy_to_user(dst, src, size))
return -EFAULT;
}
}
return 0;
}
static long user_timeout_to_jiffies(uint32_t user_timeout_ms)
{
if (user_timeout_ms == KFD_EVENT_TIMEOUT_IMMEDIATE)
return 0;
if (user_timeout_ms == KFD_EVENT_TIMEOUT_INFINITE)
return MAX_SCHEDULE_TIMEOUT;
/*
* msecs_to_jiffies interprets all values above 2^31-1 as infinite,
* but we consider them finite.
* This hack is wrong, but nobody is likely to notice.
*/
user_timeout_ms = min_t(uint32_t, user_timeout_ms, 0x7FFFFFFF);
return msecs_to_jiffies(user_timeout_ms) + 1;
}
static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters,
bool undo_auto_reset)
{
uint32_t i;
for (i = 0; i < num_events; i++)
if (waiters[i].event) {
spin_lock(&waiters[i].event->lock);
remove_wait_queue(&waiters[i].event->wq,
&waiters[i].wait);
if (undo_auto_reset && waiters[i].activated &&
waiters[i].event && waiters[i].event->auto_reset)
set_event(waiters[i].event);
spin_unlock(&waiters[i].event->lock);
}
kfree(waiters);
}
int kfd_wait_on_events(struct kfd_process *p,
uint32_t num_events, void __user *data,
bool all, uint32_t *user_timeout_ms,
uint32_t *wait_result)
{
struct kfd_event_data __user *events =
(struct kfd_event_data __user *) data;
uint32_t i;
int ret = 0;
struct kfd_event_waiter *event_waiters = NULL;
long timeout = user_timeout_to_jiffies(*user_timeout_ms);
event_waiters = alloc_event_waiters(num_events);
if (!event_waiters) {
ret = -ENOMEM;
goto out;
}
/* Use p->event_mutex here to protect against concurrent creation and
* destruction of events while we initialize event_waiters.
*/
mutex_lock(&p->event_mutex);
for (i = 0; i < num_events; i++) {
struct kfd_event_data event_data;
if (copy_from_user(&event_data, &events[i],
sizeof(struct kfd_event_data))) {
ret = -EFAULT;
goto out_unlock;
}
ret = init_event_waiter(p, &event_waiters[i], &event_data);
if (ret)
goto out_unlock;
}
/* Check condition once. */
*wait_result = test_event_condition(all, num_events, event_waiters);
if (*wait_result == KFD_IOC_WAIT_RESULT_COMPLETE) {
ret = copy_signaled_event_data(num_events,
event_waiters, events);
goto out_unlock;
} else if (WARN_ON(*wait_result == KFD_IOC_WAIT_RESULT_FAIL)) {
/* This should not happen. Events shouldn't be
* destroyed while we're holding the event_mutex
*/
goto out_unlock;
}
mutex_unlock(&p->event_mutex);
while (true) {
if (fatal_signal_pending(current)) {
ret = -EINTR;
break;
}
if (signal_pending(current)) {
ret = -ERESTARTSYS;
if (*user_timeout_ms != KFD_EVENT_TIMEOUT_IMMEDIATE &&
*user_timeout_ms != KFD_EVENT_TIMEOUT_INFINITE)
*user_timeout_ms = jiffies_to_msecs(
max(0l, timeout-1));
break;
}
/* Set task state to interruptible sleep before
* checking wake-up conditions. A concurrent wake-up
* will put the task back into runnable state. In that
* case schedule_timeout will not put the task to
* sleep and we'll get a chance to re-check the
* updated conditions almost immediately. Otherwise,
* this race condition would lead to a soft hang or a
* very long sleep.
*/
set_current_state(TASK_INTERRUPTIBLE);
*wait_result = test_event_condition(all, num_events,
event_waiters);
if (*wait_result != KFD_IOC_WAIT_RESULT_TIMEOUT)
break;
if (timeout <= 0)
break;
timeout = schedule_timeout(timeout);
}
__set_current_state(TASK_RUNNING);
mutex_lock(&p->event_mutex);
/* copy_signaled_event_data may sleep. So this has to happen
* after the task state is set back to RUNNING.
*
* The event may also have been destroyed after signaling. So
* copy_signaled_event_data also must confirm that the event
* still exists. Therefore this must be under the p->event_mutex
* which is also held when events are destroyed.
*/
if (!ret && *wait_result == KFD_IOC_WAIT_RESULT_COMPLETE)
ret = copy_signaled_event_data(num_events,
event_waiters, events);
out_unlock:
free_waiters(num_events, event_waiters, ret == -ERESTARTSYS);
mutex_unlock(&p->event_mutex);
out:
if (ret)
*wait_result = KFD_IOC_WAIT_RESULT_FAIL;
else if (*wait_result == KFD_IOC_WAIT_RESULT_FAIL)
ret = -EIO;
return ret;
}
int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
{
unsigned long pfn;
struct kfd_signal_page *page;
int ret;
/* check required size doesn't exceed the allocated size */
if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) <
get_order(vma->vm_end - vma->vm_start)) {
pr_err("Event page mmap requested illegal size\n");
return -EINVAL;
}
page = p->signal_page;
if (!page) {
/* Probably KFD bug, but mmap is user-accessible. */
pr_debug("Signal page could not be found\n");
return -EINVAL;
}
pfn = __pa(page->kernel_address);
pfn >>= PAGE_SHIFT;
vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
| VM_DONTDUMP | VM_PFNMAP);
pr_debug("Mapping signal page\n");
pr_debug(" start user address == 0x%08lx\n", vma->vm_start);
pr_debug(" end user address == 0x%08lx\n", vma->vm_end);
pr_debug(" pfn == 0x%016lX\n", pfn);
pr_debug(" vm_flags == 0x%08lX\n", vma->vm_flags);
pr_debug(" size == 0x%08lX\n",
vma->vm_end - vma->vm_start);
page->user_address = (uint64_t __user *)vma->vm_start;
/* mapping the page to user process */
ret = remap_pfn_range(vma, vma->vm_start, pfn,
vma->vm_end - vma->vm_start, vma->vm_page_prot);
if (!ret)
p->signal_mapped_size = vma->vm_end - vma->vm_start;
return ret;
}
/*
* Assumes that p is not going away.
*/
static void lookup_events_by_type_and_signal(struct kfd_process *p,
int type, void *event_data)
{
struct kfd_hsa_memory_exception_data *ev_data;
struct kfd_event *ev;
uint32_t id;
bool send_signal = true;
ev_data = (struct kfd_hsa_memory_exception_data *) event_data;
rcu_read_lock();
id = KFD_FIRST_NONSIGNAL_EVENT_ID;
idr_for_each_entry_continue(&p->event_idr, ev, id)
if (ev->type == type) {
send_signal = false;
dev_dbg(kfd_device,
"Event found: id %X type %d",
ev->event_id, ev->type);
spin_lock(&ev->lock);
set_event(ev);
if (ev->type == KFD_EVENT_TYPE_MEMORY && ev_data)
ev->memory_exception_data = *ev_data;
spin_unlock(&ev->lock);
}
if (type == KFD_EVENT_TYPE_MEMORY) {
dev_warn(kfd_device,
"Sending SIGSEGV to process %d (pasid 0x%x)",
p->lead_thread->pid, p->pasid);
send_sig(SIGSEGV, p->lead_thread, 0);
}
/* Send SIGTERM if no event of type "type" has been found */
if (send_signal) {
if (send_sigterm) {
dev_warn(kfd_device,
"Sending SIGTERM to process %d (pasid 0x%x)",
p->lead_thread->pid, p->pasid);
send_sig(SIGTERM, p->lead_thread, 0);
} else {
dev_err(kfd_device,
"Process %d (pasid 0x%x) got unhandled exception",
p->lead_thread->pid, p->pasid);
}
}
rcu_read_unlock();
}
void kfd_signal_hw_exception_event(u32 pasid)
{
/*
* Because we are called from arbitrary context (workqueue) as opposed
* to process context, kfd_process could attempt to exit while we are
* running so the lookup function increments the process ref count.
*/
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
if (!p)
return; /* Presumably process exited. */
lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_HW_EXCEPTION, NULL);
kfd_unref_process(p);
}
void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid,
struct kfd_vm_fault_info *info,
struct kfd_hsa_memory_exception_data *data)
{
struct kfd_event *ev;
uint32_t id;
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
struct kfd_hsa_memory_exception_data memory_exception_data;
int user_gpu_id;
if (!p)
return; /* Presumably process exited. */
user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
if (unlikely(user_gpu_id == -EINVAL)) {
WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id);
return;
}
/* SoC15 chips and onwards will pass in data from now on. */
if (!data) {
memset(&memory_exception_data, 0, sizeof(memory_exception_data));
memory_exception_data.gpu_id = user_gpu_id;
memory_exception_data.failure.imprecise = true;
/* Set failure reason */
if (info) {
memory_exception_data.va = (info->page_addr) <<
PAGE_SHIFT;
memory_exception_data.failure.NotPresent =
info->prot_valid ? 1 : 0;
memory_exception_data.failure.NoExecute =
info->prot_exec ? 1 : 0;
memory_exception_data.failure.ReadOnly =
info->prot_write ? 1 : 0;
memory_exception_data.failure.imprecise = 0;
}
}
rcu_read_lock();
id = KFD_FIRST_NONSIGNAL_EVENT_ID;
idr_for_each_entry_continue(&p->event_idr, ev, id)
if (ev->type == KFD_EVENT_TYPE_MEMORY) {
spin_lock(&ev->lock);
ev->memory_exception_data = data ? *data :
memory_exception_data;
set_event(ev);
spin_unlock(&ev->lock);
}
rcu_read_unlock();
kfd_unref_process(p);
}
void kfd_signal_reset_event(struct kfd_node *dev)
{
struct kfd_hsa_hw_exception_data hw_exception_data;
struct kfd_hsa_memory_exception_data memory_exception_data;
struct kfd_process *p;
struct kfd_event *ev;
unsigned int temp;
uint32_t id, idx;
int reset_cause = atomic_read(&dev->sram_ecc_flag) ?
KFD_HW_EXCEPTION_ECC :
KFD_HW_EXCEPTION_GPU_HANG;
/* Whole gpu reset caused by GPU hang and memory is lost */
memset(&hw_exception_data, 0, sizeof(hw_exception_data));
hw_exception_data.memory_lost = 1;
hw_exception_data.reset_cause = reset_cause;
memset(&memory_exception_data, 0, sizeof(memory_exception_data));
memory_exception_data.ErrorType = KFD_MEM_ERR_SRAM_ECC;
memory_exception_data.failure.imprecise = true;
idx = srcu_read_lock(&kfd_processes_srcu);
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
int user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
if (unlikely(user_gpu_id == -EINVAL)) {
WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id);
continue;
}
rcu_read_lock();
id = KFD_FIRST_NONSIGNAL_EVENT_ID;
idr_for_each_entry_continue(&p->event_idr, ev, id) {
if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
spin_lock(&ev->lock);
ev->hw_exception_data = hw_exception_data;
ev->hw_exception_data.gpu_id = user_gpu_id;
set_event(ev);
spin_unlock(&ev->lock);
}
if (ev->type == KFD_EVENT_TYPE_MEMORY &&
reset_cause == KFD_HW_EXCEPTION_ECC) {
spin_lock(&ev->lock);
ev->memory_exception_data = memory_exception_data;
ev->memory_exception_data.gpu_id = user_gpu_id;
set_event(ev);
spin_unlock(&ev->lock);
}
}
rcu_read_unlock();
}
srcu_read_unlock(&kfd_processes_srcu, idx);
}
void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid)
{
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
struct kfd_hsa_memory_exception_data memory_exception_data;
struct kfd_hsa_hw_exception_data hw_exception_data;
struct kfd_event *ev;
uint32_t id = KFD_FIRST_NONSIGNAL_EVENT_ID;
int user_gpu_id;
if (!p)
return; /* Presumably process exited. */
user_gpu_id = kfd_process_get_user_gpu_id(p, dev->id);
if (unlikely(user_gpu_id == -EINVAL)) {
WARN_ONCE(1, "Could not get user_gpu_id from dev->id:%x\n", dev->id);
return;
}
memset(&hw_exception_data, 0, sizeof(hw_exception_data));
hw_exception_data.gpu_id = user_gpu_id;
hw_exception_data.memory_lost = 1;
hw_exception_data.reset_cause = KFD_HW_EXCEPTION_ECC;
memset(&memory_exception_data, 0, sizeof(memory_exception_data));
memory_exception_data.ErrorType = KFD_MEM_ERR_POISON_CONSUMED;
memory_exception_data.gpu_id = user_gpu_id;
memory_exception_data.failure.imprecise = true;
rcu_read_lock();
idr_for_each_entry_continue(&p->event_idr, ev, id) {
if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
spin_lock(&ev->lock);
ev->hw_exception_data = hw_exception_data;
set_event(ev);
spin_unlock(&ev->lock);
}
if (ev->type == KFD_EVENT_TYPE_MEMORY) {
spin_lock(&ev->lock);
ev->memory_exception_data = memory_exception_data;
set_event(ev);
spin_unlock(&ev->lock);
}
}
rcu_read_unlock();
/* user application will handle SIGBUS signal */
send_sig(SIGBUS, p->lead_thread, 0);
kfd_unref_process(p);
}
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_events.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2014-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* KFD Interrupts.
*
* AMD GPUs deliver interrupts by pushing an interrupt description onto the
* interrupt ring and then sending an interrupt. KGD receives the interrupt
* in ISR and sends us a pointer to each new entry on the interrupt ring.
*
* We generally can't process interrupt-signaled events from ISR, so we call
* out to each interrupt client module (currently only the scheduler) to ask if
* each interrupt is interesting. If they return true, then it requires further
* processing so we copy it to an internal interrupt ring and call each
* interrupt client again from a work-queue.
*
* There's no acknowledgment for the interrupts we use. The hardware simply
* queues a new interrupt each time without waiting.
*
* The fixed-size internal queue means that it's possible for us to lose
* interrupts because we have no back-pressure to the hardware.
*/
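/* The internal ring below is a byte kfifo sized for KFD_IH_NUM_ENTRIES
 * ring entries; enqueue_ih_ring_entry() drops entries when it is full.
 */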
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/kfifo.h>
#include "kfd_priv.h"
#define KFD_IH_NUM_ENTRIES 8192
static void interrupt_wq(struct work_struct *);
int kfd_interrupt_init(struct kfd_node *node)
{
int r;
r = kfifo_alloc(&node->ih_fifo,
KFD_IH_NUM_ENTRIES * node->kfd->device_info.ih_ring_entry_size,
GFP_KERNEL);
if (r) {
dev_err(node->adev->dev, "Failed to allocate IH fifo\n");
return r;
}
node->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1);
if (unlikely(!node->ih_wq)) {
kfifo_free(&node->ih_fifo);
dev_err(node->adev->dev, "Failed to allocate KFD IH workqueue\n");
return -ENOMEM;
}
spin_lock_init(&node->interrupt_lock);
INIT_WORK(&node->interrupt_work, interrupt_wq);
node->interrupts_active = true;
/*
* After this function returns, the interrupt will be enabled. This
* barrier ensures that the interrupt running on a different processor
* sees all the above writes.
*/
smp_wmb();
return 0;
}
void kfd_interrupt_exit(struct kfd_node *node)
{
/*
* Stop the interrupt handler from writing to the ring and scheduling
* workqueue items. The spinlock ensures that any interrupt running
* after we have unlocked sees interrupts_active = false.
*/
unsigned long flags;
spin_lock_irqsave(&node->interrupt_lock, flags);
node->interrupts_active = false;
spin_unlock_irqrestore(&node->interrupt_lock, flags);
/*
* flush_work ensures that there are no outstanding
* work-queue items that will access interrupt_ring. New work items
* can't be created because we stopped interrupt handling above.
*/
flush_workqueue(node->ih_wq);
kfifo_free(&node->ih_fifo);
}
/*
* Assumption: single reader/writer. This function is not re-entrant
*/
bool enqueue_ih_ring_entry(struct kfd_node *node, const void *ih_ring_entry)
{
int count;
count = kfifo_in(&node->ih_fifo, ih_ring_entry,
node->kfd->device_info.ih_ring_entry_size);
if (count != node->kfd->device_info.ih_ring_entry_size) {
dev_dbg_ratelimited(node->adev->dev,
"Interrupt ring overflow, dropping interrupt %d\n",
count);
return false;
}
return true;
}
/*
* Assumption: single reader/writer. This function is not re-entrant
*/
static bool dequeue_ih_ring_entry(struct kfd_node *node, void *ih_ring_entry)
{
int count;
count = kfifo_out(&node->ih_fifo, ih_ring_entry,
node->kfd->device_info.ih_ring_entry_size);
WARN_ON(count && count != node->kfd->device_info.ih_ring_entry_size);
return count == node->kfd->device_info.ih_ring_entry_size;
}
static void interrupt_wq(struct work_struct *work)
{
struct kfd_node *dev = container_of(work, struct kfd_node,
interrupt_work);
uint32_t ih_ring_entry[KFD_MAX_RING_ENTRY_SIZE];
unsigned long start_jiffies = jiffies;
if (dev->kfd->device_info.ih_ring_entry_size > sizeof(ih_ring_entry)) {
dev_err_once(dev->adev->dev, "Ring entry too small\n");
return;
}
while (dequeue_ih_ring_entry(dev, ih_ring_entry)) {
dev->kfd->device_info.event_interrupt_class->interrupt_wq(dev,
ih_ring_entry);
if (time_is_before_jiffies(start_jiffies + HZ)) {
/* If we spent more than a second processing signals,
* reschedule the worker to avoid soft-lockup warnings
*/
queue_work(dev->ih_wq, &dev->interrupt_work);
break;
}
}
}
bool interrupt_is_wanted(struct kfd_node *dev,
const uint32_t *ih_ring_entry,
uint32_t *patched_ihre, bool *flag)
{
/* integer and bitwise OR so there is no boolean short-circuiting */
unsigned int wanted = 0;
wanted |= dev->kfd->device_info.event_interrupt_class->interrupt_isr(dev,
ih_ring_entry, patched_ihre, flag);
return wanted != 0;
}
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2014-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <uapi/linux/kfd_ioctl.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/ptrace.h>
#include <linux/dma-buf.h>
#include <linux/fdtable.h>
#include <linux/processor.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_svm.h"
#include "amdgpu_amdkfd.h"
#include "kfd_smi_events.h"
#include "amdgpu_dma_buf.h"
#include "kfd_debug.h"
static long kfd_ioctl(struct file *, unsigned int, unsigned long);
static int kfd_open(struct inode *, struct file *);
static int kfd_release(struct inode *, struct file *);
static int kfd_mmap(struct file *, struct vm_area_struct *);
static const char kfd_dev_name[] = "kfd";
static const struct file_operations kfd_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = kfd_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = kfd_open,
.release = kfd_release,
.mmap = kfd_mmap,
};
static int kfd_char_dev_major = -1;
static struct class *kfd_class;
struct device *kfd_device;
static inline struct kfd_process_device *kfd_lock_pdd_by_id(struct kfd_process *p, __u32 gpu_id)
{
struct kfd_process_device *pdd;
mutex_lock(&p->mutex);
pdd = kfd_process_device_data_by_id(p, gpu_id);
if (pdd)
return pdd;
mutex_unlock(&p->mutex);
return NULL;
}
static inline void kfd_unlock_pdd(struct kfd_process_device *pdd)
{
mutex_unlock(&pdd->process->mutex);
}
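/*
 * Usage sketch (illustrative only): ioctl handlers that need the pdd for a
 * single call pair these helpers as follows, with p->mutex held in between:
 *
 *	pdd = kfd_lock_pdd_by_id(p, args->gpu_id);
 *	if (!pdd)
 *		return -EINVAL;
 *	// ... use pdd / pdd->dev ...
 *	kfd_unlock_pdd(pdd);
 *
 * kfd_ioctl_get_available_memory() below follows exactly this pattern.
 */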
int kfd_chardev_init(void)
{
int err = 0;
kfd_char_dev_major = register_chrdev(0, kfd_dev_name, &kfd_fops);
err = kfd_char_dev_major;
if (err < 0)
goto err_register_chrdev;
kfd_class = class_create(kfd_dev_name);
err = PTR_ERR(kfd_class);
if (IS_ERR(kfd_class))
goto err_class_create;
kfd_device = device_create(kfd_class, NULL,
MKDEV(kfd_char_dev_major, 0),
NULL, kfd_dev_name);
err = PTR_ERR(kfd_device);
if (IS_ERR(kfd_device))
goto err_device_create;
return 0;
err_device_create:
class_destroy(kfd_class);
err_class_create:
unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
err_register_chrdev:
return err;
}
void kfd_chardev_exit(void)
{
device_destroy(kfd_class, MKDEV(kfd_char_dev_major, 0));
class_destroy(kfd_class);
unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
kfd_device = NULL;
}
static int kfd_open(struct inode *inode, struct file *filep)
{
struct kfd_process *process;
bool is_32bit_user_mode;
if (iminor(inode) != 0)
return -ENODEV;
is_32bit_user_mode = in_compat_syscall();
if (is_32bit_user_mode) {
dev_warn(kfd_device,
"Process %d (32-bit) failed to open /dev/kfd\n"
"32-bit processes are not supported by amdkfd\n",
current->pid);
return -EPERM;
}
process = kfd_create_process(current);
if (IS_ERR(process))
return PTR_ERR(process);
if (kfd_process_init_cwsr_apu(process, filep)) {
kfd_unref_process(process);
return -EFAULT;
}
/* filep now owns the reference returned by kfd_create_process */
filep->private_data = process;
dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
process->pasid, process->is_32bit_user_mode);
return 0;
}
static int kfd_release(struct inode *inode, struct file *filep)
{
struct kfd_process *process = filep->private_data;
if (process)
kfd_unref_process(process);
return 0;
}
static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
void *data)
{
struct kfd_ioctl_get_version_args *args = data;
args->major_version = KFD_IOCTL_MAJOR_VERSION;
args->minor_version = KFD_IOCTL_MINOR_VERSION;
return 0;
}
static int set_queue_properties_from_user(struct queue_properties *q_properties,
struct kfd_ioctl_create_queue_args *args)
{
/*
* Repurpose queue percentage to accommodate new features:
* bit 0-7: queue percentage
* bit 8-15: pm4_target_xcc
*/
if ((args->queue_percentage & 0xFF) > KFD_MAX_QUEUE_PERCENTAGE) {
pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
return -EINVAL;
}
if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
return -EINVAL;
}
if ((args->ring_base_address) &&
(!access_ok((const void __user *) args->ring_base_address,
sizeof(uint64_t)))) {
pr_err("Can't access ring base address\n");
return -EFAULT;
}
if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
pr_err("Ring size must be a power of 2 or 0\n");
return -EINVAL;
}
if (!access_ok((const void __user *) args->read_pointer_address,
sizeof(uint32_t))) {
pr_err("Can't access read pointer\n");
return -EFAULT;
}
if (!access_ok((const void __user *) args->write_pointer_address,
sizeof(uint32_t))) {
pr_err("Can't access write pointer\n");
return -EFAULT;
}
if (args->eop_buffer_address &&
!access_ok((const void __user *) args->eop_buffer_address,
sizeof(uint32_t))) {
pr_debug("Can't access eop buffer");
return -EFAULT;
}
if (args->ctx_save_restore_address &&
!access_ok((const void __user *) args->ctx_save_restore_address,
sizeof(uint32_t))) {
pr_debug("Can't access ctx save restore buffer");
return -EFAULT;
}
q_properties->is_interop = false;
q_properties->is_gws = false;
q_properties->queue_percent = args->queue_percentage & 0xFF;
/* bit 8-15 are repurposed to be PM4 target XCC */
q_properties->pm4_target_xcc = (args->queue_percentage >> 8) & 0xFF;
q_properties->priority = args->queue_priority;
q_properties->queue_address = args->ring_base_address;
q_properties->queue_size = args->ring_size;
q_properties->read_ptr = (uint32_t *) args->read_pointer_address;
q_properties->write_ptr = (uint32_t *) args->write_pointer_address;
q_properties->eop_ring_buffer_address = args->eop_buffer_address;
q_properties->eop_ring_buffer_size = args->eop_buffer_size;
q_properties->ctx_save_restore_area_address =
args->ctx_save_restore_address;
q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size;
q_properties->ctl_stack_size = args->ctl_stack_size;
if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA)
q_properties->type = KFD_QUEUE_TYPE_SDMA;
else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA_XGMI)
q_properties->type = KFD_QUEUE_TYPE_SDMA_XGMI;
else
return -ENOTSUPP;
if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
q_properties->format = KFD_QUEUE_FORMAT_AQL;
else
q_properties->format = KFD_QUEUE_FORMAT_PM4;
pr_debug("Queue Percentage: %d, %d\n",
q_properties->queue_percent, args->queue_percentage);
pr_debug("Queue Priority: %d, %d\n",
q_properties->priority, args->queue_priority);
pr_debug("Queue Address: 0x%llX, 0x%llX\n",
q_properties->queue_address, args->ring_base_address);
pr_debug("Queue Size: 0x%llX, %u\n",
q_properties->queue_size, args->ring_size);
pr_debug("Queue r/w Pointers: %px, %px\n",
q_properties->read_ptr,
q_properties->write_ptr);
pr_debug("Queue Format: %d\n", q_properties->format);
pr_debug("Queue EOP: 0x%llX\n", q_properties->eop_ring_buffer_address);
pr_debug("Queue CTX save area: 0x%llX\n",
q_properties->ctx_save_restore_area_address);
return 0;
}
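/*
 * Illustrative sketch (user-space side, assumptions only): a caller filling
 * struct kfd_ioctl_create_queue_args from uapi/linux/kfd_ioctl.h packs the
 * repurposed queue_percentage field as described above, e.g.:
 *
 *	args.queue_percentage = (pm4_target_xcc << 8) | (percent & 0xFF);
 *	args.queue_priority	   = priority;	// 0 .. KFD_MAX_QUEUE_PRIORITY
 *	args.ring_size		   = ring_bytes;	// power of two, or 0
 *	args.ring_base_address	   = (uintptr_t)ring_buf;
 *	args.read_pointer_address  = (uintptr_t)rptr;
 *	args.write_pointer_address = (uintptr_t)wptr;
 *
 * set_queue_properties_from_user() above validates exactly these constraints.
 */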
static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
void *data)
{
struct kfd_ioctl_create_queue_args *args = data;
struct kfd_node *dev;
int err = 0;
unsigned int queue_id;
struct kfd_process_device *pdd;
struct queue_properties q_properties;
uint32_t doorbell_offset_in_process = 0;
struct amdgpu_bo *wptr_bo = NULL;
memset(&q_properties, 0, sizeof(struct queue_properties));
pr_debug("Creating queue ioctl\n");
err = set_queue_properties_from_user(&q_properties, args);
if (err)
return err;
pr_debug("Looking for gpu id 0x%x\n", args->gpu_id);
mutex_lock(&p->mutex);
pdd = kfd_process_device_data_by_id(p, args->gpu_id);
if (!pdd) {
pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
err = -EINVAL;
goto err_pdd;
}
dev = pdd->dev;
pdd = kfd_bind_process_to_device(dev, p);
if (IS_ERR(pdd)) {
err = -ESRCH;
goto err_bind_process;
}
if (!pdd->qpd.proc_doorbells) {
err = kfd_alloc_process_doorbells(dev->kfd, pdd);
if (err) {
pr_debug("failed to allocate process doorbells\n");
goto err_bind_process;
}
}
/* Starting with GFX11, wptr BOs must be mapped to GART so that MES can
 * detect work on unmapped queues when user-mode queues are oversubscribed
 * (no aggregated doorbell).
 */
if (dev->kfd->shared_resources.enable_mes &&
((dev->adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK)
>> AMDGPU_MES_API_VERSION_SHIFT) >= 2) {
struct amdgpu_bo_va_mapping *wptr_mapping;
struct amdgpu_vm *wptr_vm;
wptr_vm = drm_priv_to_vm(pdd->drm_priv);
err = amdgpu_bo_reserve(wptr_vm->root.bo, false);
if (err)
goto err_wptr_map_gart;
wptr_mapping = amdgpu_vm_bo_lookup_mapping(
wptr_vm, args->write_pointer_address >> PAGE_SHIFT);
amdgpu_bo_unreserve(wptr_vm->root.bo);
if (!wptr_mapping) {
pr_err("Failed to lookup wptr bo\n");
err = -EINVAL;
goto err_wptr_map_gart;
}
wptr_bo = wptr_mapping->bo_va->base.bo;
if (wptr_bo->tbo.base.size > PAGE_SIZE) {
pr_err("Requested GART mapping for wptr bo larger than one page\n");
err = -EINVAL;
goto err_wptr_map_gart;
}
err = amdgpu_amdkfd_map_gtt_bo_to_gart(dev->adev, wptr_bo);
if (err) {
pr_err("Failed to map wptr bo to GART\n");
goto err_wptr_map_gart;
}
}
pr_debug("Creating queue for PASID 0x%x on gpu 0x%x\n",
p->pasid,
dev->id);
err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id, wptr_bo,
NULL, NULL, NULL, &doorbell_offset_in_process);
if (err != 0)
goto err_create_queue;
args->queue_id = queue_id;
/* Return gpu_id as doorbell offset for mmap usage */
args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL;
args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id);
if (KFD_IS_SOC15(dev))
/* On SOC15 ASICs, include the doorbell offset within the
* process doorbell frame, which is 2 pages.
*/
args->doorbell_offset |= doorbell_offset_in_process;
mutex_unlock(&p->mutex);
pr_debug("Queue id %d was created successfully\n", args->queue_id);
pr_debug("Ring buffer address == 0x%016llX\n",
args->ring_base_address);
pr_debug("Read ptr address == 0x%016llX\n",
args->read_pointer_address);
pr_debug("Write ptr address == 0x%016llX\n",
args->write_pointer_address);
kfd_dbg_ev_raise(KFD_EC_MASK(EC_QUEUE_NEW), p, dev, queue_id, false, NULL, 0);
return 0;
err_create_queue:
if (wptr_bo)
amdgpu_amdkfd_free_gtt_mem(dev->adev, wptr_bo);
err_wptr_map_gart:
err_bind_process:
err_pdd:
mutex_unlock(&p->mutex);
return err;
}
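/*
 * Illustrative sketch (user-space side, assumptions only): the doorbell_offset
 * returned by the ioctl above is intended to be passed straight to mmap() on
 * the /dev/kfd file descriptor:
 *
 *	doorbells = mmap(NULL, doorbell_slice, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, kfd_fd, args.doorbell_offset);
 *
 * where doorbell_slice is the per-process doorbell frame (two pages on SOC15
 * parts, see the comment above). kfd_mmap() decodes KFD_MMAP_TYPE_DOORBELL and
 * the embedded GPU id from the offset to pick the right mapping.
 */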
static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
void *data)
{
int retval;
struct kfd_ioctl_destroy_queue_args *args = data;
pr_debug("Destroying queue id %d for pasid 0x%x\n",
args->queue_id,
p->pasid);
mutex_lock(&p->mutex);
retval = pqm_destroy_queue(&p->pqm, args->queue_id);
mutex_unlock(&p->mutex);
return retval;
}
static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
void *data)
{
int retval;
struct kfd_ioctl_update_queue_args *args = data;
struct queue_properties properties;
/*
* Repurpose queue percentage to accommodate new features:
* bit 0-7: queue percentage
* bit 8-15: pm4_target_xcc
*/
if ((args->queue_percentage & 0xFF) > KFD_MAX_QUEUE_PERCENTAGE) {
pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
return -EINVAL;
}
if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
return -EINVAL;
}
if ((args->ring_base_address) &&
(!access_ok((const void __user *) args->ring_base_address,
sizeof(uint64_t)))) {
pr_err("Can't access ring base address\n");
return -EFAULT;
}
if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
pr_err("Ring size must be a power of 2 or 0\n");
return -EINVAL;
}
properties.queue_address = args->ring_base_address;
properties.queue_size = args->ring_size;
properties.queue_percent = args->queue_percentage & 0xFF;
/* bit 8-15 are repurposed to be PM4 target XCC */
properties.pm4_target_xcc = (args->queue_percentage >> 8) & 0xFF;
properties.priority = args->queue_priority;
pr_debug("Updating queue id %d for pasid 0x%x\n",
args->queue_id, p->pasid);
mutex_lock(&p->mutex);
retval = pqm_update_queue_properties(&p->pqm, args->queue_id, &properties);
mutex_unlock(&p->mutex);
return retval;
}
static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
void *data)
{
int retval;
const int max_num_cus = 1024;
struct kfd_ioctl_set_cu_mask_args *args = data;
struct mqd_update_info minfo = {0};
uint32_t __user *cu_mask_ptr = (uint32_t __user *)args->cu_mask_ptr;
size_t cu_mask_size = sizeof(uint32_t) * (args->num_cu_mask / 32);
if ((args->num_cu_mask % 32) != 0) {
pr_debug("num_cu_mask 0x%x must be a multiple of 32",
args->num_cu_mask);
return -EINVAL;
}
minfo.cu_mask.count = args->num_cu_mask;
if (minfo.cu_mask.count == 0) {
pr_debug("CU mask cannot be 0");
return -EINVAL;
}
/* To prevent an unreasonably large CU mask size, set an arbitrary
* limit of max_num_cus bits. We can then just drop any CU mask bits
* past max_num_cus bits and just use the first max_num_cus bits.
*/
if (minfo.cu_mask.count > max_num_cus) {
pr_debug("CU mask cannot be greater than 1024 bits");
minfo.cu_mask.count = max_num_cus;
cu_mask_size = sizeof(uint32_t) * (max_num_cus/32);
}
minfo.cu_mask.ptr = kzalloc(cu_mask_size, GFP_KERNEL);
if (!minfo.cu_mask.ptr)
return -ENOMEM;
retval = copy_from_user(minfo.cu_mask.ptr, cu_mask_ptr, cu_mask_size);
if (retval) {
pr_debug("Could not copy CU mask from userspace");
retval = -EFAULT;
goto out;
}
mutex_lock(&p->mutex);
retval = pqm_update_mqd(&p->pqm, args->queue_id, &minfo);
mutex_unlock(&p->mutex);
out:
kfree(minfo.cu_mask.ptr);
return retval;
}
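/*
 * Usage sketch (user-space side, illustrative only): the CU mask is an array
 * of 32-bit words, one bit per compute unit, and num_cu_mask must be a
 * multiple of 32:
 *
 *	uint32_t cu_mask[4] = { 0xffffffff, 0xffffffff, 0, 0 };
 *	args.queue_id	 = queue_id;
 *	args.num_cu_mask = 4 * 32;
 *	args.cu_mask_ptr = (uintptr_t)cu_mask;
 *
 * Anything beyond the first max_num_cus (1024) bits is silently dropped by the
 * handler above.
 */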
static int kfd_ioctl_get_queue_wave_state(struct file *filep,
struct kfd_process *p, void *data)
{
struct kfd_ioctl_get_queue_wave_state_args *args = data;
int r;
mutex_lock(&p->mutex);
r = pqm_get_wave_state(&p->pqm, args->queue_id,
(void __user *)args->ctl_stack_address,
&args->ctl_stack_used_size,
&args->save_area_used_size);
mutex_unlock(&p->mutex);
return r;
}
static int kfd_ioctl_set_memory_policy(struct file *filep,
struct kfd_process *p, void *data)
{
struct kfd_ioctl_set_memory_policy_args *args = data;
int err = 0;
struct kfd_process_device *pdd;
enum cache_policy default_policy, alternate_policy;
if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT
&& args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
return -EINVAL;
}
if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
&& args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
return -EINVAL;
}
mutex_lock(&p->mutex);
pdd = kfd_process_device_data_by_id(p, args->gpu_id);
if (!pdd) {
pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
err = -EINVAL;
goto err_pdd;
}
pdd = kfd_bind_process_to_device(pdd->dev, p);
if (IS_ERR(pdd)) {
err = -ESRCH;
goto out;
}
default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
? cache_policy_coherent : cache_policy_noncoherent;
alternate_policy =
(args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
? cache_policy_coherent : cache_policy_noncoherent;
if (!pdd->dev->dqm->ops.set_cache_memory_policy(pdd->dev->dqm,
&pdd->qpd,
default_policy,
alternate_policy,
(void __user *)args->alternate_aperture_base,
args->alternate_aperture_size))
err = -EINVAL;
out:
err_pdd:
mutex_unlock(&p->mutex);
return err;
}
static int kfd_ioctl_set_trap_handler(struct file *filep,
struct kfd_process *p, void *data)
{
struct kfd_ioctl_set_trap_handler_args *args = data;
int err = 0;
struct kfd_process_device *pdd;
mutex_lock(&p->mutex);
pdd = kfd_process_device_data_by_id(p, args->gpu_id);
if (!pdd) {
err = -EINVAL;
goto err_pdd;
}
pdd = kfd_bind_process_to_device(pdd->dev, p);
if (IS_ERR(pdd)) {
err = -ESRCH;
goto out;
}
kfd_process_set_trap_handler(&pdd->qpd, args->tba_addr, args->tma_addr);
out:
err_pdd:
mutex_unlock(&p->mutex);
return err;
}
static int kfd_ioctl_dbg_register(struct file *filep,
struct kfd_process *p, void *data)
{
return -EPERM;
}
static int kfd_ioctl_dbg_unregister(struct file *filep,
struct kfd_process *p, void *data)
{
return -EPERM;
}
static int kfd_ioctl_dbg_address_watch(struct file *filep,
struct kfd_process *p, void *data)
{
return -EPERM;
}
/* Parse and generate fixed size data structure for wave control */
static int kfd_ioctl_dbg_wave_control(struct file *filep,
struct kfd_process *p, void *data)
{
return -EPERM;
}
static int kfd_ioctl_get_clock_counters(struct file *filep,
struct kfd_process *p, void *data)
{
struct kfd_ioctl_get_clock_counters_args *args = data;
struct kfd_process_device *pdd;
mutex_lock(&p->mutex);
pdd = kfd_process_device_data_by_id(p, args->gpu_id);
mutex_unlock(&p->mutex);
if (pdd)
/* Reading GPU clock counter from KGD */
args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(pdd->dev->adev);
else
/* Node without GPU resource */
args->gpu_clock_counter = 0;
/* No access to rdtsc. Using raw monotonic time */
args->cpu_clock_counter = ktime_get_raw_ns();
args->system_clock_counter = ktime_get_boottime_ns();
/* Since the counter is in nano-seconds we use 1GHz frequency */
args->system_clock_freq = 1000000000;
return 0;
}
static int kfd_ioctl_get_process_apertures(struct file *filp,
struct kfd_process *p, void *data)
{
struct kfd_ioctl_get_process_apertures_args *args = data;
struct kfd_process_device_apertures *pAperture;
int i;
dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
args->num_of_nodes = 0;
mutex_lock(&p->mutex);
/* Run over all pdd of the process */
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
pAperture =
&args->process_apertures[args->num_of_nodes];
pAperture->gpu_id = pdd->dev->id;
pAperture->lds_base = pdd->lds_base;
pAperture->lds_limit = pdd->lds_limit;
pAperture->gpuvm_base = pdd->gpuvm_base;
pAperture->gpuvm_limit = pdd->gpuvm_limit;
pAperture->scratch_base = pdd->scratch_base;
pAperture->scratch_limit = pdd->scratch_limit;
dev_dbg(kfd_device,
"node id %u\n", args->num_of_nodes);
dev_dbg(kfd_device,
"gpu id %u\n", pdd->dev->id);
dev_dbg(kfd_device,
"lds_base %llX\n", pdd->lds_base);
dev_dbg(kfd_device,
"lds_limit %llX\n", pdd->lds_limit);
dev_dbg(kfd_device,
"gpuvm_base %llX\n", pdd->gpuvm_base);
dev_dbg(kfd_device,
"gpuvm_limit %llX\n", pdd->gpuvm_limit);
dev_dbg(kfd_device,
"scratch_base %llX\n", pdd->scratch_base);
dev_dbg(kfd_device,
"scratch_limit %llX\n", pdd->scratch_limit);
if (++args->num_of_nodes >= NUM_OF_SUPPORTED_GPUS)
break;
}
mutex_unlock(&p->mutex);
return 0;
}
static int kfd_ioctl_get_process_apertures_new(struct file *filp,
struct kfd_process *p, void *data)
{
struct kfd_ioctl_get_process_apertures_new_args *args = data;
struct kfd_process_device_apertures *pa;
int ret;
int i;
dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
if (args->num_of_nodes == 0) {
/* Return number of nodes, so that user space can allocate
* sufficient memory
*/
mutex_lock(&p->mutex);
args->num_of_nodes = p->n_pdds;
goto out_unlock;
}
/* Fill in process-aperture information for all available
* nodes, but not more than args->num_of_nodes as that is
* the amount of memory allocated by user
*/
pa = kzalloc((sizeof(struct kfd_process_device_apertures) *
args->num_of_nodes), GFP_KERNEL);
if (!pa)
return -ENOMEM;
mutex_lock(&p->mutex);
if (!p->n_pdds) {
args->num_of_nodes = 0;
kfree(pa);
goto out_unlock;
}
/* Run over all pdd of the process */
for (i = 0; i < min(p->n_pdds, args->num_of_nodes); i++) {
struct kfd_process_device *pdd = p->pdds[i];
pa[i].gpu_id = pdd->dev->id;
pa[i].lds_base = pdd->lds_base;
pa[i].lds_limit = pdd->lds_limit;
pa[i].gpuvm_base = pdd->gpuvm_base;
pa[i].gpuvm_limit = pdd->gpuvm_limit;
pa[i].scratch_base = pdd->scratch_base;
pa[i].scratch_limit = pdd->scratch_limit;
dev_dbg(kfd_device,
"gpu id %u\n", pdd->dev->id);
dev_dbg(kfd_device,
"lds_base %llX\n", pdd->lds_base);
dev_dbg(kfd_device,
"lds_limit %llX\n", pdd->lds_limit);
dev_dbg(kfd_device,
"gpuvm_base %llX\n", pdd->gpuvm_base);
dev_dbg(kfd_device,
"gpuvm_limit %llX\n", pdd->gpuvm_limit);
dev_dbg(kfd_device,
"scratch_base %llX\n", pdd->scratch_base);
dev_dbg(kfd_device,
"scratch_limit %llX\n", pdd->scratch_limit);
}
mutex_unlock(&p->mutex);
args->num_of_nodes = i;
ret = copy_to_user(
(void __user *)args->kfd_process_device_apertures_ptr,
pa,
(i * sizeof(struct kfd_process_device_apertures)));
kfree(pa);
return ret ? -EFAULT : 0;
out_unlock:
mutex_unlock(&p->mutex);
return 0;
}
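/*
 * Illustrative two-call pattern (user-space side, assumptions only), mirroring
 * the num_of_nodes == 0 branch above:
 *
 *	args.num_of_nodes = 0;
 *	ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args);	// query count
 *	bufs = calloc(args.num_of_nodes,
 *		      sizeof(struct kfd_process_device_apertures));
 *	args.kfd_process_device_apertures_ptr = (uintptr_t)bufs;
 *	ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args);	// fill array
 */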
static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
void *data)
{
struct kfd_ioctl_create_event_args *args = data;
int err;
/* For dGPUs the event page is allocated in user mode. The
* handle is passed to KFD with the first call to this IOCTL
* through the event_page_offset field.
*/
if (args->event_page_offset) {
mutex_lock(&p->mutex);
err = kfd_kmap_event_page(p, args->event_page_offset);
mutex_unlock(&p->mutex);
if (err)
return err;
}
err = kfd_event_create(filp, p, args->event_type,
args->auto_reset != 0, args->node_id,
&args->event_id, &args->event_trigger_data,
&args->event_page_offset,
&args->event_slot_index);
pr_debug("Created event (id:0x%08x) (%s)\n", args->event_id, __func__);
return err;
}
static int kfd_ioctl_destroy_event(struct file *filp, struct kfd_process *p,
void *data)
{
struct kfd_ioctl_destroy_event_args *args = data;
return kfd_event_destroy(p, args->event_id);
}
static int kfd_ioctl_set_event(struct file *filp, struct kfd_process *p,
void *data)
{
struct kfd_ioctl_set_event_args *args = data;
return kfd_set_event(p, args->event_id);
}
static int kfd_ioctl_reset_event(struct file *filp, struct kfd_process *p,
void *data)
{
struct kfd_ioctl_reset_event_args *args = data;
return kfd_reset_event(p, args->event_id);
}
static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
void *data)
{
struct kfd_ioctl_wait_events_args *args = data;
return kfd_wait_on_events(p, args->num_events,
(void __user *)args->events_ptr,
(args->wait_for_all != 0),
&args->timeout, &args->wait_result);
}
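/*
 * Illustrative sketch (user-space side, assumptions only): a minimal
 * create-then-wait sequence built on the event ioctls above:
 *
 *	struct kfd_ioctl_create_event_args create = {
 *		.event_type = KFD_IOC_EVENT_SIGNAL,
 *	};
 *	ioctl(kfd_fd, AMDKFD_IOC_CREATE_EVENT, &create);
 *
 *	struct kfd_event_data ev = { .event_id = create.event_id };
 *	struct kfd_ioctl_wait_events_args wait = {
 *		.events_ptr   = (uintptr_t)&ev,
 *		.num_events   = 1,
 *		.wait_for_all = 1,
 *		.timeout      = 1000,	// milliseconds
 *	};
 *	ioctl(kfd_fd, AMDKFD_IOC_WAIT_EVENTS, &wait);
 *	// wait.wait_result reports completion vs. timeout
 */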
static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
struct kfd_process *p, void *data)
{
struct kfd_ioctl_set_scratch_backing_va_args *args = data;
struct kfd_process_device *pdd;
struct kfd_node *dev;
long err;
mutex_lock(&p->mutex);
pdd = kfd_process_device_data_by_id(p, args->gpu_id);
if (!pdd) {
err = -EINVAL;
goto err_pdd;
}
dev = pdd->dev;
pdd = kfd_bind_process_to_device(dev, p);
if (IS_ERR(pdd)) {
err = PTR_ERR(pdd);
goto bind_process_to_device_fail;
}
pdd->qpd.sh_hidden_private_base = args->va_addr;
mutex_unlock(&p->mutex);
if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS &&
pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va)
dev->kfd2kgd->set_scratch_backing_va(
dev->adev, args->va_addr, pdd->qpd.vmid);
return 0;
bind_process_to_device_fail:
err_pdd:
mutex_unlock(&p->mutex);
return err;
}
static int kfd_ioctl_get_tile_config(struct file *filep,
struct kfd_process *p, void *data)
{
struct kfd_ioctl_get_tile_config_args *args = data;
struct kfd_process_device *pdd;
struct tile_config config;
int err = 0;
mutex_lock(&p->mutex);
pdd = kfd_process_device_data_by_id(p, args->gpu_id);
mutex_unlock(&p->mutex);
if (!pdd)
return -EINVAL;
amdgpu_amdkfd_get_tile_config(pdd->dev->adev, &config);
args->gb_addr_config = config.gb_addr_config;
args->num_banks = config.num_banks;
args->num_ranks = config.num_ranks;
if (args->num_tile_configs > config.num_tile_configs)
args->num_tile_configs = config.num_tile_configs;
err = copy_to_user((void __user *)args->tile_config_ptr,
config.tile_config_ptr,
args->num_tile_configs * sizeof(uint32_t));
if (err) {
args->num_tile_configs = 0;
return -EFAULT;
}
if (args->num_macro_tile_configs > config.num_macro_tile_configs)
args->num_macro_tile_configs =
config.num_macro_tile_configs;
err = copy_to_user((void __user *)args->macro_tile_config_ptr,
config.macro_tile_config_ptr,
args->num_macro_tile_configs * sizeof(uint32_t));
if (err) {
args->num_macro_tile_configs = 0;
return -EFAULT;
}
return 0;
}
static int kfd_ioctl_acquire_vm(struct file *filep, struct kfd_process *p,
void *data)
{
struct kfd_ioctl_acquire_vm_args *args = data;
struct kfd_process_device *pdd;
struct file *drm_file;
int ret;
drm_file = fget(args->drm_fd);
if (!drm_file)
return -EINVAL;
mutex_lock(&p->mutex);
pdd = kfd_process_device_data_by_id(p, args->gpu_id);
if (!pdd) {
ret = -EINVAL;
goto err_pdd;
}
if (pdd->drm_file) {
ret = pdd->drm_file == drm_file ? 0 : -EBUSY;
goto err_drm_file;
}
ret = kfd_process_device_init_vm(pdd, drm_file);
if (ret)
goto err_unlock;
/* On success, the PDD keeps the drm_file reference */
mutex_unlock(&p->mutex);
return 0;
err_unlock:
err_pdd:
err_drm_file:
mutex_unlock(&p->mutex);
fput(drm_file);
return ret;
}
bool kfd_dev_is_large_bar(struct kfd_node *dev)
{
if (debug_largebar) {
pr_debug("Simulate large-bar allocation on non large-bar machine\n");
return true;
}
if (dev->local_mem_info.local_mem_size_private == 0 &&
dev->local_mem_info.local_mem_size_public > 0)
return true;
if (dev->local_mem_info.local_mem_size_public == 0 &&
dev->kfd->adev->gmc.is_app_apu) {
pr_debug("APP APU, Consider like a large bar system\n");
return true;
}
return false;
}
static int kfd_ioctl_get_available_memory(struct file *filep,
struct kfd_process *p, void *data)
{
struct kfd_ioctl_get_available_memory_args *args = data;
struct kfd_process_device *pdd = kfd_lock_pdd_by_id(p, args->gpu_id);
if (!pdd)
return -EINVAL;
args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev,
pdd->dev->node_id);
kfd_unlock_pdd(pdd);
return 0;
}
static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
struct kfd_process *p, void *data)
{
struct kfd_ioctl_alloc_memory_of_gpu_args *args = data;
struct kfd_process_device *pdd;
void *mem;
struct kfd_node *dev;
int idr_handle;
long err;
uint64_t offset = args->mmap_offset;
uint32_t flags = args->flags;
if (args->size == 0)
return -EINVAL;
#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
/* Flush pending deferred work to avoid racing with deferred actions
* from previous memory map changes (e.g. munmap).
*/
svm_range_list_lock_and_flush_work(&p->svms, current->mm);
mutex_lock(&p->svms.lock);
mmap_write_unlock(current->mm);
if (interval_tree_iter_first(&p->svms.objects,
args->va_addr >> PAGE_SHIFT,
(args->va_addr + args->size - 1) >> PAGE_SHIFT)) {
pr_err("Address: 0x%llx already allocated by SVM\n",
args->va_addr);
mutex_unlock(&p->svms.lock);
return -EADDRINUSE;
}
/* When registering a user buffer, check whether it has already been
 * registered by SVM, keyed by the buffer's CPU virtual address.
 */
if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) &&
interval_tree_iter_first(&p->svms.objects,
args->mmap_offset >> PAGE_SHIFT,
(args->mmap_offset + args->size - 1) >> PAGE_SHIFT)) {
pr_err("User Buffer Address: 0x%llx already allocated by SVM\n",
args->mmap_offset);
mutex_unlock(&p->svms.lock);
return -EADDRINUSE;
}
mutex_unlock(&p->svms.lock);
#endif
mutex_lock(&p->mutex);
pdd = kfd_process_device_data_by_id(p, args->gpu_id);
if (!pdd) {
err = -EINVAL;
goto err_pdd;
}
dev = pdd->dev;
if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) &&
(flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) &&
!kfd_dev_is_large_bar(dev)) {
pr_err("Alloc host visible vram on small bar is not allowed\n");
err = -EINVAL;
goto err_large_bar;
}
pdd = kfd_bind_process_to_device(dev, p);
if (IS_ERR(pdd)) {
err = PTR_ERR(pdd);
goto err_unlock;
}
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
if (args->size != kfd_doorbell_process_slice(dev->kfd)) {
err = -EINVAL;
goto err_unlock;
}
offset = kfd_get_process_doorbells(pdd);
if (!offset) {
err = -ENOMEM;
goto err_unlock;
}
} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
if (args->size != PAGE_SIZE) {
err = -EINVAL;
goto err_unlock;
}
offset = dev->adev->rmmio_remap.bus_addr;
if (!offset) {
err = -ENOMEM;
goto err_unlock;
}
}
err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
dev->adev, args->va_addr, args->size,
pdd->drm_priv, (struct kgd_mem **) &mem, &offset,
flags, false);
if (err)
goto err_unlock;
idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
if (idr_handle < 0) {
err = -EFAULT;
goto err_free;
}
/* Update the VRAM usage count */
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
uint64_t size = args->size;
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM)
size >>= 1;
WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + PAGE_ALIGN(size));
}
mutex_unlock(&p->mutex);
args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
args->mmap_offset = offset;
/* MMIO is mapped through the kfd device, so generate a
 * kfd mmap offset for it.
 */
if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)
args->mmap_offset = KFD_MMAP_TYPE_MMIO
| KFD_MMAP_GPU_ID(args->gpu_id);
return 0;
err_free:
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, (struct kgd_mem *)mem,
pdd->drm_priv, NULL);
err_unlock:
err_pdd:
err_large_bar:
mutex_unlock(&p->mutex);
return err;
}
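/*
 * Lifecycle sketch (illustrative): the 64-bit handle returned above packs the
 * gpu_id and the per-device IDR handle (MAKE_HANDLE / GET_GPU_ID /
 * GET_IDR_HANDLE) and threads the whole BO lifecycle:
 *
 *	AMDKFD_IOC_ALLOC_MEMORY_OF_GPU   -> args.handle, args.mmap_offset
 *	AMDKFD_IOC_MAP_MEMORY_TO_GPU     -> map the handle on one or more GPUs
 *	... use the memory ...
 *	AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU -> unmap from the same GPUs
 *	AMDKFD_IOC_FREE_MEMORY_OF_GPU    -> release the handle
 *
 * The map/unmap handlers below take a device-id array, so a single BO can be
 * mapped into several GPUVM address spaces in one call.
 */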
static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
struct kfd_process *p, void *data)
{
struct kfd_ioctl_free_memory_of_gpu_args *args = data;
struct kfd_process_device *pdd;
void *mem;
int ret;
uint64_t size = 0;
mutex_lock(&p->mutex);
/*
* Safeguard to prevent user space from freeing signal BO.
* It will be freed at process termination.
*/
if (p->signal_handle && (p->signal_handle == args->handle)) {
pr_err("Free signal BO is not allowed\n");
ret = -EPERM;
goto err_unlock;
}
pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
if (!pdd) {
pr_err("Process device data doesn't exist\n");
ret = -EINVAL;
goto err_pdd;
}
mem = kfd_process_device_translate_handle(
pdd, GET_IDR_HANDLE(args->handle));
if (!mem) {
ret = -EINVAL;
goto err_unlock;
}
ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev,
(struct kgd_mem *)mem, pdd->drm_priv, &size);
/* If freeing the buffer failed, leave the handle in place for
* clean-up during process tear-down.
*/
if (!ret)
kfd_process_device_remove_obj_handle(
pdd, GET_IDR_HANDLE(args->handle));
WRITE_ONCE(pdd->vram_usage, pdd->vram_usage - size);
err_unlock:
err_pdd:
mutex_unlock(&p->mutex);
return ret;
}
static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
struct kfd_process *p, void *data)
{
struct kfd_ioctl_map_memory_to_gpu_args *args = data;
struct kfd_process_device *pdd, *peer_pdd;
void *mem;
struct kfd_node *dev;
long err = 0;
int i;
uint32_t *devices_arr = NULL;
if (!args->n_devices) {
pr_debug("Device IDs array empty\n");
return -EINVAL;
}
if (args->n_success > args->n_devices) {
pr_debug("n_success exceeds n_devices\n");
return -EINVAL;
}
devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
GFP_KERNEL);
if (!devices_arr)
return -ENOMEM;
err = copy_from_user(devices_arr,
(void __user *)args->device_ids_array_ptr,
args->n_devices * sizeof(*devices_arr));
if (err != 0) {
err = -EFAULT;
goto copy_from_user_failed;
}
mutex_lock(&p->mutex);
pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
if (!pdd) {
err = -EINVAL;
goto get_process_device_data_failed;
}
dev = pdd->dev;
pdd = kfd_bind_process_to_device(dev, p);
if (IS_ERR(pdd)) {
err = PTR_ERR(pdd);
goto bind_process_to_device_failed;
}
mem = kfd_process_device_translate_handle(pdd,
GET_IDR_HANDLE(args->handle));
if (!mem) {
err = -ENOMEM;
goto get_mem_obj_from_handle_failed;
}
for (i = args->n_success; i < args->n_devices; i++) {
peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
if (!peer_pdd) {
pr_debug("Getting device by id failed for 0x%x\n",
devices_arr[i]);
err = -EINVAL;
goto get_mem_obj_from_handle_failed;
}
peer_pdd = kfd_bind_process_to_device(peer_pdd->dev, p);
if (IS_ERR(peer_pdd)) {
err = PTR_ERR(peer_pdd);
goto get_mem_obj_from_handle_failed;
}
err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
peer_pdd->dev->adev, (struct kgd_mem *)mem,
peer_pdd->drm_priv);
if (err) {
struct pci_dev *pdev = peer_pdd->dev->adev->pdev;
dev_err(dev->adev->dev,
"Failed to map peer:%04x:%02x:%02x.%d mem_domain:%d\n",
pci_domain_nr(pdev->bus),
pdev->bus->number,
PCI_SLOT(pdev->devfn),
PCI_FUNC(pdev->devfn),
((struct kgd_mem *)mem)->domain);
goto map_memory_to_gpu_failed;
}
args->n_success = i+1;
}
err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true);
if (err) {
pr_debug("Sync memory failed, wait interrupted by user signal\n");
goto sync_memory_failed;
}
mutex_unlock(&p->mutex);
/* Flush TLBs after waiting for the page table updates to complete */
for (i = 0; i < args->n_devices; i++) {
peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
if (WARN_ON_ONCE(!peer_pdd))
continue;
kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
}
kfree(devices_arr);
return err;
get_process_device_data_failed:
bind_process_to_device_failed:
get_mem_obj_from_handle_failed:
map_memory_to_gpu_failed:
sync_memory_failed:
mutex_unlock(&p->mutex);
copy_from_user_failed:
kfree(devices_arr);
return err;
}
static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
struct kfd_process *p, void *data)
{
struct kfd_ioctl_unmap_memory_from_gpu_args *args = data;
struct kfd_process_device *pdd, *peer_pdd;
void *mem;
long err = 0;
uint32_t *devices_arr = NULL, i;
bool flush_tlb;
if (!args->n_devices) {
pr_debug("Device IDs array empty\n");
return -EINVAL;
}
if (args->n_success > args->n_devices) {
pr_debug("n_success exceeds n_devices\n");
return -EINVAL;
}
devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
GFP_KERNEL);
if (!devices_arr)
return -ENOMEM;
err = copy_from_user(devices_arr,
(void __user *)args->device_ids_array_ptr,
args->n_devices * sizeof(*devices_arr));
if (err != 0) {
err = -EFAULT;
goto copy_from_user_failed;
}
mutex_lock(&p->mutex);
pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
if (!pdd) {
err = -EINVAL;
goto bind_process_to_device_failed;
}
mem = kfd_process_device_translate_handle(pdd,
GET_IDR_HANDLE(args->handle));
if (!mem) {
err = -ENOMEM;
goto get_mem_obj_from_handle_failed;
}
for (i = args->n_success; i < args->n_devices; i++) {
peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
if (!peer_pdd) {
err = -EINVAL;
goto get_mem_obj_from_handle_failed;
}
err = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
peer_pdd->dev->adev, (struct kgd_mem *)mem, peer_pdd->drm_priv);
if (err) {
pr_err("Failed to unmap from gpu %d/%d\n",
i, args->n_devices);
goto unmap_memory_from_gpu_failed;
}
args->n_success = i+1;
}
flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev->kfd);
if (flush_tlb) {
err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev,
(struct kgd_mem *) mem, true);
if (err) {
pr_debug("Sync memory failed, wait interrupted by user signal\n");
goto sync_memory_failed;
}
}
mutex_unlock(&p->mutex);
if (flush_tlb) {
/* Flush TLBs after waiting for the page table updates to complete */
for (i = 0; i < args->n_devices; i++) {
peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
if (WARN_ON_ONCE(!peer_pdd))
continue;
kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
}
}
kfree(devices_arr);
return 0;
bind_process_to_device_failed:
get_mem_obj_from_handle_failed:
unmap_memory_from_gpu_failed:
sync_memory_failed:
mutex_unlock(&p->mutex);
copy_from_user_failed:
kfree(devices_arr);
return err;
}
static int kfd_ioctl_alloc_queue_gws(struct file *filep,
struct kfd_process *p, void *data)
{
int retval;
struct kfd_ioctl_alloc_queue_gws_args *args = data;
struct queue *q;
struct kfd_node *dev;
mutex_lock(&p->mutex);
q = pqm_get_user_queue(&p->pqm, args->queue_id);
if (q) {
dev = q->device;
} else {
retval = -EINVAL;
goto out_unlock;
}
if (!dev->gws) {
retval = -ENODEV;
goto out_unlock;
}
if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
retval = -ENODEV;
goto out_unlock;
}
if (p->debug_trap_enabled && (!kfd_dbg_has_gws_support(dev) ||
kfd_dbg_has_cwsr_workaround(dev))) {
retval = -EBUSY;
goto out_unlock;
}
retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? dev->gws : NULL);
mutex_unlock(&p->mutex);
args->first_gws = 0;
return retval;
out_unlock:
mutex_unlock(&p->mutex);
return retval;
}
static int kfd_ioctl_get_dmabuf_info(struct file *filep,
struct kfd_process *p, void *data)
{
struct kfd_ioctl_get_dmabuf_info_args *args = data;
struct kfd_node *dev = NULL;
struct amdgpu_device *dmabuf_adev;
void *metadata_buffer = NULL;
uint32_t flags;
int8_t xcp_id;
unsigned int i;
int r;
/* Find a KFD GPU device that supports the get_dmabuf_info query */
for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++)
if (dev)
break;
if (!dev)
return -EINVAL;
if (args->metadata_ptr) {
metadata_buffer = kzalloc(args->metadata_size, GFP_KERNEL);
if (!metadata_buffer)
return -ENOMEM;
}
/* Get dmabuf info from KGD */
r = amdgpu_amdkfd_get_dmabuf_info(dev->adev, args->dmabuf_fd,
&dmabuf_adev, &args->size,
metadata_buffer, args->metadata_size,
&args->metadata_size, &flags, &xcp_id);
if (r)
goto exit;
if (xcp_id >= 0)
args->gpu_id = dmabuf_adev->kfd.dev->nodes[xcp_id]->id;
else
args->gpu_id = dmabuf_adev->kfd.dev->nodes[0]->id;
args->flags = flags;
/* Copy metadata buffer to user mode */
if (metadata_buffer) {
r = copy_to_user((void __user *)args->metadata_ptr,
metadata_buffer, args->metadata_size);
if (r != 0)
r = -EFAULT;
}
exit:
kfree(metadata_buffer);
return r;
}
static int kfd_ioctl_import_dmabuf(struct file *filep,
struct kfd_process *p, void *data)
{
struct kfd_ioctl_import_dmabuf_args *args = data;
struct kfd_process_device *pdd;
struct dma_buf *dmabuf;
int idr_handle;
uint64_t size;
void *mem;
int r;
dmabuf = dma_buf_get(args->dmabuf_fd);
if (IS_ERR(dmabuf))
return PTR_ERR(dmabuf);
mutex_lock(&p->mutex);
pdd = kfd_process_device_data_by_id(p, args->gpu_id);
if (!pdd) {
r = -EINVAL;
goto err_unlock;
}
pdd = kfd_bind_process_to_device(pdd->dev, p);
if (IS_ERR(pdd)) {
r = PTR_ERR(pdd);
goto err_unlock;
}
r = amdgpu_amdkfd_gpuvm_import_dmabuf(pdd->dev->adev, dmabuf,
args->va_addr, pdd->drm_priv,
(struct kgd_mem **)&mem, &size,
NULL);
if (r)
goto err_unlock;
idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
if (idr_handle < 0) {
r = -EFAULT;
goto err_free;
}
mutex_unlock(&p->mutex);
dma_buf_put(dmabuf);
args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
return 0;
err_free:
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, (struct kgd_mem *)mem,
pdd->drm_priv, NULL);
err_unlock:
mutex_unlock(&p->mutex);
dma_buf_put(dmabuf);
return r;
}
static int kfd_ioctl_export_dmabuf(struct file *filep,
struct kfd_process *p, void *data)
{
struct kfd_ioctl_export_dmabuf_args *args = data;
struct kfd_process_device *pdd;
struct dma_buf *dmabuf;
struct kfd_node *dev;
void *mem;
int ret = 0;
dev = kfd_device_by_id(GET_GPU_ID(args->handle));
if (!dev)
return -EINVAL;
mutex_lock(&p->mutex);
pdd = kfd_get_process_device_data(dev, p);
if (!pdd) {
ret = -EINVAL;
goto err_unlock;
}
mem = kfd_process_device_translate_handle(pdd,
GET_IDR_HANDLE(args->handle));
if (!mem) {
ret = -EINVAL;
goto err_unlock;
}
ret = amdgpu_amdkfd_gpuvm_export_dmabuf(mem, &dmabuf);
mutex_unlock(&p->mutex);
if (ret)
goto err_out;
ret = dma_buf_fd(dmabuf, args->flags);
if (ret < 0) {
dma_buf_put(dmabuf);
goto err_out;
}
/* dma_buf_fd assigns the reference count to the fd, no need to
* put the reference here.
*/
args->dmabuf_fd = ret;
return 0;
err_unlock:
mutex_unlock(&p->mutex);
err_out:
return ret;
}
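/*
 * Illustrative flow (user-space side, assumptions only): the export/import
 * ioctls above pair up to share a KFD BO across processes or APIs:
 *
 *	// exporter: turn a KFD buffer handle into a dma-buf fd
 *	export.handle = handle;
 *	export.flags  = O_CLOEXEC;
 *	ioctl(kfd_fd, AMDKFD_IOC_EXPORT_DMABUF, &export);
 *	// ... pass export.dmabuf_fd to the peer, e.g. via SCM_RIGHTS ...
 *
 *	// importer: map the dma-buf into its own GPUVM at va_addr
 *	import.dmabuf_fd = received_fd;
 *	import.gpu_id	 = gpu_id;
 *	import.va_addr	 = va_addr;
 *	ioctl(kfd_fd, AMDKFD_IOC_IMPORT_DMABUF, &import);
 */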
/* Handle requests for watching SMI events */
static int kfd_ioctl_smi_events(struct file *filep,
struct kfd_process *p, void *data)
{
struct kfd_ioctl_smi_events_args *args = data;
struct kfd_process_device *pdd;
mutex_lock(&p->mutex);
pdd = kfd_process_device_data_by_id(p, args->gpuid);
mutex_unlock(&p->mutex);
if (!pdd)
return -EINVAL;
return kfd_smi_event_open(pdd->dev, &args->anon_fd);
}
#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
static int kfd_ioctl_set_xnack_mode(struct file *filep,
struct kfd_process *p, void *data)
{
struct kfd_ioctl_set_xnack_mode_args *args = data;
int r = 0;
mutex_lock(&p->mutex);
if (args->xnack_enabled >= 0) {
if (!list_empty(&p->pqm.queues)) {
pr_debug("Process has user queues running\n");
r = -EBUSY;
goto out_unlock;
}
if (p->xnack_enabled == args->xnack_enabled)
goto out_unlock;
if (args->xnack_enabled && !kfd_process_xnack_mode(p, true)) {
r = -EPERM;
goto out_unlock;
}
r = svm_range_switch_xnack_reserve_mem(p, args->xnack_enabled);
} else {
args->xnack_enabled = p->xnack_enabled;
}
out_unlock:
mutex_unlock(&p->mutex);
return r;
}
static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
{
struct kfd_ioctl_svm_args *args = data;
int r = 0;
pr_debug("start 0x%llx size 0x%llx op 0x%x nattr 0x%x\n",
args->start_addr, args->size, args->op, args->nattr);
if ((args->start_addr & ~PAGE_MASK) || (args->size & ~PAGE_MASK))
return -EINVAL;
if (!args->start_addr || !args->size)
return -EINVAL;
r = svm_ioctl(p, args->op, args->start_addr, args->size, args->nattr,
args->attrs);
return r;
}
#else
static int kfd_ioctl_set_xnack_mode(struct file *filep,
struct kfd_process *p, void *data)
{
return -EPERM;
}
static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
{
return -EPERM;
}
#endif
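/*
 * Illustrative sketch (user-space side, assumptions only): kfd_ioctl_svm()
 * takes a page-aligned range plus a variable-length attribute list, e.g. to
 * set a preferred location for a range:
 *
 *	size_t sz = sizeof(struct kfd_ioctl_svm_args) +
 *		    sizeof(struct kfd_ioctl_svm_attribute);
 *	struct kfd_ioctl_svm_args *req = calloc(1, sz);
 *
 *	req->start_addr = va;		// page aligned
 *	req->size	= bytes;	// page aligned
 *	req->op		= KFD_IOCTL_SVM_OP_SET_ATTR;
 *	req->nattr	= 1;
 *	req->attrs[0].type  = KFD_IOCTL_SVM_ATTR_PREFERRED_LOC;
 *	req->attrs[0].value = gpu_id;	// 0 selects system memory
 *	ioctl(kfd_fd, AMDKFD_IOC_SVM, req);
 */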
static int criu_checkpoint_process(struct kfd_process *p,
uint8_t __user *user_priv_data,
uint64_t *priv_offset)
{
struct kfd_criu_process_priv_data process_priv;
int ret;
memset(&process_priv, 0, sizeof(process_priv));
process_priv.version = KFD_CRIU_PRIV_VERSION;
/* For CR, we don't consider the negative xnack mode that is used for
 * querying without changing it. Here 0 simply means disabled and 1
 * means enabled, i.e. retry faults to find a valid PTE.
 */
process_priv.xnack_mode = p->xnack_enabled ? 1 : 0;
ret = copy_to_user(user_priv_data + *priv_offset,
&process_priv, sizeof(process_priv));
if (ret) {
pr_err("Failed to copy process information to user\n");
ret = -EFAULT;
}
*priv_offset += sizeof(process_priv);
return ret;
}
static int criu_checkpoint_devices(struct kfd_process *p,
uint32_t num_devices,
uint8_t __user *user_addr,
uint8_t __user *user_priv_data,
uint64_t *priv_offset)
{
struct kfd_criu_device_priv_data *device_priv = NULL;
struct kfd_criu_device_bucket *device_buckets = NULL;
int ret = 0, i;
device_buckets = kvzalloc(num_devices * sizeof(*device_buckets), GFP_KERNEL);
if (!device_buckets) {
ret = -ENOMEM;
goto exit;
}
device_priv = kvzalloc(num_devices * sizeof(*device_priv), GFP_KERNEL);
if (!device_priv) {
ret = -ENOMEM;
goto exit;
}
for (i = 0; i < num_devices; i++) {
struct kfd_process_device *pdd = p->pdds[i];
device_buckets[i].user_gpu_id = pdd->user_gpu_id;
device_buckets[i].actual_gpu_id = pdd->dev->id;
/*
* priv_data does not contain useful information for now and is reserved for
* future use, so we do not set its contents.
*/
}
ret = copy_to_user(user_addr, device_buckets, num_devices * sizeof(*device_buckets));
if (ret) {
pr_err("Failed to copy device information to user\n");
ret = -EFAULT;
goto exit;
}
ret = copy_to_user(user_priv_data + *priv_offset,
device_priv,
num_devices * sizeof(*device_priv));
if (ret) {
pr_err("Failed to copy device information to user\n");
ret = -EFAULT;
}
*priv_offset += num_devices * sizeof(*device_priv);
exit:
kvfree(device_buckets);
kvfree(device_priv);
return ret;
}
static uint32_t get_process_num_bos(struct kfd_process *p)
{
uint32_t num_of_bos = 0;
int i;
/* Run over all PDDs of the process */
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
void *mem;
int id;
idr_for_each_entry(&pdd->alloc_idr, mem, id) {
struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
if (!kgd_mem->va || kgd_mem->va > pdd->gpuvm_base)
num_of_bos++;
}
}
return num_of_bos;
}
static int criu_get_prime_handle(struct kgd_mem *mem, int flags,
u32 *shared_fd)
{
struct dma_buf *dmabuf;
int ret;
ret = amdgpu_amdkfd_gpuvm_export_dmabuf(mem, &dmabuf);
if (ret) {
pr_err("dmabuf export failed for the BO\n");
return ret;
}
ret = dma_buf_fd(dmabuf, flags);
if (ret < 0) {
pr_err("dmabuf create fd failed, ret:%d\n", ret);
goto out_free_dmabuf;
}
*shared_fd = ret;
return 0;
out_free_dmabuf:
dma_buf_put(dmabuf);
return ret;
}
static int criu_checkpoint_bos(struct kfd_process *p,
uint32_t num_bos,
uint8_t __user *user_bos,
uint8_t __user *user_priv_data,
uint64_t *priv_offset)
{
struct kfd_criu_bo_bucket *bo_buckets;
struct kfd_criu_bo_priv_data *bo_privs;
int ret = 0, pdd_index, bo_index = 0, id;
void *mem;
bo_buckets = kvzalloc(num_bos * sizeof(*bo_buckets), GFP_KERNEL);
if (!bo_buckets)
return -ENOMEM;
bo_privs = kvzalloc(num_bos * sizeof(*bo_privs), GFP_KERNEL);
if (!bo_privs) {
ret = -ENOMEM;
goto exit;
}
for (pdd_index = 0; pdd_index < p->n_pdds; pdd_index++) {
struct kfd_process_device *pdd = p->pdds[pdd_index];
struct amdgpu_bo *dumper_bo;
struct kgd_mem *kgd_mem;
idr_for_each_entry(&pdd->alloc_idr, mem, id) {
struct kfd_criu_bo_bucket *bo_bucket;
struct kfd_criu_bo_priv_data *bo_priv;
int i, dev_idx = 0;
if (!mem) {
ret = -ENOMEM;
goto exit;
}
kgd_mem = (struct kgd_mem *)mem;
dumper_bo = kgd_mem->bo;
/* Skip checkpointing BOs that are used for trap handler
 * code and state. Currently, these BOs have a VA that is
 * less than the GPUVM base.
 */
if (kgd_mem->va && kgd_mem->va <= pdd->gpuvm_base)
continue;
bo_bucket = &bo_buckets[bo_index];
bo_priv = &bo_privs[bo_index];
bo_bucket->gpu_id = pdd->user_gpu_id;
bo_bucket->addr = (uint64_t)kgd_mem->va;
bo_bucket->size = amdgpu_bo_size(dumper_bo);
bo_bucket->alloc_flags = (uint32_t)kgd_mem->alloc_flags;
bo_priv->idr_handle = id;
if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
ret = amdgpu_ttm_tt_get_userptr(&dumper_bo->tbo,
&bo_priv->user_addr);
if (ret) {
pr_err("Failed to obtain user address for user-pointer bo\n");
goto exit;
}
}
if (bo_bucket->alloc_flags
& (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) {
ret = criu_get_prime_handle(kgd_mem,
bo_bucket->alloc_flags &
KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? DRM_RDWR : 0,
&bo_bucket->dmabuf_fd);
if (ret)
goto exit;
} else {
bo_bucket->dmabuf_fd = KFD_INVALID_FD;
}
if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL)
bo_bucket->offset = KFD_MMAP_TYPE_DOORBELL |
KFD_MMAP_GPU_ID(pdd->dev->id);
else if (bo_bucket->alloc_flags &
KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)
bo_bucket->offset = KFD_MMAP_TYPE_MMIO |
KFD_MMAP_GPU_ID(pdd->dev->id);
else
bo_bucket->offset = amdgpu_bo_mmap_offset(dumper_bo);
for (i = 0; i < p->n_pdds; i++) {
if (amdgpu_amdkfd_bo_mapped_to_dev(p->pdds[i]->dev->adev, kgd_mem))
bo_priv->mapped_gpuids[dev_idx++] = p->pdds[i]->user_gpu_id;
}
pr_debug("bo_size = 0x%llx, bo_addr = 0x%llx bo_offset = 0x%llx\n"
"gpu_id = 0x%x alloc_flags = 0x%x idr_handle = 0x%x",
bo_bucket->size,
bo_bucket->addr,
bo_bucket->offset,
bo_bucket->gpu_id,
bo_bucket->alloc_flags,
bo_priv->idr_handle);
bo_index++;
}
}
ret = copy_to_user(user_bos, bo_buckets, num_bos * sizeof(*bo_buckets));
if (ret) {
pr_err("Failed to copy BO information to user\n");
ret = -EFAULT;
goto exit;
}
ret = copy_to_user(user_priv_data + *priv_offset, bo_privs, num_bos * sizeof(*bo_privs));
if (ret) {
pr_err("Failed to copy BO priv information to user\n");
ret = -EFAULT;
goto exit;
}
*priv_offset += num_bos * sizeof(*bo_privs);
exit:
while (ret && bo_index--) {
if (bo_buckets[bo_index].alloc_flags
& (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT))
close_fd(bo_buckets[bo_index].dmabuf_fd);
}
kvfree(bo_buckets);
kvfree(bo_privs);
return ret;
}
static int criu_get_process_object_info(struct kfd_process *p,
uint32_t *num_devices,
uint32_t *num_bos,
uint32_t *num_objects,
uint64_t *objs_priv_size)
{
uint64_t queues_priv_data_size, svm_priv_data_size, priv_size;
uint32_t num_queues, num_events, num_svm_ranges;
int ret;
*num_devices = p->n_pdds;
*num_bos = get_process_num_bos(p);
ret = kfd_process_get_queue_info(p, &num_queues, &queues_priv_data_size);
if (ret)
return ret;
num_events = kfd_get_num_events(p);
ret = svm_range_get_info(p, &num_svm_ranges, &svm_priv_data_size);
if (ret)
return ret;
*num_objects = num_queues + num_events + num_svm_ranges;
if (objs_priv_size) {
priv_size = sizeof(struct kfd_criu_process_priv_data);
priv_size += *num_devices * sizeof(struct kfd_criu_device_priv_data);
priv_size += *num_bos * sizeof(struct kfd_criu_bo_priv_data);
priv_size += queues_priv_data_size;
priv_size += num_events * sizeof(struct kfd_criu_event_priv_data);
priv_size += svm_priv_data_size;
*objs_priv_size = priv_size;
}
return 0;
}
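/*
 * Resulting private-data layout, as sized above and produced/consumed by the
 * checkpoint and restore helpers (in order of appearance):
 *
 *	[ process_priv                 ]
 *	[ device_priv  x num_devices   ]
 *	[ bo_priv      x num_bos       ]
 *	[ queue data   (variable size) ]
 *	[ event_priv   x num_events    ]
 *	[ svm range data (variable)    ]
 *
 * criu_checkpoint() fills the buffer in this order, except that the BO slots
 * are reserved first and written last (see the comments there).
 */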
static int criu_checkpoint(struct file *filep,
struct kfd_process *p,
struct kfd_ioctl_criu_args *args)
{
int ret;
uint32_t num_devices, num_bos, num_objects;
uint64_t priv_size, priv_offset = 0, bo_priv_offset;
if (!args->devices || !args->bos || !args->priv_data)
return -EINVAL;
mutex_lock(&p->mutex);
if (!p->n_pdds) {
pr_err("No pdd for given process\n");
ret = -ENODEV;
goto exit_unlock;
}
/* Confirm all process queues are evicted */
if (!p->queues_paused) {
pr_err("Cannot dump process when queues are not in evicted state\n");
/* CRIU plugin did not call op PROCESS_INFO before checkpointing */
ret = -EINVAL;
goto exit_unlock;
}
ret = criu_get_process_object_info(p, &num_devices, &num_bos, &num_objects, &priv_size);
if (ret)
goto exit_unlock;
if (num_devices != args->num_devices ||
num_bos != args->num_bos ||
num_objects != args->num_objects ||
priv_size != args->priv_data_size) {
ret = -EINVAL;
goto exit_unlock;
}
/* each function will store private data inside priv_data and adjust priv_offset */
ret = criu_checkpoint_process(p, (uint8_t __user *)args->priv_data, &priv_offset);
if (ret)
goto exit_unlock;
ret = criu_checkpoint_devices(p, num_devices, (uint8_t __user *)args->devices,
(uint8_t __user *)args->priv_data, &priv_offset);
if (ret)
goto exit_unlock;
/* Leave room for BOs in the private data. They need to be restored
* before events, but we checkpoint them last to simplify the error
* handling.
*/
bo_priv_offset = priv_offset;
priv_offset += num_bos * sizeof(struct kfd_criu_bo_priv_data);
if (num_objects) {
ret = kfd_criu_checkpoint_queues(p, (uint8_t __user *)args->priv_data,
&priv_offset);
if (ret)
goto exit_unlock;
ret = kfd_criu_checkpoint_events(p, (uint8_t __user *)args->priv_data,
&priv_offset);
if (ret)
goto exit_unlock;
ret = kfd_criu_checkpoint_svm(p, (uint8_t __user *)args->priv_data, &priv_offset);
if (ret)
goto exit_unlock;
}
/* This must be the last thing in this function that can fail.
* Otherwise we leak dmabuf file descriptors.
*/
ret = criu_checkpoint_bos(p, num_bos, (uint8_t __user *)args->bos,
(uint8_t __user *)args->priv_data, &bo_priv_offset);
exit_unlock:
mutex_unlock(&p->mutex);
if (ret)
pr_err("Failed to dump CRIU ret:%d\n", ret);
else
pr_debug("CRIU dump ret:%d\n", ret);
return ret;
}
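/*
 * Illustrative call sequence (CRIU plugin side, assumptions only): the CRIU
 * ioctl is issued several times per process, roughly in this order:
 *
 *	args.op = KFD_CRIU_OP_PROCESS_INFO;	// pause queues, return counts/sizes
 *	args.op = KFD_CRIU_OP_CHECKPOINT;	// dump path, criu_checkpoint() above
 *	args.op = KFD_CRIU_OP_UNPAUSE;		// let the original process run again
 *
 *	args.op = KFD_CRIU_OP_RESTORE;		// restore path, helpers below
 *	args.op = KFD_CRIU_OP_RESUME;		// final stage: unblock MMU notifiers
 *
 * The p->queues_paused check in criu_checkpoint() enforces that PROCESS_INFO
 * ran before the dump.
 */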
static int criu_restore_process(struct kfd_process *p,
struct kfd_ioctl_criu_args *args,
uint64_t *priv_offset,
uint64_t max_priv_data_size)
{
int ret = 0;
struct kfd_criu_process_priv_data process_priv;
if (*priv_offset + sizeof(process_priv) > max_priv_data_size)
return -EINVAL;
ret = copy_from_user(&process_priv,
(void __user *)(args->priv_data + *priv_offset),
sizeof(process_priv));
if (ret) {
pr_err("Failed to copy process private information from user\n");
ret = -EFAULT;
goto exit;
}
*priv_offset += sizeof(process_priv);
if (process_priv.version != KFD_CRIU_PRIV_VERSION) {
pr_err("Invalid CRIU API version (checkpointed:%d current:%d)\n",
process_priv.version, KFD_CRIU_PRIV_VERSION);
return -EINVAL;
}
pr_debug("Setting XNACK mode\n");
if (process_priv.xnack_mode && !kfd_process_xnack_mode(p, true)) {
pr_err("xnack mode cannot be set\n");
ret = -EPERM;
goto exit;
} else {
pr_debug("set xnack mode: %d\n", process_priv.xnack_mode);
p->xnack_enabled = process_priv.xnack_mode;
}
exit:
return ret;
}
static int criu_restore_devices(struct kfd_process *p,
struct kfd_ioctl_criu_args *args,
uint64_t *priv_offset,
uint64_t max_priv_data_size)
{
struct kfd_criu_device_bucket *device_buckets;
struct kfd_criu_device_priv_data *device_privs;
int ret = 0;
uint32_t i;
if (args->num_devices != p->n_pdds)
return -EINVAL;
if (*priv_offset + (args->num_devices * sizeof(*device_privs)) > max_priv_data_size)
return -EINVAL;
device_buckets = kmalloc_array(args->num_devices, sizeof(*device_buckets), GFP_KERNEL);
if (!device_buckets)
return -ENOMEM;
ret = copy_from_user(device_buckets, (void __user *)args->devices,
args->num_devices * sizeof(*device_buckets));
if (ret) {
pr_err("Failed to copy devices buckets from user\n");
ret = -EFAULT;
goto exit;
}
for (i = 0; i < args->num_devices; i++) {
struct kfd_node *dev;
struct kfd_process_device *pdd;
struct file *drm_file;
/* device private data is not currently used */
if (!device_buckets[i].user_gpu_id) {
pr_err("Invalid user gpu_id\n");
ret = -EINVAL;
goto exit;
}
dev = kfd_device_by_id(device_buckets[i].actual_gpu_id);
if (!dev) {
pr_err("Failed to find device with gpu_id = %x\n",
device_buckets[i].actual_gpu_id);
ret = -EINVAL;
goto exit;
}
pdd = kfd_get_process_device_data(dev, p);
if (!pdd) {
pr_err("Failed to get pdd for gpu_id = %x\n",
device_buckets[i].actual_gpu_id);
ret = -EINVAL;
goto exit;
}
pdd->user_gpu_id = device_buckets[i].user_gpu_id;
drm_file = fget(device_buckets[i].drm_fd);
if (!drm_file) {
pr_err("Invalid render node file descriptor sent from plugin (%d)\n",
device_buckets[i].drm_fd);
ret = -EINVAL;
goto exit;
}
if (pdd->drm_file) {
ret = -EINVAL;
goto exit;
}
/* create the vm using render nodes for kfd pdd */
if (kfd_process_device_init_vm(pdd, drm_file)) {
pr_err("could not init vm for given pdd\n");
/* On success, the PDD keeps the drm_file reference */
fput(drm_file);
ret = -EINVAL;
goto exit;
}
/*
 * pdd already has the VM bound to the render node, so the call below won't
 * create a new exclusive KFD mapping but will reuse the existing renderDXXX
 * one. The call is still needed for IOMMUv2 binding and runtime PM.
 */
pdd = kfd_bind_process_to_device(dev, p);
if (IS_ERR(pdd)) {
ret = PTR_ERR(pdd);
goto exit;
}
if (!pdd->qpd.proc_doorbells) {
ret = kfd_alloc_process_doorbells(dev->kfd, pdd);
if (ret)
goto exit;
}
}
/*
 * We are not copying device private data from user as we are not using the
 * data for now, but we still advance the offset past it.
 */
*priv_offset += args->num_devices * sizeof(*device_privs);
exit:
kfree(device_buckets);
return ret;
}
static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd,
struct kfd_criu_bo_bucket *bo_bucket,
struct kfd_criu_bo_priv_data *bo_priv,
struct kgd_mem **kgd_mem)
{
int idr_handle;
int ret;
const bool criu_resume = true;
u64 offset;
if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
if (bo_bucket->size !=
kfd_doorbell_process_slice(pdd->dev->kfd))
return -EINVAL;
offset = kfd_get_process_doorbells(pdd);
if (!offset)
return -ENOMEM;
} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
/* MMIO BOs need the remapped bus address */
if (bo_bucket->size != PAGE_SIZE) {
pr_err("Invalid page size\n");
return -EINVAL;
}
offset = pdd->dev->adev->rmmio_remap.bus_addr;
if (!offset) {
pr_err("amdgpu_amdkfd_get_mmio_remap_phys_addr failed\n");
return -ENOMEM;
}
} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
offset = bo_priv->user_addr;
}
/* Create the BO */
ret = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(pdd->dev->adev, bo_bucket->addr,
bo_bucket->size, pdd->drm_priv, kgd_mem,
&offset, bo_bucket->alloc_flags, criu_resume);
if (ret) {
pr_err("Could not create the BO\n");
return ret;
}
pr_debug("New BO created: size:0x%llx addr:0x%llx offset:0x%llx\n",
bo_bucket->size, bo_bucket->addr, offset);
/* Restore previous IDR handle */
pr_debug("Restoring old IDR handle for the BO");
idr_handle = idr_alloc(&pdd->alloc_idr, *kgd_mem, bo_priv->idr_handle,
bo_priv->idr_handle + 1, GFP_KERNEL);
if (idr_handle < 0) {
pr_err("Could not allocate idr\n");
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, *kgd_mem, pdd->drm_priv,
NULL);
return -ENOMEM;
}
if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL)
bo_bucket->restored_offset = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(pdd->dev->id);
if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
bo_bucket->restored_offset = KFD_MMAP_TYPE_MMIO | KFD_MMAP_GPU_ID(pdd->dev->id);
} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
bo_bucket->restored_offset = offset;
} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
bo_bucket->restored_offset = offset;
/* Update the VRAM usage count */
WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + bo_bucket->size);
}
return 0;
}
static int criu_restore_bo(struct kfd_process *p,
struct kfd_criu_bo_bucket *bo_bucket,
struct kfd_criu_bo_priv_data *bo_priv)
{
struct kfd_process_device *pdd;
struct kgd_mem *kgd_mem;
int ret;
int j;
pr_debug("Restoring BO size:0x%llx addr:0x%llx gpu_id:0x%x flags:0x%x idr_handle:0x%x\n",
bo_bucket->size, bo_bucket->addr, bo_bucket->gpu_id, bo_bucket->alloc_flags,
bo_priv->idr_handle);
pdd = kfd_process_device_data_by_id(p, bo_bucket->gpu_id);
if (!pdd) {
pr_err("Failed to get pdd\n");
return -ENODEV;
}
ret = criu_restore_memory_of_gpu(pdd, bo_bucket, bo_priv, &kgd_mem);
if (ret)
return ret;
/* now map these BOs to GPU/s */
for (j = 0; j < p->n_pdds; j++) {
struct kfd_node *peer;
struct kfd_process_device *peer_pdd;
if (!bo_priv->mapped_gpuids[j])
break;
peer_pdd = kfd_process_device_data_by_id(p, bo_priv->mapped_gpuids[j]);
if (!peer_pdd)
return -EINVAL;
peer = peer_pdd->dev;
peer_pdd = kfd_bind_process_to_device(peer, p);
if (IS_ERR(peer_pdd))
return PTR_ERR(peer_pdd);
ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(peer->adev, kgd_mem,
peer_pdd->drm_priv);
if (ret) {
pr_err("Failed to map to gpu %d/%d\n", j, p->n_pdds);
return ret;
}
}
pr_debug("map memory was successful for the BO\n");
/* create the dmabuf object and export the bo */
if (bo_bucket->alloc_flags
& (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) {
ret = criu_get_prime_handle(kgd_mem, DRM_RDWR,
&bo_bucket->dmabuf_fd);
if (ret)
return ret;
} else {
bo_bucket->dmabuf_fd = KFD_INVALID_FD;
}
return 0;
}
static int criu_restore_bos(struct kfd_process *p,
struct kfd_ioctl_criu_args *args,
uint64_t *priv_offset,
uint64_t max_priv_data_size)
{
struct kfd_criu_bo_bucket *bo_buckets = NULL;
struct kfd_criu_bo_priv_data *bo_privs = NULL;
int ret = 0;
uint32_t i = 0;
if (*priv_offset + (args->num_bos * sizeof(*bo_privs)) > max_priv_data_size)
return -EINVAL;
/* Prevent MMU notifications until stage-4 IOCTL (CRIU_RESUME) is received */
amdgpu_amdkfd_block_mmu_notifications(p->kgd_process_info);
bo_buckets = kvmalloc_array(args->num_bos, sizeof(*bo_buckets), GFP_KERNEL);
if (!bo_buckets)
return -ENOMEM;
ret = copy_from_user(bo_buckets, (void __user *)args->bos,
args->num_bos * sizeof(*bo_buckets));
if (ret) {
pr_err("Failed to copy BOs information from user\n");
ret = -EFAULT;
goto exit;
}
bo_privs = kvmalloc_array(args->num_bos, sizeof(*bo_privs), GFP_KERNEL);
if (!bo_privs) {
ret = -ENOMEM;
goto exit;
}
ret = copy_from_user(bo_privs, (void __user *)args->priv_data + *priv_offset,
args->num_bos * sizeof(*bo_privs));
if (ret) {
pr_err("Failed to copy BOs information from user\n");
ret = -EFAULT;
goto exit;
}
*priv_offset += args->num_bos * sizeof(*bo_privs);
/* Create and map new BOs */
for (; i < args->num_bos; i++) {
ret = criu_restore_bo(p, &bo_buckets[i], &bo_privs[i]);
if (ret) {
pr_debug("Failed to restore BO[%d] ret%d\n", i, ret);
goto exit;
}
} /* done */
/* Copy only the buckets back so user can read bo_buckets[N].restored_offset */
ret = copy_to_user((void __user *)args->bos,
bo_buckets,
(args->num_bos * sizeof(*bo_buckets)));
if (ret)
ret = -EFAULT;
exit:
while (ret && i--) {
if (bo_buckets[i].alloc_flags
& (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT))
close_fd(bo_buckets[i].dmabuf_fd);
}
kvfree(bo_buckets);
kvfree(bo_privs);
return ret;
}
static int criu_restore_objects(struct file *filep,
struct kfd_process *p,
struct kfd_ioctl_criu_args *args,
uint64_t *priv_offset,
uint64_t max_priv_data_size)
{
int ret = 0;
uint32_t i;
BUILD_BUG_ON(offsetof(struct kfd_criu_queue_priv_data, object_type));
BUILD_BUG_ON(offsetof(struct kfd_criu_event_priv_data, object_type));
BUILD_BUG_ON(offsetof(struct kfd_criu_svm_range_priv_data, object_type));
for (i = 0; i < args->num_objects; i++) {
uint32_t object_type;
if (*priv_offset + sizeof(object_type) > max_priv_data_size) {
pr_err("Invalid private data size\n");
return -EINVAL;
}
ret = get_user(object_type, (uint32_t __user *)(args->priv_data + *priv_offset));
if (ret) {
pr_err("Failed to copy private information from user\n");
goto exit;
}
switch (object_type) {
case KFD_CRIU_OBJECT_TYPE_QUEUE:
ret = kfd_criu_restore_queue(p, (uint8_t __user *)args->priv_data,
priv_offset, max_priv_data_size);
if (ret)
goto exit;
break;
case KFD_CRIU_OBJECT_TYPE_EVENT:
ret = kfd_criu_restore_event(filep, p, (uint8_t __user *)args->priv_data,
priv_offset, max_priv_data_size);
if (ret)
goto exit;
break;
case KFD_CRIU_OBJECT_TYPE_SVM_RANGE:
ret = kfd_criu_restore_svm(p, (uint8_t __user *)args->priv_data,
priv_offset, max_priv_data_size);
if (ret)
goto exit;
break;
default:
pr_err("Invalid object type:%u at index:%d\n", object_type, i);
ret = -EINVAL;
goto exit;
}
}
exit:
return ret;
}
static int criu_restore(struct file *filep,
struct kfd_process *p,
struct kfd_ioctl_criu_args *args)
{
uint64_t priv_offset = 0;
int ret = 0;
pr_debug("CRIU restore (num_devices:%u num_bos:%u num_objects:%u priv_data_size:%llu)\n",
args->num_devices, args->num_bos, args->num_objects, args->priv_data_size);
if (!args->bos || !args->devices || !args->priv_data || !args->priv_data_size ||
!args->num_devices || !args->num_bos)
return -EINVAL;
mutex_lock(&p->mutex);
/*
* Set the process to evicted state to avoid running any new queues before all the memory
* mappings are ready.
*/
ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_CRIU_RESTORE);
if (ret)
goto exit_unlock;
/* Each function below adjusts priv_offset by the number of bytes it consumed */
ret = criu_restore_process(p, args, &priv_offset, args->priv_data_size);
if (ret)
goto exit_unlock;
ret = criu_restore_devices(p, args, &priv_offset, args->priv_data_size);
if (ret)
goto exit_unlock;
ret = criu_restore_bos(p, args, &priv_offset, args->priv_data_size);
if (ret)
goto exit_unlock;
ret = criu_restore_objects(filep, p, args, &priv_offset, args->priv_data_size);
if (ret)
goto exit_unlock;
if (priv_offset != args->priv_data_size) {
pr_err("Invalid private data size\n");
ret = -EINVAL;
}
exit_unlock:
mutex_unlock(&p->mutex);
if (ret)
pr_err("Failed to restore CRIU ret:%d\n", ret);
else
pr_debug("CRIU restore successful\n");
return ret;
}
static int criu_unpause(struct file *filep,
struct kfd_process *p,
struct kfd_ioctl_criu_args *args)
{
int ret;
mutex_lock(&p->mutex);
if (!p->queues_paused) {
mutex_unlock(&p->mutex);
return -EINVAL;
}
ret = kfd_process_restore_queues(p);
if (ret)
pr_err("Failed to unpause queues ret:%d\n", ret);
else
p->queues_paused = false;
mutex_unlock(&p->mutex);
return ret;
}
static int criu_resume(struct file *filep,
struct kfd_process *p,
struct kfd_ioctl_criu_args *args)
{
struct kfd_process *target = NULL;
struct pid *pid = NULL;
int ret = 0;
pr_debug("Inside %s, target pid for criu restore: %d\n", __func__,
args->pid);
pid = find_get_pid(args->pid);
if (!pid) {
pr_err("Cannot find pid info for %i\n", args->pid);
return -ESRCH;
}
pr_debug("calling kfd_lookup_process_by_pid\n");
target = kfd_lookup_process_by_pid(pid);
put_pid(pid);
if (!target) {
pr_debug("Cannot find process info for %i\n", args->pid);
return -ESRCH;
}
mutex_lock(&target->mutex);
ret = kfd_criu_resume_svm(target);
if (ret) {
pr_err("kfd_criu_resume_svm failed for %i\n", args->pid);
goto exit;
}
ret = amdgpu_amdkfd_criu_resume(target->kgd_process_info);
if (ret)
pr_err("amdgpu_amdkfd_criu_resume failed for %i\n", args->pid);
exit:
mutex_unlock(&target->mutex);
kfd_unref_process(target);
return ret;
}
static int criu_process_info(struct file *filep,
struct kfd_process *p,
struct kfd_ioctl_criu_args *args)
{
int ret = 0;
mutex_lock(&p->mutex);
if (!p->n_pdds) {
pr_err("No pdd for given process\n");
ret = -ENODEV;
goto err_unlock;
}
ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_CRIU_CHECKPOINT);
if (ret)
goto err_unlock;
p->queues_paused = true;
args->pid = task_pid_nr_ns(p->lead_thread,
task_active_pid_ns(p->lead_thread));
ret = criu_get_process_object_info(p, &args->num_devices, &args->num_bos,
&args->num_objects, &args->priv_data_size);
if (ret)
goto err_unlock;
dev_dbg(kfd_device, "Num of devices:%u bos:%u objects:%u priv_data_size:%lld\n",
args->num_devices, args->num_bos, args->num_objects,
args->priv_data_size);
err_unlock:
if (ret) {
kfd_process_restore_queues(p);
p->queues_paused = false;
}
mutex_unlock(&p->mutex);
return ret;
}
static int kfd_ioctl_criu(struct file *filep, struct kfd_process *p, void *data)
{
struct kfd_ioctl_criu_args *args = data;
int ret;
dev_dbg(kfd_device, "CRIU operation: %d\n", args->op);
switch (args->op) {
case KFD_CRIU_OP_PROCESS_INFO:
ret = criu_process_info(filep, p, args);
break;
case KFD_CRIU_OP_CHECKPOINT:
ret = criu_checkpoint(filep, p, args);
break;
case KFD_CRIU_OP_UNPAUSE:
ret = criu_unpause(filep, p, args);
break;
case KFD_CRIU_OP_RESTORE:
ret = criu_restore(filep, p, args);
break;
case KFD_CRIU_OP_RESUME:
ret = criu_resume(filep, p, args);
break;
default:
dev_dbg(kfd_device, "Unsupported CRIU operation:%d\n", args->op);
ret = -EINVAL;
break;
}
if (ret)
dev_dbg(kfd_device, "CRIU operation:%d err:%d\n", args->op, ret);
return ret;
}
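/*
 * Illustrative only (not driver code): a CRIU plugin is expected to drive
 * these ops roughly in the following order; the fd and buffer handling is
 * simplified and assumed, not taken from this file.
 *
 *   struct kfd_ioctl_criu_args args = { .op = KFD_CRIU_OP_PROCESS_INFO };
 *   ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args);  // pauses queues, returns counts/sizes
 *   // allocate buffers for args.num_devices, args.num_bos, args.priv_data_size
 *   args.op = KFD_CRIU_OP_CHECKPOINT;
 *   ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args);  // dumps process, devices, BOs, objects
 *   args.op = KFD_CRIU_OP_UNPAUSE;
 *   ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args);  // lets the dumped process run again
 *
 * On the restore side, KFD_CRIU_OP_RESTORE is issued with the saved buffers
 * and, once memory contents have been re-populated, KFD_CRIU_OP_RESUME
 * unblocks MMU notifications so the restored process can run. The caller
 * needs CAP_CHECKPOINT_RESTORE or CAP_SYS_ADMIN (see kfd_ioctl() below).
 */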
static int runtime_enable(struct kfd_process *p, uint64_t r_debug,
bool enable_ttmp_setup)
{
int i = 0, ret = 0;
if (p->is_runtime_retry)
goto retry;
if (p->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_DISABLED)
return -EBUSY;
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
if (pdd->qpd.queue_count)
return -EEXIST;
/*
* Set up TTMPs by default.
* This call must remain here so that MES ADD QUEUE can use
* skip_process_ctx_clear unconditionally: the first call to
* SET_SHADER_DEBUGGER clears any stale process context data
* saved in MES.
*/
if (pdd->dev->kfd->shared_resources.enable_mes)
kfd_dbg_set_mes_debug_mode(pdd, !kfd_dbg_has_cwsr_workaround(pdd->dev));
}
p->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_ENABLED;
p->runtime_info.r_debug = r_debug;
p->runtime_info.ttmp_setup = enable_ttmp_setup;
if (p->runtime_info.ttmp_setup) {
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
if (!kfd_dbg_is_rlc_restore_supported(pdd->dev)) {
amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
pdd->dev->kfd2kgd->enable_debug_trap(
pdd->dev->adev,
true,
pdd->dev->vm_info.last_vmid_kfd);
} else if (kfd_dbg_is_per_vmid_supported(pdd->dev)) {
pdd->spi_dbg_override = pdd->dev->kfd2kgd->enable_debug_trap(
pdd->dev->adev,
false,
0);
}
}
}
retry:
if (p->debug_trap_enabled) {
if (!p->is_runtime_retry) {
kfd_dbg_trap_activate(p);
kfd_dbg_ev_raise(KFD_EC_MASK(EC_PROCESS_RUNTIME),
p, NULL, 0, false, NULL, 0);
}
mutex_unlock(&p->mutex);
ret = down_interruptible(&p->runtime_enable_sema);
mutex_lock(&p->mutex);
p->is_runtime_retry = !!ret;
}
return ret;
}
static int runtime_disable(struct kfd_process *p)
{
int i = 0, ret;
bool was_enabled = p->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED;
p->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_DISABLED;
p->runtime_info.r_debug = 0;
if (p->debug_trap_enabled) {
if (was_enabled)
kfd_dbg_trap_deactivate(p, false, 0);
if (!p->is_runtime_retry)
kfd_dbg_ev_raise(KFD_EC_MASK(EC_PROCESS_RUNTIME),
p, NULL, 0, false, NULL, 0);
mutex_unlock(&p->mutex);
ret = down_interruptible(&p->runtime_enable_sema);
mutex_lock(&p->mutex);
p->is_runtime_retry = !!ret;
if (ret)
return ret;
}
if (was_enabled && p->runtime_info.ttmp_setup) {
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
if (!kfd_dbg_is_rlc_restore_supported(pdd->dev))
amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
}
}
p->runtime_info.ttmp_setup = false;
/* disable ttmp setup */
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
if (kfd_dbg_is_per_vmid_supported(pdd->dev)) {
pdd->spi_dbg_override =
pdd->dev->kfd2kgd->disable_debug_trap(
pdd->dev->adev,
false,
pdd->dev->vm_info.last_vmid_kfd);
if (!pdd->dev->kfd->shared_resources.enable_mes)
debug_refresh_runlist(pdd->dev->dqm);
else
kfd_dbg_set_mes_debug_mode(pdd,
!kfd_dbg_has_cwsr_workaround(pdd->dev));
}
}
return 0;
}
static int kfd_ioctl_runtime_enable(struct file *filep, struct kfd_process *p, void *data)
{
struct kfd_ioctl_runtime_enable_args *args = data;
int r;
mutex_lock(&p->mutex);
if (args->mode_mask & KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK)
r = runtime_enable(p, args->r_debug,
!!(args->mode_mask & KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK));
else
r = runtime_disable(p);
mutex_unlock(&p->mutex);
return r;
}
static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, void *data)
{
struct kfd_ioctl_dbg_trap_args *args = data;
struct task_struct *thread = NULL;
struct mm_struct *mm = NULL;
struct pid *pid = NULL;
struct kfd_process *target = NULL;
struct kfd_process_device *pdd = NULL;
int r = 0;
if (sched_policy == KFD_SCHED_POLICY_NO_HWS) {
pr_err("Debugging does not support sched_policy %i", sched_policy);
return -EINVAL;
}
pid = find_get_pid(args->pid);
if (!pid) {
pr_debug("Cannot find pid info for %i\n", args->pid);
r = -ESRCH;
goto out;
}
thread = get_pid_task(pid, PIDTYPE_PID);
if (!thread) {
r = -ESRCH;
goto out;
}
mm = get_task_mm(thread);
if (!mm) {
r = -ESRCH;
goto out;
}
if (args->op == KFD_IOC_DBG_TRAP_ENABLE) {
bool create_process;
rcu_read_lock();
create_process = thread && thread != current && ptrace_parent(thread) == current;
rcu_read_unlock();
target = create_process ? kfd_create_process(thread) :
kfd_lookup_process_by_pid(pid);
} else {
target = kfd_lookup_process_by_pid(pid);
}
if (IS_ERR_OR_NULL(target)) {
pr_debug("Cannot find process PID %i to debug\n", args->pid);
r = target ? PTR_ERR(target) : -ESRCH;
goto out;
}
/* Check if target is still PTRACED. */
rcu_read_lock();
if (target != p && args->op != KFD_IOC_DBG_TRAP_DISABLE
&& ptrace_parent(target->lead_thread) != current) {
pr_err("PID %i is not PTRACED and cannot be debugged\n", args->pid);
r = -EPERM;
}
rcu_read_unlock();
if (r)
goto out;
mutex_lock(&target->mutex);
if (args->op != KFD_IOC_DBG_TRAP_ENABLE && !target->debug_trap_enabled) {
pr_err("PID %i not debug enabled for op %i\n", args->pid, args->op);
r = -EINVAL;
goto unlock_out;
}
if (target->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_ENABLED &&
(args->op == KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE ||
args->op == KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE ||
args->op == KFD_IOC_DBG_TRAP_SUSPEND_QUEUES ||
args->op == KFD_IOC_DBG_TRAP_RESUME_QUEUES ||
args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH ||
args->op == KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH ||
args->op == KFD_IOC_DBG_TRAP_SET_FLAGS)) {
r = -EPERM;
goto unlock_out;
}
if (args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH ||
args->op == KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH) {
int user_gpu_id = kfd_process_get_user_gpu_id(target,
args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH ?
args->set_node_address_watch.gpu_id :
args->clear_node_address_watch.gpu_id);
pdd = kfd_process_device_data_by_id(target, user_gpu_id);
if (user_gpu_id == -EINVAL || !pdd) {
r = -ENODEV;
goto unlock_out;
}
}
switch (args->op) {
case KFD_IOC_DBG_TRAP_ENABLE:
if (target != p)
target->debugger_process = p;
r = kfd_dbg_trap_enable(target,
args->enable.dbg_fd,
(void __user *)args->enable.rinfo_ptr,
&args->enable.rinfo_size);
if (!r)
target->exception_enable_mask = args->enable.exception_mask;
break;
case KFD_IOC_DBG_TRAP_DISABLE:
r = kfd_dbg_trap_disable(target);
break;
case KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT:
r = kfd_dbg_send_exception_to_runtime(target,
args->send_runtime_event.gpu_id,
args->send_runtime_event.queue_id,
args->send_runtime_event.exception_mask);
break;
case KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED:
kfd_dbg_set_enabled_debug_exception_mask(target,
args->set_exceptions_enabled.exception_mask);
break;
case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE:
r = kfd_dbg_trap_set_wave_launch_override(target,
args->launch_override.override_mode,
args->launch_override.enable_mask,
args->launch_override.support_request_mask,
&args->launch_override.enable_mask,
&args->launch_override.support_request_mask);
break;
case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE:
r = kfd_dbg_trap_set_wave_launch_mode(target,
args->launch_mode.launch_mode);
break;
case KFD_IOC_DBG_TRAP_SUSPEND_QUEUES:
r = suspend_queues(target,
args->suspend_queues.num_queues,
args->suspend_queues.grace_period,
args->suspend_queues.exception_mask,
(uint32_t *)args->suspend_queues.queue_array_ptr);
break;
case KFD_IOC_DBG_TRAP_RESUME_QUEUES:
r = resume_queues(target, args->resume_queues.num_queues,
(uint32_t *)args->resume_queues.queue_array_ptr);
break;
case KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH:
r = kfd_dbg_trap_set_dev_address_watch(pdd,
args->set_node_address_watch.address,
args->set_node_address_watch.mask,
&args->set_node_address_watch.id,
args->set_node_address_watch.mode);
break;
case KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH:
r = kfd_dbg_trap_clear_dev_address_watch(pdd,
args->clear_node_address_watch.id);
break;
case KFD_IOC_DBG_TRAP_SET_FLAGS:
r = kfd_dbg_trap_set_flags(target, &args->set_flags.flags);
break;
case KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT:
r = kfd_dbg_ev_query_debug_event(target,
&args->query_debug_event.queue_id,
&args->query_debug_event.gpu_id,
args->query_debug_event.exception_mask,
&args->query_debug_event.exception_mask);
break;
case KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO:
r = kfd_dbg_trap_query_exception_info(target,
args->query_exception_info.source_id,
args->query_exception_info.exception_code,
args->query_exception_info.clear_exception,
(void __user *)args->query_exception_info.info_ptr,
&args->query_exception_info.info_size);
break;
case KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT:
r = pqm_get_queue_snapshot(&target->pqm,
args->queue_snapshot.exception_mask,
(void __user *)args->queue_snapshot.snapshot_buf_ptr,
&args->queue_snapshot.num_queues,
&args->queue_snapshot.entry_size);
break;
case KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT:
r = kfd_dbg_trap_device_snapshot(target,
args->device_snapshot.exception_mask,
(void __user *)args->device_snapshot.snapshot_buf_ptr,
&args->device_snapshot.num_devices,
&args->device_snapshot.entry_size);
break;
default:
pr_err("Invalid option: %i\n", args->op);
r = -EINVAL;
}
unlock_out:
mutex_unlock(&target->mutex);
out:
if (thread)
put_task_struct(thread);
if (mm)
mmput(mm);
if (pid)
put_pid(pid);
if (target)
kfd_unref_process(target);
return r;
}
#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
.cmd_drv = 0, .name = #ioctl}
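/*
 * Illustrative expansion (not additional code): for example,
 * AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION, kfd_ioctl_get_version, 0)
 * becomes
 *
 *   [_IOC_NR(AMDKFD_IOC_GET_VERSION)] = {
 *           .cmd = AMDKFD_IOC_GET_VERSION,
 *           .func = kfd_ioctl_get_version,
 *           .flags = 0,
 *           .cmd_drv = 0,
 *           .name = "AMDKFD_IOC_GET_VERSION"
 *   },
 *
 * so the table below is indexed directly by the ioctl's _IOC_NR value.
 */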
/** Ioctl table */
static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION,
kfd_ioctl_get_version, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE,
kfd_ioctl_create_queue, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE,
kfd_ioctl_destroy_queue, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY,
kfd_ioctl_set_memory_policy, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS,
kfd_ioctl_get_clock_counters, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES,
kfd_ioctl_get_process_apertures, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE,
kfd_ioctl_update_queue, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_EVENT,
kfd_ioctl_create_event, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_EVENT,
kfd_ioctl_destroy_event, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_EVENT,
kfd_ioctl_set_event, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_RESET_EVENT,
kfd_ioctl_reset_event, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_WAIT_EVENTS,
kfd_ioctl_wait_events, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_REGISTER_DEPRECATED,
kfd_ioctl_dbg_register, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED,
kfd_ioctl_dbg_unregister, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED,
kfd_ioctl_dbg_address_watch, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED,
kfd_ioctl_dbg_wave_control, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_SCRATCH_BACKING_VA,
kfd_ioctl_set_scratch_backing_va, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_TILE_CONFIG,
kfd_ioctl_get_tile_config, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_TRAP_HANDLER,
kfd_ioctl_set_trap_handler, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES_NEW,
kfd_ioctl_get_process_apertures_new, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_ACQUIRE_VM,
kfd_ioctl_acquire_vm, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_MEMORY_OF_GPU,
kfd_ioctl_alloc_memory_of_gpu, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_FREE_MEMORY_OF_GPU,
kfd_ioctl_free_memory_of_gpu, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_MAP_MEMORY_TO_GPU,
kfd_ioctl_map_memory_to_gpu, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU,
kfd_ioctl_unmap_memory_from_gpu, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_CU_MASK,
kfd_ioctl_set_cu_mask, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_QUEUE_WAVE_STATE,
kfd_ioctl_get_queue_wave_state, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_DMABUF_INFO,
kfd_ioctl_get_dmabuf_info, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
kfd_ioctl_import_dmabuf, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_QUEUE_GWS,
kfd_ioctl_alloc_queue_gws, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_SMI_EVENTS,
kfd_ioctl_smi_events, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_SVM, kfd_ioctl_svm, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_XNACK_MODE,
kfd_ioctl_set_xnack_mode, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_CRIU_OP,
kfd_ioctl_criu, KFD_IOC_FLAG_CHECKPOINT_RESTORE),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_AVAILABLE_MEMORY,
kfd_ioctl_get_available_memory, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_EXPORT_DMABUF,
kfd_ioctl_export_dmabuf, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_RUNTIME_ENABLE,
kfd_ioctl_runtime_enable, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_TRAP,
kfd_ioctl_set_debug_trap, 0),
};
#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
struct kfd_process *process;
amdkfd_ioctl_t *func;
const struct amdkfd_ioctl_desc *ioctl = NULL;
unsigned int nr = _IOC_NR(cmd);
char stack_kdata[128];
char *kdata = NULL;
unsigned int usize, asize;
int retcode = -EINVAL;
bool ptrace_attached = false;
if (nr >= AMDKFD_CORE_IOCTL_COUNT)
goto err_i1;
if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
u32 amdkfd_size;
ioctl = &amdkfd_ioctls[nr];
amdkfd_size = _IOC_SIZE(ioctl->cmd);
usize = asize = _IOC_SIZE(cmd);
if (amdkfd_size > asize)
asize = amdkfd_size;
cmd = ioctl->cmd;
} else
goto err_i1;
dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg);
/* Get the process struct from the filep. Only the process
* that opened /dev/kfd can use the file descriptor. Child
* processes need to create their own KFD device context.
*/
process = filep->private_data;
rcu_read_lock();
if ((ioctl->flags & KFD_IOC_FLAG_CHECKPOINT_RESTORE) &&
ptrace_parent(process->lead_thread) == current)
ptrace_attached = true;
rcu_read_unlock();
if (process->lead_thread != current->group_leader
&& !ptrace_attached) {
dev_dbg(kfd_device, "Using KFD FD in wrong process\n");
retcode = -EBADF;
goto err_i1;
}
/* Do not trust userspace, use our own definition */
func = ioctl->func;
if (unlikely(!func)) {
dev_dbg(kfd_device, "no function\n");
retcode = -EINVAL;
goto err_i1;
}
/*
* Versions of docker shipped in Ubuntu 18.xx and 20.xx do not support
* CAP_CHECKPOINT_RESTORE, so we also allow access with CAP_SYS_ADMIN, which is
* a more privileged capability.
*/
if (unlikely(ioctl->flags & KFD_IOC_FLAG_CHECKPOINT_RESTORE)) {
if (!capable(CAP_CHECKPOINT_RESTORE) &&
!capable(CAP_SYS_ADMIN)) {
retcode = -EACCES;
goto err_i1;
}
}
if (cmd & (IOC_IN | IOC_OUT)) {
if (asize <= sizeof(stack_kdata)) {
kdata = stack_kdata;
} else {
kdata = kmalloc(asize, GFP_KERNEL);
if (!kdata) {
retcode = -ENOMEM;
goto err_i1;
}
}
if (asize > usize)
memset(kdata + usize, 0, asize - usize);
}
if (cmd & IOC_IN) {
if (copy_from_user(kdata, (void __user *)arg, usize) != 0) {
retcode = -EFAULT;
goto err_i1;
}
} else if (cmd & IOC_OUT) {
memset(kdata, 0, usize);
}
retcode = func(filep, process, kdata);
if (cmd & IOC_OUT)
if (copy_to_user((void __user *)arg, kdata, usize) != 0)
retcode = -EFAULT;
err_i1:
if (!ioctl)
dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
task_pid_nr(current), cmd, nr);
if (kdata != stack_kdata)
kfree(kdata);
if (retcode)
dev_dbg(kfd_device, "ioctl cmd (#0x%x), arg 0x%lx, ret = %d\n",
nr, arg, retcode);
return retcode;
}
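/*
 * Illustrative user-space usage (not driver code), assuming the uapi header
 * <linux/kfd_ioctl.h> and that this process opened /dev/kfd itself:
 *
 *   struct kfd_ioctl_get_version_args ver = {0};
 *   int fd = open("/dev/kfd", O_RDWR | O_CLOEXEC);
 *
 *   if (fd >= 0 && ioctl(fd, AMDKFD_IOC_GET_VERSION, &ver) == 0)
 *           printf("KFD ioctl interface %u.%u\n",
 *                  ver.major_version, ver.minor_version);
 *
 * AMDKFD_IOC_GET_VERSION is an _IOR-style command, so kfd_ioctl() above
 * zeroes the kernel copy, calls kfd_ioctl_get_version() and copies the
 * result back to user space.
 */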
static int kfd_mmio_mmap(struct kfd_node *dev, struct kfd_process *process,
struct vm_area_struct *vma)
{
phys_addr_t address;
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EINVAL;
address = dev->adev->rmmio_remap.bus_addr;
vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
VM_DONTDUMP | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
pr_debug("pasid 0x%x mapping mmio page\n"
" target user address == 0x%08llX\n"
" physical address == 0x%08llX\n"
" vm_flags == 0x%04lX\n"
" size == 0x%04lX\n",
process->pasid, (unsigned long long) vma->vm_start,
address, vma->vm_flags, PAGE_SIZE);
return io_remap_pfn_range(vma,
vma->vm_start,
address >> PAGE_SHIFT,
PAGE_SIZE,
vma->vm_page_prot);
}
static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct kfd_process *process;
struct kfd_node *dev = NULL;
unsigned long mmap_offset;
unsigned int gpu_id;
process = kfd_get_process(current);
if (IS_ERR(process))
return PTR_ERR(process);
mmap_offset = vma->vm_pgoff << PAGE_SHIFT;
gpu_id = KFD_MMAP_GET_GPU_ID(mmap_offset);
if (gpu_id)
dev = kfd_device_by_id(gpu_id);
switch (mmap_offset & KFD_MMAP_TYPE_MASK) {
case KFD_MMAP_TYPE_DOORBELL:
if (!dev)
return -ENODEV;
return kfd_doorbell_mmap(dev, process, vma);
case KFD_MMAP_TYPE_EVENTS:
return kfd_event_mmap(process, vma);
case KFD_MMAP_TYPE_RESERVED_MEM:
if (!dev)
return -ENODEV;
return kfd_reserved_mem_mmap(dev, process, vma);
case KFD_MMAP_TYPE_MMIO:
if (!dev)
return -ENODEV;
return kfd_mmio_mmap(dev, process, vma);
}
return -EFAULT;
}
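/*
 * Illustrative only: the offset passed to mmap() here is a value KFD handed
 * out earlier (for example a queue's doorbell_offset or a CRIU bucket's
 * restored_offset), which packs the mapping type and GPU id into the high
 * bits. A doorbell mapping would be requested roughly as
 *
 *   mmap(NULL, doorbell_slice_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *        kfd_fd, KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(gpu_id));
 *
 * where doorbell_slice_size and kfd_fd are placeholders and the macros match
 * the decoding done in kfd_mmap() above. Applications must use the offset
 * returned by the driver rather than constructing it themselves.
 */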
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_chardev.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2016-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include "kfd_priv.h"
static struct dentry *debugfs_root;
static int kfd_debugfs_open(struct inode *inode, struct file *file)
{
int (*show)(struct seq_file *, void *) = inode->i_private;
return single_open(file, show, NULL);
}
static int kfd_debugfs_hang_hws_read(struct seq_file *m, void *data)
{
seq_puts(m, "echo gpu_id > hang_hws\n");
return 0;
}
static ssize_t kfd_debugfs_hang_hws_write(struct file *file,
const char __user *user_buf, size_t size, loff_t *ppos)
{
struct kfd_node *dev;
char tmp[16];
uint32_t gpu_id;
int ret = -EINVAL;
memset(tmp, 0, 16);
if (size >= 16) {
pr_err("Invalid input for gpu id.\n");
goto out;
}
if (copy_from_user(tmp, user_buf, size)) {
ret = -EFAULT;
goto out;
}
if (kstrtoint(tmp, 10, &gpu_id)) {
pr_err("Invalid input for gpu id.\n");
goto out;
}
dev = kfd_device_by_id(gpu_id);
if (dev) {
kfd_debugfs_hang_hws(dev);
ret = size;
} else
pr_err("Cannot find device %d.\n", gpu_id);
out:
return ret;
}
static const struct file_operations kfd_debugfs_fops = {
.owner = THIS_MODULE,
.open = kfd_debugfs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static const struct file_operations kfd_debugfs_hang_hws_fops = {
.owner = THIS_MODULE,
.open = kfd_debugfs_open,
.read = seq_read,
.write = kfd_debugfs_hang_hws_write,
.llseek = seq_lseek,
.release = single_release,
};
void kfd_debugfs_init(void)
{
debugfs_root = debugfs_create_dir("kfd", NULL);
debugfs_create_file("mqds", S_IFREG | 0444, debugfs_root,
kfd_debugfs_mqds_by_process, &kfd_debugfs_fops);
debugfs_create_file("hqds", S_IFREG | 0444, debugfs_root,
kfd_debugfs_hqds_by_device, &kfd_debugfs_fops);
debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
kfd_debugfs_rls_by_device, &kfd_debugfs_fops);
debugfs_create_file("hang_hws", S_IFREG | 0200, debugfs_root,
kfd_debugfs_hang_hws_read, &kfd_debugfs_hang_hws_fops);
debugfs_create_file("mem_limit", S_IFREG | 0200, debugfs_root,
kfd_debugfs_kfd_mem_limits, &kfd_debugfs_fops);
}
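/*
 * Example usage from a shell, assuming debugfs is mounted at the default
 * /sys/kernel/debug location:
 *
 *   cat /sys/kernel/debug/kfd/mqds                 # MQDs per process
 *   cat /sys/kernel/debug/kfd/hqds                 # HQDs per device
 *   cat /sys/kernel/debug/kfd/rls                  # runlists per device
 *   echo <gpu_id> > /sys/kernel/debug/kfd/hang_hws # trigger HWS hang handling
 */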
void kfd_debugfs_fini(void)
{
debugfs_remove_recursive(debugfs_root);
}
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c |
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "kfd_device_queue_manager.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "soc21_enum.h"
static int update_qpd_v11(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static void init_sdma_vm_v11(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd);
void device_queue_manager_init_v11(
struct device_queue_manager_asic_ops *asic_ops)
{
asic_ops->update_qpd = update_qpd_v11;
asic_ops->init_sdma_vm = init_sdma_vm_v11;
asic_ops->mqd_manager_init = mqd_manager_init_v11;
}
static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)
{
uint32_t shared_base = pdd->lds_base >> 48;
uint32_t private_base = pdd->scratch_base >> 48;
return (shared_base << SH_MEM_BASES__SHARED_BASE__SHIFT) |
private_base;
}
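/*
 * Worked example, assuming the GFXv9+ aperture placement from
 * kfd_flat_memory.c (lds_base = 1ULL << 48, scratch_base = 2ULL << 48) and
 * SHARED_BASE occupying bits 31:16 of SH_MEM_BASES:
 *
 *   shared_base  = 0x0001000000000000 >> 48 = 0x1
 *   private_base = 0x0002000000000000 >> 48 = 0x2
 *   SH_MEM_BASES = (0x1 << 16) | 0x2        = 0x00010002
 */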
static int update_qpd_v11(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct kfd_process_device *pdd;
pdd = qpd_to_pdd(qpd);
/* check if sh_mem_config register already configured */
if (qpd->sh_mem_config == 0) {
qpd->sh_mem_config =
(SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) |
(3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT);
qpd->sh_mem_ape1_limit = 0;
qpd->sh_mem_ape1_base = 0;
}
qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);
pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases);
return 0;
}
static void init_sdma_vm_v11(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd)
{
/* Not needed on SDMAv4 onwards any more */
q->properties.sdma_vm_addr = 0;
}
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2014-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <uapi/linux/kfd_ioctl.h>
#include <linux/time.h>
#include "kfd_priv.h"
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/processor.h>
/*
* The primary memory I/O features being added for revisions of gfxip
* beyond 7.0 (Kaveri) are:
*
* Access to ATC/IOMMU mapped memory w/ associated extension of VA to 48b
*
* “Flat” shader memory access – These are new shader vector memory
* operations that do not reference a T#/V# so a “pointer” is what is
* sourced from the vector gprs for direct access to memory.
* This pointer space has the Shared(LDS) and Private(Scratch) memory
* mapped into this pointer space as apertures.
* The hardware then determines how to direct the memory request
* based on what apertures the request falls in.
*
* Unaligned support and alignment check
*
*
* System Unified Address - SUA
*
* The standard usage for GPU virtual addresses are that they are mapped by
* a set of page tables we call GPUVM and these page tables are managed by
* a combination of vidMM/driver software components. The current virtual
* address (VA) range for GPUVM is 40b.
*
* As of gfxip7.1 and beyond we’re adding the ability for compute memory
* clients (CP/RLC, DMA, SHADER(ifetch, scalar, and vector ops)) to access
* the same page tables used by host x86 processors and that are managed by
* the operating system. This is via a technique and hardware called ATC/IOMMU.
* The GPU has the capability of accessing both the GPUVM and ATC address
* spaces for a given VMID (process) simultaneously and we call this feature
* system unified address (SUA).
*
* There are three fundamental address modes of operation for a given VMID
* (process) on the GPU:
*
* HSA64 – 64b pointers and the default address space is ATC
* HSA32 – 32b pointers and the default address space is ATC
* GPUVM – 64b pointers and the default address space is GPUVM (driver
* model mode)
*
*
* HSA64 - ATC/IOMMU 64b
*
* A 64b pointer in the AMD64/IA64 CPU architecture is not fully utilized
* by the CPU so an AMD CPU can only access the high area
* (VA[63:47] == 0x1FFFF) and low area (VA[63:47] == 0) of the address space
* so the actual VA carried to translation is 48b. There is a “hole” in
* the middle of the 64b VA space.
*
* The GPU not only has access to all of the CPU accessible address space via
* ATC/IOMMU, but it also has access to the GPUVM address space. The “system
* unified address” feature (SUA) is the mapping of GPUVM and ATC address
* spaces into a unified pointer space. The method we take for 64b mode is
* to map the full 40b GPUVM address space into the hole of the 64b address
* space.
* The GPUVM_Base/GPUVM_Limit defines the aperture in the 64b space where we
* direct requests to be translated via GPUVM page tables instead of the
* IOMMU path.
*
*
* 64b to 49b Address conversion
*
* Note that there are still significant portions of unused regions (holes)
* in the 64b address space even for the GPU. There are several places in
* the pipeline (sw and hw), we wish to compress the 64b virtual address
* to a 49b address. This 49b address is constituted of an “ATC” bit
* plus a 48b virtual address. This 49b address is what is passed to the
* translation hardware. ATC==0 means the 48b address is a GPUVM address
* (max of 2^40 – 1) intended to be translated via GPUVM page tables.
* ATC==1 means the 48b address is intended to be translated via IOMMU
* page tables.
*
* A 64b pointer is compared to the apertures that are defined (Base/Limit), in
* this case the GPUVM aperture (red) is defined and if a pointer falls in this
* aperture, we subtract the GPUVM_Base address and set the ATC bit to zero
* as part of the 64b to 49b conversion.
*
* Where this 64b to 49b conversion is done is a function of the usage.
* Most GPU memory access is via memory objects where the driver builds
* a descriptor which consists of a base address and a memory access by
* the GPU usually consists of some kind of an offset or Cartesian coordinate
* that references this memory descriptor. This is the case for shader
* instructions that reference the T# or V# constants, or for specified
* locations of assets (ex. the shader program location). In these cases
* the driver is what handles the 64b to 49b conversion and the base
* address in the descriptor (ex. V# or T# or shader program location)
* is defined as a 48b address w/ an ATC bit. For this usage a given
* memory object cannot straddle multiple apertures in the 64b address
* space. For example a shader program cannot jump in/out between ATC
* and GPUVM space.
*
* In some cases we wish to pass a 64b pointer to the GPU hardware and
* the GPU hw does the 64b to 49b conversion before passing memory
* requests to the cache/memory system. This is the case for the
* S_LOAD and FLAT_* shader memory instructions where we have 64b pointers
* in scalar and vector GPRs respectively.
*
* In all cases (no matter where the 64b -> 49b conversion is done), the gfxip
* hardware sends a 48b address along w/ an ATC bit, to the memory controller
* on the memory request interfaces.
*
* <client>_MC_rdreq_atc // read request ATC bit
*
* 0 : <client>_MC_rdreq_addr is a GPUVM VA
*
* 1 : <client>_MC_rdreq_addr is an ATC VA
*
*
* “Spare” aperture (APE1)
*
* We use the GPUVM aperture to differentiate ATC vs. GPUVM, but we also use
* apertures to set the Mtype field for S_LOAD/FLAT_* ops which is input to the
* config tables for setting cache policies. The “spare” (APE1) aperture is
* motivated by getting a different Mtype from the default.
* The default aperture isn’t an actual base/limit aperture; it is just the
* address space that doesn’t hit any defined base/limit apertures.
* Together, these apertures form the complete picture of the gfxip7.x SUA
* address space. The APE1 aperture can be placed either below or above
* the hole (it cannot be in the hole).
*
*
* General Aperture definitions and rules
*
* An aperture register definition consists of a Base, Limit, Mtype, and
* usually an ATC bit indicating which translation tables that aperture uses.
* In all cases (for SUA and DUA apertures discussed later), aperture base
* and limit definitions are 64KB aligned.
*
* <ape>_Base[63:0] = { <ape>_Base_register[63:16], 0x0000 }
*
* <ape>_Limit[63:0] = { <ape>_Limit_register[63:16], 0xFFFF }
*
* The base and limit are considered inclusive to an aperture so being
* inside an aperture means (address >= Base) AND (address <= Limit).
*
* In no case is a payload that straddles multiple apertures expected to work.
* For example a load_dword_x4 that starts in one aperture and ends in another,
* does not work. For the vector FLAT_* ops we have detection capability in
* the shader for reporting a “memory violation” back to the
* SQ block for use in traps.
* A memory violation results when an op falls into the hole,
* or a payload straddles multiple apertures. The S_LOAD instruction
* does not have this detection.
*
* Apertures cannot overlap.
*
*
*
* HSA32 - ATC/IOMMU 32b
*
* For HSA32 mode, the pointers are interpreted as 32 bits and use a single GPR
* instead of two for the S_LOAD and FLAT_* ops. The entire GPUVM space of 40b
* will not fit so there is only partial visibility to the GPUVM
* space (defined by the aperture) for S_LOAD and FLAT_* ops.
* There is no spare (APE1) aperture for HSA32 mode.
*
*
* GPUVM 64b mode (driver model)
*
* This mode is related to HSA64 in that the difference really is that
* the default aperture is GPUVM (ATC==0) and not ATC space.
* We have gfxip7.x hardware that has FLAT_* and S_LOAD support for
* SUA GPUVM mode, but does not support HSA32/HSA64.
*
*
* Device Unified Address - DUA
*
* Device unified address (DUA) is the name of the feature that maps the
* Shared(LDS) memory and Private(Scratch) memory into the overall address
* space for use by the new FLAT_* vector memory ops. The Shared and
* Private memories are mapped as apertures into the address space,
* and the hardware detects when a FLAT_* memory request is to be redirected
* to the LDS or Scratch memory when it falls into one of these apertures.
* Like the SUA apertures, the Shared/Private apertures are 64KB aligned and
* the base/limit is “in” the aperture. For both HSA64 and GPUVM SUA modes,
* the Shared/Private apertures are always placed in a limited selection of
* options in the hole of the 64b address space. For HSA32 mode, the
* Shared/Private apertures can be placed anywhere in the 32b space
* except at 0.
*
*
* HSA64 Apertures for FLAT_* vector ops
*
* For HSA64 SUA mode, the Shared and Private apertures are always placed
* in the hole w/ a limited selection of possible locations. The requests
* that fall in the private aperture are expanded as a function of the
* work-item id (tid) and redirected to the location of the
* “hidden private memory”. The hidden private can be placed in either GPUVM
* or ATC space. The addresses that fall in the shared aperture are
* re-directed to the on-chip LDS memory hardware.
*
*
* HSA32 Apertures for FLAT_* vector ops
*
* In HSA32 mode, the Private and Shared apertures can be placed anywhere
* in the 32b space except at 0 (Private or Shared Base at zero disables
* the apertures). If the base address of the apertures are non-zero
* (ie apertures exists), the size is always 64KB.
*
*
* GPUVM Apertures for FLAT_* vector ops
*
* In GPUVM mode, the Shared/Private apertures are specified identically
* to HSA64 mode where they are always in the hole at a limited selection
* of locations.
*
*
* Aperture Definitions for SUA and DUA
*
* The interpretation of the aperture register definitions for a given
* VMID is a function of the “SUA Mode” which is one of HSA64, HSA32, or
* GPUVM64 discussed in previous sections. The mode is first decoded, and
* then the remaining register decode is a function of the mode.
*
*
* SUA Mode Decode
*
* For the S_LOAD and FLAT_* shader operations, the SUA mode is decoded from
* the COMPUTE_DISPATCH_INITIATOR:DATA_ATC bit and
* the SH_MEM_CONFIG:PTR32 bits.
*
* COMPUTE_DISPATCH_INITIATOR:DATA_ATC   SH_MEM_CONFIG:PTR32   ->   Mode
*
*                 1                              0            ->   HSA64
*
*                 1                              1            ->   HSA32
*
*                 0                              X            ->   GPUVM64
*
* In general the hardware will ignore the PTR32 bit and treat
* as “0” whenever DATA_ATC = “0”, but sw should set PTR32=0
* when DATA_ATC=0.
*
* The DATA_ATC bit is only set for compute dispatches.
* All “Draw” dispatches are hardcoded to GPUVM64 mode
* for FLAT_* / S_LOAD operations.
*/
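/*
 * Illustrative sketch (not driver or hardware code) of the 64b -> 49b
 * conversion described above, with gpuvm_base/gpuvm_limit standing in for
 * the programmed GPUVM aperture and the canonical-hole handling simplified:
 *
 *   if (va >= gpuvm_base && va <= gpuvm_limit) {
 *           atc  = 0;                        // translate via GPUVM page tables
 *           va48 = va - gpuvm_base;          // at most 40 significant bits
 *   } else {
 *           atc  = 1;                        // translate via ATC/IOMMU
 *           va48 = va & ((1ULL << 48) - 1);  // 48b canonical CPU address
 *   }
 *
 * APE1/default aperture Mtype selection is omitted.
 */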
#define MAKE_GPUVM_APP_BASE_VI(gpu_num) \
(((uint64_t)(gpu_num) << 61) + 0x1000000000000L)
#define MAKE_GPUVM_APP_LIMIT(base, size) \
(((uint64_t)(base) & 0xFFFFFF0000000000UL) + (size) - 1)
#define MAKE_SCRATCH_APP_BASE_VI() \
(((uint64_t)(0x1UL) << 61) + 0x100000000L)
#define MAKE_SCRATCH_APP_LIMIT(base) \
(((uint64_t)base & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF)
#define MAKE_LDS_APP_BASE_VI() \
(((uint64_t)(0x1UL) << 61) + 0x0)
#define MAKE_LDS_APP_LIMIT(base) \
(((uint64_t)(base) & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF)
/* On GFXv9 the LDS and scratch apertures are programmed independently
* using the high 16 bits of the 64-bit virtual address. They must be
* in the hole, which will be the case as long as the high 16 bits are
* not 0.
*
* The aperture sizes are still 4GB implicitly.
*
* A GPUVM aperture is not applicable on GFXv9.
*/
#define MAKE_LDS_APP_BASE_V9() ((uint64_t)(0x1UL) << 48)
#define MAKE_SCRATCH_APP_BASE_V9() ((uint64_t)(0x2UL) << 48)
/* User mode manages most of the SVM aperture address space. The low
* 16MB are reserved for kernel use (CWSR trap handler and kernel IB
* for now).
*/
#define SVM_USER_BASE (u64)(KFD_CWSR_TBA_TMA_SIZE + 2*PAGE_SIZE)
#define SVM_CWSR_BASE (SVM_USER_BASE - KFD_CWSR_TBA_TMA_SIZE)
#define SVM_IB_BASE (SVM_CWSR_BASE - PAGE_SIZE)
static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
{
/*
* The node id cannot be 0 - the three MSBs of the
* aperture must not be 0
*/
pdd->lds_base = MAKE_LDS_APP_BASE_VI();
pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
/* dGPUs: SVM aperture starting at 0
* with small reserved space for kernel.
* Set them to CANONICAL addresses.
*/
pdd->gpuvm_base = SVM_USER_BASE;
pdd->gpuvm_limit =
pdd->dev->kfd->shared_resources.gpuvm_size - 1;
/* dGPUs: the reserved space for kernel
* before SVM
*/
pdd->qpd.cwsr_base = SVM_CWSR_BASE;
pdd->qpd.ib_base = SVM_IB_BASE;
pdd->scratch_base = MAKE_SCRATCH_APP_BASE_VI();
pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
}
static void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id)
{
pdd->lds_base = MAKE_LDS_APP_BASE_V9();
pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
pdd->gpuvm_base = PAGE_SIZE;
pdd->gpuvm_limit =
pdd->dev->kfd->shared_resources.gpuvm_size - 1;
pdd->scratch_base = MAKE_SCRATCH_APP_BASE_V9();
pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
/*
* Place TBA/TMA on opposite side of VM hole to prevent
* stray faults from triggering SVM on these pages.
*/
pdd->qpd.cwsr_base = pdd->dev->kfd->shared_resources.gpuvm_size;
}
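/*
 * Worked example of the GFXv9 layout set up above; the values follow
 * directly from the MAKE_*_V9 macros, while gpuvm_size is device dependent:
 *
 *   lds_base     = 0x0001000000000000   lds_limit     = 0x00010000FFFFFFFF
 *   scratch_base = 0x0002000000000000   scratch_limit = 0x00020000FFFFFFFF
 *   gpuvm_base   = PAGE_SIZE            gpuvm_limit   = gpuvm_size - 1
 *   cwsr_base    = gpuvm_size           (TBA/TMA above the SVM range)
 */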
int kfd_init_apertures(struct kfd_process *process)
{
uint8_t id = 0;
struct kfd_node *dev;
struct kfd_process_device *pdd;
/* Iterating over all devices */
while (kfd_topology_enum_kfd_devices(id, &dev) == 0) {
if (!dev || kfd_devcgroup_check_permission(dev)) {
/* Skip non-GPU devices and devices to which the
* current process has no access. Access can be
* limited by placing the process in a specific
* cgroup hierarchy
*/
id++;
continue;
}
pdd = kfd_create_process_device_data(dev, process);
if (!pdd) {
pr_err("Failed to create process device data\n");
return -ENOMEM;
}
/*
* For a 64-bit process the apertures are statically reserved in
* the non-canonical x86_64 process address space.
* amdkfd doesn't currently support apertures for 32-bit processes.
*/
if (process->is_32bit_user_mode) {
pdd->lds_base = pdd->lds_limit = 0;
pdd->gpuvm_base = pdd->gpuvm_limit = 0;
pdd->scratch_base = pdd->scratch_limit = 0;
} else {
switch (dev->adev->asic_type) {
case CHIP_KAVERI:
case CHIP_HAWAII:
case CHIP_CARRIZO:
case CHIP_TONGA:
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_VEGAM:
kfd_init_apertures_vi(pdd, id);
break;
default:
if (KFD_GC_VERSION(dev) >= IP_VERSION(9, 0, 1))
kfd_init_apertures_v9(pdd, id);
else {
WARN(1, "Unexpected ASIC family %u",
dev->adev->asic_type);
return -EINVAL;
}
}
}
dev_dbg(kfd_device, "node id %u\n", id);
dev_dbg(kfd_device, "gpu id %u\n", pdd->dev->id);
dev_dbg(kfd_device, "lds_base %llX\n", pdd->lds_base);
dev_dbg(kfd_device, "lds_limit %llX\n", pdd->lds_limit);
dev_dbg(kfd_device, "gpuvm_base %llX\n", pdd->gpuvm_base);
dev_dbg(kfd_device, "gpuvm_limit %llX\n", pdd->gpuvm_limit);
dev_dbg(kfd_device, "scratch_base %llX\n", pdd->scratch_base);
dev_dbg(kfd_device, "scratch_limit %llX\n", pdd->scratch_limit);
id++;
}
return 0;
}
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2014-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "kfd_mqd_manager.h"
#include "amdgpu_amdkfd.h"
#include "kfd_device_queue_manager.h"
/* Mapping queue priority to pipe priority, indexed by queue priority */
int pipe_priority_map[] = {
KFD_PIPE_PRIORITY_CS_LOW,
KFD_PIPE_PRIORITY_CS_LOW,
KFD_PIPE_PRIORITY_CS_LOW,
KFD_PIPE_PRIORITY_CS_LOW,
KFD_PIPE_PRIORITY_CS_LOW,
KFD_PIPE_PRIORITY_CS_LOW,
KFD_PIPE_PRIORITY_CS_LOW,
KFD_PIPE_PRIORITY_CS_MEDIUM,
KFD_PIPE_PRIORITY_CS_MEDIUM,
KFD_PIPE_PRIORITY_CS_MEDIUM,
KFD_PIPE_PRIORITY_CS_MEDIUM,
KFD_PIPE_PRIORITY_CS_HIGH,
KFD_PIPE_PRIORITY_CS_HIGH,
KFD_PIPE_PRIORITY_CS_HIGH,
KFD_PIPE_PRIORITY_CS_HIGH,
KFD_PIPE_PRIORITY_CS_HIGH
};
struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_node *dev, struct queue_properties *q)
{
struct kfd_mem_obj *mqd_mem_obj;
mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
if (!mqd_mem_obj)
return NULL;
mqd_mem_obj->gtt_mem = dev->dqm->hiq_sdma_mqd.gtt_mem;
mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr;
mqd_mem_obj->cpu_ptr = dev->dqm->hiq_sdma_mqd.cpu_ptr;
return mqd_mem_obj;
}
struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_node *dev,
struct queue_properties *q)
{
struct kfd_mem_obj *mqd_mem_obj;
uint64_t offset;
mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
if (!mqd_mem_obj)
return NULL;
offset = (q->sdma_engine_id *
dev->kfd->device_info.num_sdma_queues_per_engine +
q->sdma_queue_id) *
dev->dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size;
offset += dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size *
NUM_XCC(dev->xcc_mask);
mqd_mem_obj->gtt_mem = (void *)((uint64_t)dev->dqm->hiq_sdma_mqd.gtt_mem
+ offset);
mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr + offset;
mqd_mem_obj->cpu_ptr = (uint32_t *)((uint64_t)
dev->dqm->hiq_sdma_mqd.cpu_ptr + offset);
return mqd_mem_obj;
}
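/*
 * Worked example of the offset computed above (sizes are illustrative and
 * actually come from the MQD managers and device info): with 8 SDMA queues
 * per engine, one XCC, sdma_engine_id = 1 and sdma_queue_id = 3, the SDMA
 * MQD index is 1 * 8 + 3 = 11, so
 *
 *   offset = 11 * sdma_mqd_size + 1 * hiq_mqd_size
 *
 * i.e. the SDMA MQDs are packed after the per-XCC HIQ MQDs inside the
 * shared hiq_sdma_mqd allocation.
 */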
void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj)
{
WARN_ON(!mqd_mem_obj->gtt_mem);
kfree(mqd_mem_obj);
}
void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
const uint32_t *cu_mask, uint32_t cu_mask_count,
uint32_t *se_mask, uint32_t inst)
{
struct kfd_cu_info cu_info;
uint32_t cu_per_sh[KFD_MAX_NUM_SE][KFD_MAX_NUM_SH_PER_SE] = {0};
bool wgp_mode_req = KFD_GC_VERSION(mm->dev) >= IP_VERSION(10, 0, 0);
uint32_t en_mask = wgp_mode_req ? 0x3 : 0x1;
int i, se, sh, cu, cu_bitmap_sh_mul, cu_inc = wgp_mode_req ? 2 : 1;
uint32_t cu_active_per_node;
int inc = cu_inc * NUM_XCC(mm->dev->xcc_mask);
int xcc_inst = inst + ffs(mm->dev->xcc_mask) - 1;
amdgpu_amdkfd_get_cu_info(mm->dev->adev, &cu_info);
cu_active_per_node = cu_info.cu_active_number / mm->dev->kfd->num_nodes;
if (cu_mask_count > cu_active_per_node)
cu_mask_count = cu_active_per_node;
/* Exceeding these bounds corrupts the stack and indicates a coding error.
* Returning with no CU's enabled will hang the queue, which should be
* attention grabbing.
*/
if (cu_info.num_shader_engines > KFD_MAX_NUM_SE) {
pr_err("Exceeded KFD_MAX_NUM_SE, chip reports %d\n", cu_info.num_shader_engines);
return;
}
if (cu_info.num_shader_arrays_per_engine > KFD_MAX_NUM_SH_PER_SE) {
pr_err("Exceeded KFD_MAX_NUM_SH, chip reports %d\n",
cu_info.num_shader_arrays_per_engine * cu_info.num_shader_engines);
return;
}
cu_bitmap_sh_mul = (KFD_GC_VERSION(mm->dev) >= IP_VERSION(11, 0, 0) &&
KFD_GC_VERSION(mm->dev) < IP_VERSION(12, 0, 0)) ? 2 : 1;
/* Count active CUs per SH.
*
* Some CUs in an SH may be disabled. HW expects disabled CUs to be
* represented in the high bits of each SH's enable mask (the upper and lower
* 16 bits of se_mask) and will take care of the actual distribution of
* disabled CUs within each SH automatically.
* Each half of se_mask must be filled only on bits 0-cu_per_sh[se][sh]-1.
*
* See note on Arcturus cu_bitmap layout in gfx_v9_0_get_cu_info.
* See note on GFX11 cu_bitmap layout in gfx_v11_0_get_cu_info.
*/
for (se = 0; se < cu_info.num_shader_engines; se++)
for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++)
cu_per_sh[se][sh] = hweight32(
cu_info.cu_bitmap[xcc_inst][se % 4][sh + (se / 4) *
cu_bitmap_sh_mul]);
/* Symmetrically map cu_mask to all SEs & SHs:
* se_mask programs up to 2 SH in the upper and lower 16 bits.
*
* Examples
* Assuming 1 SH/SE, 4 SEs:
* cu_mask[0] bit0 -> se_mask[0] bit0
* cu_mask[0] bit1 -> se_mask[1] bit0
* ...
* cu_mask[0] bit4 -> se_mask[0] bit1
* ...
*
* Assuming 2 SH/SE, 4 SEs
* cu_mask[0] bit0 -> se_mask[0] bit0 (SE0,SH0,CU0)
* cu_mask[0] bit1 -> se_mask[1] bit0 (SE1,SH0,CU0)
* ...
* cu_mask[0] bit4 -> se_mask[0] bit16 (SE0,SH1,CU0)
* cu_mask[0] bit5 -> se_mask[1] bit16 (SE1,SH1,CU0)
* ...
* cu_mask[0] bit8 -> se_mask[0] bit1 (SE0,SH0,CU1)
* ...
*
* For GFX 9.4.3, the following code only looks at a
* subset of the cu_mask corresponding to the inst parameter.
* If we have n XCCs under one GPU node
* cu_mask[0] bit0 -> XCC0 se_mask[0] bit0 (XCC0,SE0,SH0,CU0)
* cu_mask[0] bit1 -> XCC1 se_mask[0] bit0 (XCC1,SE0,SH0,CU0)
* ..
* cu_mask[0] bitn -> XCCn se_mask[0] bit0 (XCCn,SE0,SH0,CU0)
* cu_mask[0] bit n+1 -> XCC0 se_mask[1] bit0 (XCC0,SE1,SH0,CU0)
*
* For example, if there are 6 XCCs under 1 KFD node, this code
* running for each inst, will look at the bits as:
* inst, inst + 6, inst + 12...
*
* First ensure all CUs are disabled, then enable user specified CUs.
*/
for (i = 0; i < cu_info.num_shader_engines; i++)
se_mask[i] = 0;
i = inst;
for (cu = 0; cu < 16; cu += cu_inc) {
for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++) {
for (se = 0; se < cu_info.num_shader_engines; se++) {
if (cu_per_sh[se][sh] > cu) {
if (cu_mask[i / 32] & (en_mask << (i % 32)))
se_mask[se] |= en_mask << (cu + sh * 16);
i += inc;
if (i >= cu_mask_count)
return;
}
}
}
}
}
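/*
 * Minimal worked example, assuming a pre-GFX10 part with one XCC, four SEs
 * with one SH each and at least one active CU per SH: a caller passing
 * cu_mask[0] = 0xF, cu_mask_count = 4 and inst = 0 ends up with
 *
 *   se_mask[0] = se_mask[1] = se_mask[2] = se_mask[3] = 0x1
 *
 * i.e. exactly CU0 enabled on every SE, matching the symmetric mapping
 * documented in the comments above.
 */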
int kfd_hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
struct queue_properties *p, struct mm_struct *mms)
{
return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, mqd, pipe_id,
queue_id, p->doorbell_off, 0);
}
int kfd_destroy_mqd_cp(struct mqd_manager *mm, void *mqd,
enum kfd_preempt_type type, unsigned int timeout,
uint32_t pipe_id, uint32_t queue_id)
{
return mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, mqd, type, timeout,
pipe_id, queue_id, 0);
}
void kfd_free_mqd_cp(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj)
{
if (mqd_mem_obj->gtt_mem) {
amdgpu_amdkfd_free_gtt_mem(mm->dev->adev, mqd_mem_obj->gtt_mem);
kfree(mqd_mem_obj);
} else {
kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
}
}
bool kfd_is_occupied_cp(struct mqd_manager *mm, void *mqd,
uint64_t queue_address, uint32_t pipe_id,
uint32_t queue_id)
{
return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->adev, queue_address,
pipe_id, queue_id, 0);
}
int kfd_load_mqd_sdma(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
struct queue_properties *p, struct mm_struct *mms)
{
return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd,
(uint32_t __user *)p->write_ptr,
mms);
}
/*
* preempt type here is ignored because there is only one way
* to preempt sdma queue
*/
int kfd_destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
enum kfd_preempt_type type,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id)
{
return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout);
}
bool kfd_is_occupied_sdma(struct mqd_manager *mm, void *mqd,
uint64_t queue_address, uint32_t pipe_id,
uint32_t queue_id)
{
return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd);
}
uint64_t kfd_hiq_mqd_stride(struct kfd_node *dev)
{
return dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
}
void kfd_get_hiq_xcc_mqd(struct kfd_node *dev, struct kfd_mem_obj *mqd_mem_obj,
uint32_t virtual_xcc_id)
{
uint64_t offset;
offset = kfd_hiq_mqd_stride(dev) * virtual_xcc_id;
mqd_mem_obj->gtt_mem = (virtual_xcc_id == 0) ?
dev->dqm->hiq_sdma_mqd.gtt_mem : NULL;
mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr + offset;
mqd_mem_obj->cpu_ptr = (uint32_t *)((uintptr_t)
dev->dqm->hiq_sdma_mqd.cpu_ptr + offset);
}
uint64_t kfd_mqd_stride(struct mqd_manager *mm,
struct queue_properties *q)
{
return mm->mqd_size;
}
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2014-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "cik_structs.h"
#include "oss/oss_2_4_sh_mask.h"
static inline struct cik_mqd *get_mqd(void *mqd)
{
return (struct cik_mqd *)mqd;
}
static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
{
return (struct cik_sdma_rlc_registers *)mqd;
}
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
struct mqd_update_info *minfo)
{
struct cik_mqd *m;
uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
if (!minfo || !minfo->cu_mask.ptr)
return;
mqd_symmetrically_map_cu_mask(mm,
minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0);
m = get_mqd(mqd);
m->compute_static_thread_mgmt_se0 = se_mask[0];
m->compute_static_thread_mgmt_se1 = se_mask[1];
m->compute_static_thread_mgmt_se2 = se_mask[2];
m->compute_static_thread_mgmt_se3 = se_mask[3];
pr_debug("Update cu mask to %#x %#x %#x %#x\n",
m->compute_static_thread_mgmt_se0,
m->compute_static_thread_mgmt_se1,
m->compute_static_thread_mgmt_se2,
m->compute_static_thread_mgmt_se3);
}
static void set_priority(struct cik_mqd *m, struct queue_properties *q)
{
m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
m->cp_hqd_queue_priority = q->priority;
}
static struct kfd_mem_obj *allocate_mqd(struct kfd_node *kfd,
struct queue_properties *q)
{
struct kfd_mem_obj *mqd_mem_obj;
if (kfd_gtt_sa_allocate(kfd, sizeof(struct cik_mqd),
&mqd_mem_obj))
return NULL;
return mqd_mem_obj;
}
static void init_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
{
uint64_t addr;
struct cik_mqd *m;
m = (struct cik_mqd *) mqd_mem_obj->cpu_ptr;
addr = mqd_mem_obj->gpu_addr;
memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256));
m->header = 0xC0310800;
m->compute_pipelinestat_enable = 1;
m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
/*
* Make sure to use the last queue state saved on mqd when the cp
* reassigns the queue, so that when the queue is switched on/off (e.g. on
* oversubscription or quantum timeout) the context stays consistent
*/
m->cp_hqd_persistent_state =
DEFAULT_CP_HQD_PERSISTENT_STATE | PRELOAD_REQ;
m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
m->cp_mqd_base_addr_hi = upper_32_bits(addr);
m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS |
QUANTUM_DURATION(10);
/*
* Pipe Priority
* Identifies the pipe relative priority when this queue is connected
* to the pipeline. The pipe priority is against the GFX pipe and HP3D.
* In KFD we are using a fixed pipe priority set to CS_MEDIUM.
* 0 = CS_LOW (typically below GFX)
* 1 = CS_MEDIUM (typically between HP3D and GFX)
* 2 = CS_HIGH (typically above HP3D)
*/
set_priority(m, q);
if (q->format == KFD_QUEUE_FORMAT_AQL)
m->cp_hqd_iq_rptr = AQL_ENABLE;
*mqd = m;
if (gart_addr)
*gart_addr = addr;
mm->update_mqd(mm, m, q, NULL);
}
static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
{
struct cik_sdma_rlc_registers *m;
m = (struct cik_sdma_rlc_registers *) mqd_mem_obj->cpu_ptr;
memset(m, 0, sizeof(struct cik_sdma_rlc_registers));
*mqd = m;
if (gart_addr)
*gart_addr = mqd_mem_obj->gpu_addr;
mm->update_mqd(mm, m, q, NULL);
}
static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
uint32_t queue_id, struct queue_properties *p,
struct mm_struct *mms)
{
/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
uint32_t wptr_mask = (uint32_t)((p->queue_size / 4) - 1);
return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
(uint32_t __user *)p->write_ptr,
wptr_shift, wptr_mask, mms, 0);
}
static void __update_mqd(struct mqd_manager *mm, void *mqd,
struct queue_properties *q, struct mqd_update_info *minfo,
unsigned int atc_bit)
{
struct cik_mqd *m;
m = get_mqd(mqd);
m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
DEFAULT_MIN_AVAIL_SIZE;
m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE;
if (atc_bit) {
m->cp_hqd_pq_control |= PQ_ATC_EN;
m->cp_hqd_ib_control |= IB_ATC_EN;
}
/*
* Calculate the queue size field: log base 2 of the actual queue size in
* dwords, minus 1 (an ffs()-based calculation needs another -1 because
* ffs() is 1-based)
*/
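/* e.g. a 4 KiB ring is 1024 dwords: order_base_2(1024) - 1 = 9 (illustrative) */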
m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_doorbell_control = DOORBELL_OFFSET(q->doorbell_off);
m->cp_hqd_vmid = q->vmid;
if (q->format == KFD_QUEUE_FORMAT_AQL)
m->cp_hqd_pq_control |= NO_UPDATE_RPTR;
update_cu_mask(mm, mqd, minfo);
set_priority(m, q);
q->is_active = QUEUE_IS_ACTIVE(*q);
}
static uint32_t read_doorbell_id(void *mqd)
{
struct cik_mqd *m = (struct cik_mqd *)mqd;
return m->queue_doorbell_id0;
}
static void update_mqd(struct mqd_manager *mm, void *mqd,
struct queue_properties *q,
struct mqd_update_info *minfo)
{
__update_mqd(mm, mqd, q, minfo, 0);
}
static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
struct queue_properties *q,
struct mqd_update_info *minfo)
{
struct cik_sdma_rlc_registers *m;
m = get_sdma_mqd(mqd);
m->sdma_rlc_rb_cntl = order_base_2(q->queue_size / 4)
<< SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
m->sdma_rlc_rb_base = lower_32_bits(q->queue_address >> 8);
m->sdma_rlc_rb_base_hi = upper_32_bits(q->queue_address >> 8);
m->sdma_rlc_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
m->sdma_rlc_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
m->sdma_rlc_doorbell =
q->doorbell_off << SDMA0_RLC0_DOORBELL__OFFSET__SHIFT;
m->sdma_rlc_virtual_addr = q->sdma_vm_addr;
m->sdma_engine_id = q->sdma_engine_id;
m->sdma_queue_id = q->sdma_queue_id;
q->is_active = QUEUE_IS_ACTIVE(*q);
}
static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
{
struct cik_mqd *m;
m = get_mqd(mqd);
memcpy(mqd_dst, m, sizeof(struct cik_mqd));
}
static void restore_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *qp,
const void *mqd_src,
const void *ctl_stack_src, const u32 ctl_stack_size)
{
uint64_t addr;
struct cik_mqd *m;
m = (struct cik_mqd *) mqd_mem_obj->cpu_ptr;
addr = mqd_mem_obj->gpu_addr;
memcpy(m, mqd_src, sizeof(*m));
*mqd = m;
if (gart_addr)
*gart_addr = addr;
m->cp_hqd_pq_doorbell_control = DOORBELL_OFFSET(qp->doorbell_off);
pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
m->cp_hqd_pq_doorbell_control);
qp->is_active = 0;
}
static void checkpoint_mqd_sdma(struct mqd_manager *mm,
void *mqd,
void *mqd_dst,
void *ctl_stack_dst)
{
struct cik_sdma_rlc_registers *m;
m = get_sdma_mqd(mqd);
memcpy(mqd_dst, m, sizeof(struct cik_sdma_rlc_registers));
}
static void restore_mqd_sdma(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *qp,
const void *mqd_src,
const void *ctl_stack_src, const u32 ctl_stack_size)
{
uint64_t addr;
struct cik_sdma_rlc_registers *m;
m = (struct cik_sdma_rlc_registers *) mqd_mem_obj->cpu_ptr;
addr = mqd_mem_obj->gpu_addr;
memcpy(m, mqd_src, sizeof(*m));
m->sdma_rlc_doorbell =
qp->doorbell_off << SDMA0_RLC0_DOORBELL__OFFSET__SHIFT;
*mqd = m;
if (gart_addr)
*gart_addr = addr;
qp->is_active = 0;
}
/*
* HIQ MQD implementation.
* The HIQ queue in Kaveri uses the same MQD structure as all the user mode
* queues, but with different initial values.
*/
static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
{
init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
}
static void update_mqd_hiq(struct mqd_manager *mm, void *mqd,
struct queue_properties *q,
struct mqd_update_info *minfo)
{
struct cik_mqd *m;
m = get_mqd(mqd);
m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
DEFAULT_MIN_AVAIL_SIZE |
PRIV_STATE |
KMD_QUEUE;
/*
* Calculate the queue size field: log base 2 of the actual queue
* size in dwords, minus 1
*/
m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_doorbell_control = DOORBELL_OFFSET(q->doorbell_off);
m->cp_hqd_vmid = q->vmid;
q->is_active = QUEUE_IS_ACTIVE(*q);
set_priority(m, q);
}
#if defined(CONFIG_DEBUG_FS)
static int debugfs_show_mqd(struct seq_file *m, void *data)
{
seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
data, sizeof(struct cik_mqd), false);
return 0;
}
static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
{
seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
data, sizeof(struct cik_sdma_rlc_registers), false);
return 0;
}
#endif
struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
struct kfd_node *dev)
{
struct mqd_manager *mqd;
if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
return NULL;
mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
if (!mqd)
return NULL;
mqd->dev = dev;
switch (type) {
case KFD_MQD_TYPE_CP:
mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd;
mqd->free_mqd = kfd_free_mqd_cp;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd;
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->checkpoint_mqd = checkpoint_mqd;
mqd->restore_mqd = restore_mqd;
mqd->mqd_size = sizeof(struct cik_mqd);
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
break;
case KFD_MQD_TYPE_HIQ:
mqd->allocate_mqd = allocate_hiq_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = free_mqd_hiq_sdma;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd_hiq;
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct cik_mqd);
mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
mqd->read_doorbell_id = read_doorbell_id;
break;
case KFD_MQD_TYPE_DIQ:
mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = kfd_free_mqd_cp;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd_hiq;
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct cik_mqd);
mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
break;
case KFD_MQD_TYPE_SDMA:
mqd->allocate_mqd = allocate_sdma_mqd;
mqd->init_mqd = init_mqd_sdma;
mqd->free_mqd = free_mqd_hiq_sdma;
mqd->load_mqd = kfd_load_mqd_sdma;
mqd->update_mqd = update_mqd_sdma;
mqd->destroy_mqd = kfd_destroy_mqd_sdma;
mqd->is_occupied = kfd_is_occupied_sdma;
mqd->checkpoint_mqd = checkpoint_mqd_sdma;
mqd->restore_mqd = restore_mqd_sdma;
mqd->mqd_size = sizeof(struct cik_sdma_rlc_registers);
mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
break;
default:
kfree(mqd);
return NULL;
}
return mqd;
}
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2014-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
#include "vi_structs.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "gca/gfx_8_0_enum.h"
#include "oss/oss_3_0_sh_mask.h"
#define CP_MQD_CONTROL__PRIV_STATE__SHIFT 0x8
static inline struct vi_mqd *get_mqd(void *mqd)
{
return (struct vi_mqd *)mqd;
}
static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
{
return (struct vi_sdma_mqd *)mqd;
}
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
struct mqd_update_info *minfo)
{
struct vi_mqd *m;
uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
if (!minfo || !minfo->cu_mask.ptr)
return;
mqd_symmetrically_map_cu_mask(mm,
minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0);
m = get_mqd(mqd);
m->compute_static_thread_mgmt_se0 = se_mask[0];
m->compute_static_thread_mgmt_se1 = se_mask[1];
m->compute_static_thread_mgmt_se2 = se_mask[2];
m->compute_static_thread_mgmt_se3 = se_mask[3];
pr_debug("Update cu mask to %#x %#x %#x %#x\n",
m->compute_static_thread_mgmt_se0,
m->compute_static_thread_mgmt_se1,
m->compute_static_thread_mgmt_se2,
m->compute_static_thread_mgmt_se3);
}
static void set_priority(struct vi_mqd *m, struct queue_properties *q)
{
m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
m->cp_hqd_queue_priority = q->priority;
}
static struct kfd_mem_obj *allocate_mqd(struct kfd_node *kfd,
struct queue_properties *q)
{
struct kfd_mem_obj *mqd_mem_obj;
if (kfd_gtt_sa_allocate(kfd, sizeof(struct vi_mqd),
&mqd_mem_obj))
return NULL;
return mqd_mem_obj;
}
static void init_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
{
uint64_t addr;
struct vi_mqd *m;
m = (struct vi_mqd *) mqd_mem_obj->cpu_ptr;
addr = mqd_mem_obj->gpu_addr;
memset(m, 0, sizeof(struct vi_mqd));
m->header = 0xC0310800;
m->compute_pipelinestat_enable = 1;
m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT |
MTYPE_UC << CP_MQD_CONTROL__MTYPE__SHIFT;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
m->cp_mqd_base_addr_hi = upper_32_bits(addr);
m->cp_hqd_quantum = 1 << CP_HQD_QUANTUM__QUANTUM_EN__SHIFT |
1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;
set_priority(m, q);
m->cp_hqd_eop_rptr = 1 << CP_HQD_EOP_RPTR__INIT_FETCHER__SHIFT;
if (q->format == KFD_QUEUE_FORMAT_AQL)
m->cp_hqd_iq_rptr = 1;
if (q->tba_addr) {
m->compute_tba_lo = lower_32_bits(q->tba_addr >> 8);
m->compute_tba_hi = upper_32_bits(q->tba_addr >> 8);
m->compute_tma_lo = lower_32_bits(q->tma_addr >> 8);
m->compute_tma_hi = upper_32_bits(q->tma_addr >> 8);
m->compute_pgm_rsrc2 |=
(1 << COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT);
}
if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) {
m->cp_hqd_persistent_state |=
(1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
m->cp_hqd_ctx_save_base_addr_lo =
lower_32_bits(q->ctx_save_restore_area_address);
m->cp_hqd_ctx_save_base_addr_hi =
upper_32_bits(q->ctx_save_restore_area_address);
m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size;
m->cp_hqd_cntl_stack_size = q->ctl_stack_size;
m->cp_hqd_cntl_stack_offset = q->ctl_stack_size;
m->cp_hqd_wg_state_offset = q->ctl_stack_size;
}
*mqd = m;
if (gart_addr)
*gart_addr = addr;
mm->update_mqd(mm, m, q, NULL);
}
static int load_mqd(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
struct queue_properties *p, struct mm_struct *mms)
{
/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
uint32_t wptr_mask = (uint32_t)((p->queue_size / 4) - 1);
return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
(uint32_t __user *)p->write_ptr,
wptr_shift, wptr_mask, mms, 0);
}
static void __update_mqd(struct mqd_manager *mm, void *mqd,
struct queue_properties *q, struct mqd_update_info *minfo,
unsigned int mtype, unsigned int atc_bit)
{
struct vi_mqd *m;
m = get_mqd(mqd);
m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT |
atc_bit << CP_HQD_PQ_CONTROL__PQ_ATC__SHIFT |
mtype << CP_HQD_PQ_CONTROL__MTYPE__SHIFT;
m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);
m->cp_hqd_pq_doorbell_control =
q->doorbell_off <<
CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
m->cp_hqd_pq_doorbell_control);
m->cp_hqd_eop_control = atc_bit << CP_HQD_EOP_CONTROL__EOP_ATC__SHIFT |
mtype << CP_HQD_EOP_CONTROL__MTYPE__SHIFT;
m->cp_hqd_ib_control = atc_bit << CP_HQD_IB_CONTROL__IB_ATC__SHIFT |
3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT |
mtype << CP_HQD_IB_CONTROL__MTYPE__SHIFT;
/*
* HW does not clamp this field correctly. Maximum EOP queue size
* is constrained by per-SE EOP done signal count, which is 8-bit.
* Limit is 0xFF EOP entries (= 0x7F8 dwords). CP will not submit
* more than (EOP entry count - 1) so a queue size of 0x800 dwords
* is safe, giving a maximum field value of 0xA.
*/
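/* e.g. an 8 KiB EOP buffer is 2048 dwords: min(0xA, order_base_2(2048) - 1) = 0xA (illustrative) */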
m->cp_hqd_eop_control |= min(0xA,
order_base_2(q->eop_ring_buffer_size / 4) - 1);
m->cp_hqd_eop_base_addr_lo =
lower_32_bits(q->eop_ring_buffer_address >> 8);
m->cp_hqd_eop_base_addr_hi =
upper_32_bits(q->eop_ring_buffer_address >> 8);
m->cp_hqd_iq_timer = atc_bit << CP_HQD_IQ_TIMER__IQ_ATC__SHIFT |
mtype << CP_HQD_IQ_TIMER__MTYPE__SHIFT;
m->cp_hqd_vmid = q->vmid;
if (q->format == KFD_QUEUE_FORMAT_AQL) {
m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT;
}
if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address)
m->cp_hqd_ctx_save_control =
atc_bit << CP_HQD_CTX_SAVE_CONTROL__ATC__SHIFT |
mtype << CP_HQD_CTX_SAVE_CONTROL__MTYPE__SHIFT;
update_cu_mask(mm, mqd, minfo);
set_priority(m, q);
q->is_active = QUEUE_IS_ACTIVE(*q);
}
static uint32_t read_doorbell_id(void *mqd)
{
struct vi_mqd *m = (struct vi_mqd *)mqd;
return m->queue_doorbell_id0;
}
static void update_mqd(struct mqd_manager *mm, void *mqd,
struct queue_properties *q,
struct mqd_update_info *minfo)
{
__update_mqd(mm, mqd, q, minfo, MTYPE_UC, 0);
}
static int get_wave_state(struct mqd_manager *mm, void *mqd,
struct queue_properties *q,
void __user *ctl_stack,
u32 *ctl_stack_used_size,
u32 *save_area_used_size)
{
struct vi_mqd *m;
m = get_mqd(mqd);
*ctl_stack_used_size = m->cp_hqd_cntl_stack_size -
m->cp_hqd_cntl_stack_offset;
*save_area_used_size = m->cp_hqd_wg_state_offset -
m->cp_hqd_cntl_stack_size;
/* Control stack is not copied to user mode for GFXv8 because
* it's part of the context save area that is already
* accessible to user mode
*/
return 0;
}
static void get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stack_size)
{
/* Control stack is stored in user mode */
*ctl_stack_size = 0;
}
static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
{
struct vi_mqd *m;
m = get_mqd(mqd);
memcpy(mqd_dst, m, sizeof(struct vi_mqd));
}
static void restore_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *qp,
const void *mqd_src,
const void *ctl_stack_src, const u32 ctl_stack_size)
{
uint64_t addr;
struct vi_mqd *m;
m = (struct vi_mqd *) mqd_mem_obj->cpu_ptr;
addr = mqd_mem_obj->gpu_addr;
memcpy(m, mqd_src, sizeof(*m));
*mqd = m;
if (gart_addr)
*gart_addr = addr;
m->cp_hqd_pq_doorbell_control =
qp->doorbell_off <<
CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
m->cp_hqd_pq_doorbell_control);
qp->is_active = 0;
}
static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
{
struct vi_mqd *m;
init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
m = get_mqd(*mqd);
m->cp_hqd_pq_control |= 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
}
static void update_mqd_hiq(struct mqd_manager *mm, void *mqd,
struct queue_properties *q,
struct mqd_update_info *minfo)
{
__update_mqd(mm, mqd, q, minfo, MTYPE_UC, 0);
}
static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
{
struct vi_sdma_mqd *m;
m = (struct vi_sdma_mqd *) mqd_mem_obj->cpu_ptr;
memset(m, 0, sizeof(struct vi_sdma_mqd));
*mqd = m;
if (gart_addr)
*gart_addr = mqd_mem_obj->gpu_addr;
mm->update_mqd(mm, m, q, NULL);
}
static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
struct queue_properties *q,
struct mqd_update_info *minfo)
{
struct vi_sdma_mqd *m;
m = get_sdma_mqd(mqd);
m->sdmax_rlcx_rb_cntl = order_base_2(q->queue_size / 4)
<< SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
m->sdmax_rlcx_doorbell =
q->doorbell_off << SDMA0_RLC0_DOORBELL__OFFSET__SHIFT;
m->sdmax_rlcx_virtual_addr = q->sdma_vm_addr;
m->sdma_engine_id = q->sdma_engine_id;
m->sdma_queue_id = q->sdma_queue_id;
q->is_active = QUEUE_IS_ACTIVE(*q);
}
static void checkpoint_mqd_sdma(struct mqd_manager *mm,
void *mqd,
void *mqd_dst,
void *ctl_stack_dst)
{
struct vi_sdma_mqd *m;
m = get_sdma_mqd(mqd);
memcpy(mqd_dst, m, sizeof(struct vi_sdma_mqd));
}
static void restore_mqd_sdma(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *qp,
const void *mqd_src,
const void *ctl_stack_src, const u32 ctl_stack_size)
{
uint64_t addr;
struct vi_sdma_mqd *m;
m = (struct vi_sdma_mqd *) mqd_mem_obj->cpu_ptr;
addr = mqd_mem_obj->gpu_addr;
memcpy(m, mqd_src, sizeof(*m));
m->sdmax_rlcx_doorbell =
qp->doorbell_off << SDMA0_RLC0_DOORBELL__OFFSET__SHIFT;
*mqd = m;
if (gart_addr)
*gart_addr = addr;
qp->is_active = 0;
}
#if defined(CONFIG_DEBUG_FS)
static int debugfs_show_mqd(struct seq_file *m, void *data)
{
seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
data, sizeof(struct vi_mqd), false);
return 0;
}
static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
{
seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
data, sizeof(struct vi_sdma_mqd), false);
return 0;
}
#endif
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
struct kfd_node *dev)
{
struct mqd_manager *mqd;
if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
return NULL;
mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
if (!mqd)
return NULL;
mqd->dev = dev;
switch (type) {
case KFD_MQD_TYPE_CP:
mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd;
mqd->free_mqd = kfd_free_mqd_cp;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd;
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->get_wave_state = get_wave_state;
mqd->get_checkpoint_info = get_checkpoint_info;
mqd->checkpoint_mqd = checkpoint_mqd;
mqd->restore_mqd = restore_mqd;
mqd->mqd_size = sizeof(struct vi_mqd);
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
break;
case KFD_MQD_TYPE_HIQ:
mqd->allocate_mqd = allocate_hiq_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = free_mqd_hiq_sdma;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd_hiq;
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct vi_mqd);
mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
mqd->read_doorbell_id = read_doorbell_id;
break;
case KFD_MQD_TYPE_DIQ:
mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = kfd_free_mqd_cp;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd_hiq;
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct vi_mqd);
mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
break;
case KFD_MQD_TYPE_SDMA:
mqd->allocate_mqd = allocate_sdma_mqd;
mqd->init_mqd = init_mqd_sdma;
mqd->free_mqd = free_mqd_hiq_sdma;
mqd->load_mqd = kfd_load_mqd_sdma;
mqd->update_mqd = update_mqd_sdma;
mqd->destroy_mqd = kfd_destroy_mqd_sdma;
mqd->is_occupied = kfd_is_occupied_sdma;
mqd->checkpoint_mqd = checkpoint_mqd_sdma;
mqd->restore_mqd = restore_mqd_sdma;
mqd->mqd_size = sizeof(struct vi_sdma_mqd);
mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
break;
default:
kfree(mqd);
return NULL;
}
return mqd;
}
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c |
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "kfd_events.h"
#include "kfd_debug.h"
#include "soc15_int.h"
#include "kfd_device_queue_manager.h"
/*
* GFX10 SQ Interrupts
*
* There are 3 encoding types of interrupts sourced from SQ sent as a 44-bit
* packet to the Interrupt Handler:
* Auto - Generated by the SQG (various cmd overflows, timestamps etc)
* Wave - Generated by S_SENDMSG through a shader program
* Error - HW generated errors (Illegal instructions, Memviols, EDC etc)
*
* The 44-bit packet is mapped as {context_id1[7:0],context_id0[31:0]} plus
* 4-bits for VMID (SOC15_VMID_FROM_IH_ENTRY) as such:
*
* - context_id1[7:6]
* Encoding type (0 = Auto, 1 = Wave, 2 = Error)
*
* - context_id0[24]
* PRIV bit indicates that Wave S_SEND or error occurred within trap
*
* - context_id0[22:0]
* 23-bit data with the following layout per encoding type:
* Auto - only context_id0[8:0] is used, which reports various interrupts
* generated by SQG. The rest is 0.
* Wave - user data sent from m0 via S_SENDMSG
* Error - Error type (context_id0[22:19]), Error Details (rest of bits)
*
* The other context_id bits show coordinates (SE/SH/CU/SIMD/WGP) for wave
* S_SENDMSG and Errors. These are 0 for Auto.
*/
enum SQ_INTERRUPT_WORD_ENCODING {
SQ_INTERRUPT_WORD_ENCODING_AUTO = 0x0,
SQ_INTERRUPT_WORD_ENCODING_INST,
SQ_INTERRUPT_WORD_ENCODING_ERROR,
};
enum SQ_INTERRUPT_ERROR_TYPE {
SQ_INTERRUPT_ERROR_TYPE_EDC_FUE = 0x0,
SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST,
SQ_INTERRUPT_ERROR_TYPE_MEMVIOL,
SQ_INTERRUPT_ERROR_TYPE_EDC_FED,
};
/* SQ_INTERRUPT_WORD_AUTO_CTXID */
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE__SHIFT 0
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__WLT__SHIFT 1
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_BUF0_FULL__SHIFT 2
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_BUF1_FULL__SHIFT 3
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_UTC_ERROR__SHIFT 7
#define SQ_INTERRUPT_WORD_AUTO_CTXID1__SE_ID__SHIFT 4
#define SQ_INTERRUPT_WORD_AUTO_CTXID1__ENCODING__SHIFT 6
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_MASK 0x00000001
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__WLT_MASK 0x00000002
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_BUF0_FULL_MASK 0x00000004
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_BUF1_FULL_MASK 0x00000008
#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_UTC_ERROR_MASK 0x00000080
#define SQ_INTERRUPT_WORD_AUTO_CTXID1__SE_ID_MASK 0x030
#define SQ_INTERRUPT_WORD_AUTO_CTXID1__ENCODING_MASK 0x0c0
/* SQ_INTERRUPT_WORD_WAVE_CTXID */
#define SQ_INTERRUPT_WORD_WAVE_CTXID0__DATA__SHIFT 0
#define SQ_INTERRUPT_WORD_WAVE_CTXID0__SA_ID__SHIFT 23
#define SQ_INTERRUPT_WORD_WAVE_CTXID0__PRIV__SHIFT 24
#define SQ_INTERRUPT_WORD_WAVE_CTXID0__WAVE_ID__SHIFT 25
#define SQ_INTERRUPT_WORD_WAVE_CTXID0__SIMD_ID__SHIFT 30
#define SQ_INTERRUPT_WORD_WAVE_CTXID1__WGP_ID__SHIFT 0
#define SQ_INTERRUPT_WORD_WAVE_CTXID1__SE_ID__SHIFT 4
#define SQ_INTERRUPT_WORD_WAVE_CTXID1__ENCODING__SHIFT 6
#define SQ_INTERRUPT_WORD_WAVE_CTXID0__DATA_MASK 0x000007fffff
#define SQ_INTERRUPT_WORD_WAVE_CTXID0__SA_ID_MASK 0x0000800000
#define SQ_INTERRUPT_WORD_WAVE_CTXID0__PRIV_MASK 0x00001000000
#define SQ_INTERRUPT_WORD_WAVE_CTXID0__WAVE_ID_MASK 0x0003e000000
#define SQ_INTERRUPT_WORD_WAVE_CTXID0__SIMD_ID_MASK 0x000c0000000
#define SQ_INTERRUPT_WORD_WAVE_CTXID1__WGP_ID_MASK 0x00f
#define SQ_INTERRUPT_WORD_WAVE_CTXID1__SE_ID_MASK 0x030
#define SQ_INTERRUPT_WORD_WAVE_CTXID1__ENCODING_MASK 0x0c0
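/*
* Illustrative decode (made-up value, not captured from hardware):
* context_id1 = 0x47 gives ENCODING = 1 (wave), SE_ID = 0 and WGP_ID = 7
* with the masks and shifts above.
*/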
#define KFD_CTXID0__ERR_TYPE_MASK 0x780000
#define KFD_CTXID0__ERR_TYPE__SHIFT 19
/* GFX10 SQ interrupt ENC type bit (context_id1[7:6]) for wave s_sendmsg */
#define KFD_CONTEXT_ID1_ENC_TYPE_WAVE_MASK 0x40
/* GFX10 SQ interrupt PRIV bit (context_id0[24]) for s_sendmsg inside trap */
#define KFD_CONTEXT_ID0_PRIV_MASK 0x1000000
/*
* The debugger will send user data (m0) with PRIV=1 to indicate it requires
* notification from the KFD with the following queue id (DOORBELL_ID) and
* trap code (TRAP_CODE).
*/
#define KFD_CONTEXT_ID0_DEBUG_DOORBELL_MASK 0x0003ff
#define KFD_CONTEXT_ID0_DEBUG_TRAP_CODE_SHIFT 10
#define KFD_CONTEXT_ID0_DEBUG_TRAP_CODE_MASK 0x07fc00
#define KFD_DEBUG_DOORBELL_ID(ctxid0) ((ctxid0) & \
KFD_CONTEXT_ID0_DEBUG_DOORBELL_MASK)
#define KFD_DEBUG_TRAP_CODE(ctxid0) (((ctxid0) & \
KFD_CONTEXT_ID0_DEBUG_TRAP_CODE_MASK) \
>> KFD_CONTEXT_ID0_DEBUG_TRAP_CODE_SHIFT)
#define KFD_DEBUG_CP_BAD_OP_ECODE_MASK 0x3fffc00
#define KFD_DEBUG_CP_BAD_OP_ECODE_SHIFT 10
#define KFD_DEBUG_CP_BAD_OP_ECODE(ctxid0) (((ctxid0) & \
KFD_DEBUG_CP_BAD_OP_ECODE_MASK) \
>> KFD_DEBUG_CP_BAD_OP_ECODE_SHIFT)
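/*
* Illustrative use of the helpers above (made-up value): for
* context_id0 = 0x12c05, KFD_DEBUG_DOORBELL_ID() yields 0x005 and
* KFD_DEBUG_TRAP_CODE() yields 0x4b.
*/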
static void event_interrupt_poison_consumption(struct kfd_node *dev,
uint16_t pasid, uint16_t client_id)
{
int old_poison, ret = -EINVAL;
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
if (!p)
return;
/* all queues of a process will be unmapped at once */
old_poison = atomic_cmpxchg(&p->poison, 0, 1);
kfd_unref_process(p);
if (old_poison)
return;
switch (client_id) {
case SOC15_IH_CLIENTID_SE0SH:
case SOC15_IH_CLIENTID_SE1SH:
case SOC15_IH_CLIENTID_SE2SH:
case SOC15_IH_CLIENTID_SE3SH:
case SOC15_IH_CLIENTID_UTCL2:
ret = kfd_dqm_evict_pasid(dev->dqm, pasid);
break;
case SOC15_IH_CLIENTID_SDMA0:
case SOC15_IH_CLIENTID_SDMA1:
case SOC15_IH_CLIENTID_SDMA2:
case SOC15_IH_CLIENTID_SDMA3:
case SOC15_IH_CLIENTID_SDMA4:
break;
default:
break;
}
kfd_signal_poison_consumed_event(dev, pasid);
/* If resetting the queue succeeds, do page retirement without a gpu
* reset; if it fails, fall back to the gpu reset path.
*/
if (!ret) {
dev_warn(dev->adev->dev,
"RAS poison consumption, unmap queue flow succeeded: client id %d\n",
client_id);
amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, false);
} else {
dev_warn(dev->adev->dev,
"RAS poison consumption, fall back to gpu reset flow: client id %d\n",
client_id);
amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, true);
}
}
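/*
* Top-half filter: return true only for IH ring entries that the
* bottom-half worker (event_interrupt_wq_v10) should process.
*/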
static bool event_interrupt_isr_v10(struct kfd_node *dev,
const uint32_t *ih_ring_entry,
uint32_t *patched_ihre,
bool *patched_flag)
{
uint16_t source_id, client_id, pasid, vmid;
const uint32_t *data = ih_ring_entry;
source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
/* Only handle interrupts from KFD VMIDs */
vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
if (!KFD_IRQ_IS_FENCE(client_id, source_id) &&
(vmid < dev->vm_info.first_vmid_kfd ||
vmid > dev->vm_info.last_vmid_kfd))
return false;
pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
/* Only handle clients we care about */
if (client_id != SOC15_IH_CLIENTID_GRBM_CP &&
client_id != SOC15_IH_CLIENTID_SDMA0 &&
client_id != SOC15_IH_CLIENTID_SDMA1 &&
client_id != SOC15_IH_CLIENTID_SDMA2 &&
client_id != SOC15_IH_CLIENTID_SDMA3 &&
client_id != SOC15_IH_CLIENTID_SDMA4 &&
client_id != SOC15_IH_CLIENTID_SDMA5 &&
client_id != SOC15_IH_CLIENTID_SDMA6 &&
client_id != SOC15_IH_CLIENTID_SDMA7 &&
client_id != SOC15_IH_CLIENTID_VMC &&
client_id != SOC15_IH_CLIENTID_VMC1 &&
client_id != SOC15_IH_CLIENTID_UTCL2 &&
client_id != SOC15_IH_CLIENTID_SE0SH &&
client_id != SOC15_IH_CLIENTID_SE1SH &&
client_id != SOC15_IH_CLIENTID_SE2SH &&
client_id != SOC15_IH_CLIENTID_SE3SH)
return false;
pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
client_id, source_id, vmid, pasid);
pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
data[0], data[1], data[2], data[3],
data[4], data[5], data[6], data[7]);
/* If there is no valid PASID, it's likely a bug */
if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
return false;
/* Interrupt types we care about: various signals and faults.
* They will be forwarded to a work queue (see below).
*/
return source_id == SOC15_INTSRC_CP_END_OF_PIPE ||
source_id == SOC15_INTSRC_SDMA_TRAP ||
source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
client_id == SOC15_IH_CLIENTID_VMC ||
client_id == SOC15_IH_CLIENTID_VMC1 ||
client_id == SOC15_IH_CLIENTID_UTCL2 ||
KFD_IRQ_IS_FENCE(client_id, source_id);
}
static void event_interrupt_wq_v10(struct kfd_node *dev,
const uint32_t *ih_ring_entry)
{
uint16_t source_id, client_id, pasid, vmid;
uint32_t context_id0, context_id1;
uint32_t encoding, sq_intr_err_type;
source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
context_id0 = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry);
context_id1 = SOC15_CONTEXT_ID1_FROM_IH_ENTRY(ih_ring_entry);
if (client_id == SOC15_IH_CLIENTID_GRBM_CP ||
client_id == SOC15_IH_CLIENTID_SE0SH ||
client_id == SOC15_IH_CLIENTID_SE1SH ||
client_id == SOC15_IH_CLIENTID_SE2SH ||
client_id == SOC15_IH_CLIENTID_SE3SH) {
if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
kfd_signal_event_interrupt(pasid, context_id0, 32);
else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG) {
encoding = REG_GET_FIELD(context_id1,
SQ_INTERRUPT_WORD_WAVE_CTXID1, ENCODING);
switch (encoding) {
case SQ_INTERRUPT_WORD_ENCODING_AUTO:
pr_debug(
"sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf0_full %d, ttrac_buf1_full %d, ttrace_utc_err %d\n",
REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_AUTO_CTXID1,
SE_ID),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
THREAD_TRACE),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
WLT),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
THREAD_TRACE_BUF0_FULL),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
THREAD_TRACE_BUF1_FULL),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0,
THREAD_TRACE_UTC_ERROR));
break;
case SQ_INTERRUPT_WORD_ENCODING_INST:
pr_debug("sq_intr: inst, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n",
REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
SE_ID),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
DATA),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
SA_ID),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
PRIV),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
WAVE_ID),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
SIMD_ID),
REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
WGP_ID));
if (context_id0 & SQ_INTERRUPT_WORD_WAVE_CTXID0__PRIV_MASK) {
if (kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_DEBUG_DOORBELL_ID(context_id0),
KFD_DEBUG_TRAP_CODE(context_id0),
NULL, 0))
return;
}
break;
case SQ_INTERRUPT_WORD_ENCODING_ERROR:
sq_intr_err_type = REG_GET_FIELD(context_id0, KFD_CTXID0,
ERR_TYPE);
pr_warn("sq_intr: error, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d, err_type %d\n",
REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
SE_ID),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
DATA),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
SA_ID),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
PRIV),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
WAVE_ID),
REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0,
SIMD_ID),
REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1,
WGP_ID),
sq_intr_err_type);
if (sq_intr_err_type != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
sq_intr_err_type != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) {
event_interrupt_poison_consumption(dev, pasid, source_id);
return;
}
break;
default:
break;
}
kfd_signal_event_interrupt(pasid, context_id0 & 0x7fffff, 23);
} else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) {
kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_DEBUG_DOORBELL_ID(context_id0),
KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)),
NULL,
0);
}
} else if (client_id == SOC15_IH_CLIENTID_SDMA0 ||
client_id == SOC15_IH_CLIENTID_SDMA1 ||
client_id == SOC15_IH_CLIENTID_SDMA2 ||
client_id == SOC15_IH_CLIENTID_SDMA3 ||
(client_id == SOC15_IH_CLIENTID_SDMA3_Sienna_Cichlid &&
KFD_GC_VERSION(dev) == IP_VERSION(10, 3, 0)) ||
client_id == SOC15_IH_CLIENTID_SDMA4 ||
client_id == SOC15_IH_CLIENTID_SDMA5 ||
client_id == SOC15_IH_CLIENTID_SDMA6 ||
client_id == SOC15_IH_CLIENTID_SDMA7) {
if (source_id == SOC15_INTSRC_SDMA_TRAP) {
kfd_signal_event_interrupt(pasid, context_id0 & 0xfffffff, 28);
} else if (source_id == SOC15_INTSRC_SDMA_ECC) {
event_interrupt_poison_consumption(dev, pasid, source_id);
return;
}
} else if (client_id == SOC15_IH_CLIENTID_VMC ||
client_id == SOC15_IH_CLIENTID_VMC1 ||
client_id == SOC15_IH_CLIENTID_UTCL2) {
struct kfd_vm_fault_info info = {0};
uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
struct kfd_hsa_memory_exception_data exception_data;
if (client_id == SOC15_IH_CLIENTID_UTCL2 &&
amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev)) {
event_interrupt_poison_consumption(dev, pasid, client_id);
return;
}
info.vmid = vmid;
info.mc_id = client_id;
info.page_addr = ih_ring_entry[4] |
(uint64_t)(ih_ring_entry[5] & 0xf) << 32;
info.prot_valid = ring_id & 0x08;
info.prot_read = ring_id & 0x10;
info.prot_write = ring_id & 0x20;
memset(&exception_data, 0, sizeof(exception_data));
exception_data.gpu_id = dev->id;
exception_data.va = (info.page_addr) << PAGE_SHIFT;
exception_data.failure.NotPresent = info.prot_valid ? 1 : 0;
exception_data.failure.NoExecute = info.prot_exec ? 1 : 0;
exception_data.failure.ReadOnly = info.prot_write ? 1 : 0;
exception_data.failure.imprecise = 0;
kfd_set_dbg_ev_from_interrupt(dev,
pasid,
-1,
KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION),
&exception_data,
sizeof(exception_data));
} else if (KFD_IRQ_IS_FENCE(client_id, source_id)) {
kfd_process_close_interrupt_drain(pasid);
}
}
const struct kfd_event_interrupt_class event_interrupt_class_v10 = {
.interrupt_isr = event_interrupt_isr_v10,
.interrupt_wq = event_interrupt_wq_v10,
};
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2020-2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/types.h>
#include <linux/hmm.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/migrate.h>
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_res_cursor.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"
#include "kfd_smi_events.h"
#ifdef dev_fmt
#undef dev_fmt
#endif
#define dev_fmt(fmt) "kfd_migrate: " fmt
static uint64_t
svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr)
{
return addr + amdgpu_ttm_domain_start(adev, TTM_PL_VRAM);
}
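/*
* Map npages of system memory (DMA addresses in @addr) through GART
* window 0 so the SDMA engine can address them linearly; the GART base
* of the mapping is returned in *gart_addr.
*/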
static int
svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
dma_addr_t *addr, uint64_t *gart_addr, uint64_t flags)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
unsigned int num_dw, num_bytes;
struct dma_fence *fence;
uint64_t src_addr, dst_addr;
uint64_t pte_flags;
void *cpu_addr;
int r;
/* use gart window 0 */
*gart_addr = adev->gmc.gart_start;
num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
num_bytes = npages * 8;
r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
AMDGPU_FENCE_OWNER_UNDEFINED,
num_dw * 4 + num_bytes,
AMDGPU_IB_POOL_DELAYED,
&job);
if (r)
return r;
src_addr = num_dw * 4;
src_addr += job->ibs[0].gpu_addr;
dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
dst_addr, num_bytes, false);
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
pte_flags = AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
pte_flags |= AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED;
if (!(flags & KFD_IOCTL_SVM_FLAG_GPU_RO))
pte_flags |= AMDGPU_PTE_WRITEABLE;
pte_flags |= adev->gart.gart_pte_flags;
cpu_addr = &job->ibs[0].ptr[num_dw];
amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
fence = amdgpu_job_submit(job);
dma_fence_put(fence);
return r;
}
/**
* svm_migrate_copy_memory_gart - sdma copy of data between ram and vram
*
* @adev: amdgpu device the sdma ring is running on
* @sys: DMA addresses of the system pages
* @vram: VRAM addresses of the device pages
* @npages: number of pages to copy
* @direction: enum MIGRATION_COPY_DIR
* @mfence: output, sdma fence to signal after the sdma copy is done
*
* The ram side is addressed through contiguous GART table entries mapping the
* ram pages; the vram side uses the direct mapping of vram pages, which must
* be npages contiguous pages.
* The GART update and the sdma copy share the same buffer copy ring. The copy
* is split into transfers of at most GTT_MAX_PAGES; all sdma operations are
* serialized, and the last sdma fence is returned so the caller can wait for
* the whole copy to complete.
*
* Context: Process context, takes and releases gtt_window_lock
*
* Return:
* 0 - OK, otherwise error code
*/
static int
svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
uint64_t *vram, uint64_t npages,
enum MIGRATION_COPY_DIR direction,
struct dma_fence **mfence)
{
const uint64_t GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
uint64_t gart_s, gart_d;
struct dma_fence *next;
uint64_t size;
int r;
mutex_lock(&adev->mman.gtt_window_lock);
while (npages) {
size = min(GTT_MAX_PAGES, npages);
if (direction == FROM_VRAM_TO_RAM) {
gart_s = svm_migrate_direct_mapping_addr(adev, *vram);
r = svm_migrate_gart_map(ring, size, sys, &gart_d, 0);
} else if (direction == FROM_RAM_TO_VRAM) {
r = svm_migrate_gart_map(ring, size, sys, &gart_s,
KFD_IOCTL_SVM_FLAG_GPU_RO);
gart_d = svm_migrate_direct_mapping_addr(adev, *vram);
}
if (r) {
dev_err(adev->dev, "fail %d create gart mapping\n", r);
goto out_unlock;
}
r = amdgpu_copy_buffer(ring, gart_s, gart_d, size * PAGE_SIZE,
NULL, &next, false, true, false);
if (r) {
dev_err(adev->dev, "fail %d to copy memory\n", r);
goto out_unlock;
}
dma_fence_put(*mfence);
*mfence = next;
npages -= size;
if (npages) {
sys += size;
vram += size;
}
}
out_unlock:
mutex_unlock(&adev->mman.gtt_window_lock);
return r;
}
/**
* svm_migrate_copy_done - wait for the sdma memory copy to finish
*
* @adev: amdgpu device the sdma memory copy is executing on
* @mfence: migrate fence
*
* Wait for the dma fence to be signaled; if the copy was split into multiple
* sdma operations, this is the fence of the last sdma operation.
*
* Context: called after svm_migrate_copy_memory
*
* Return:
* 0 - success
* otherwise - error code from dma fence signal
*/
static int
svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
{
int r = 0;
if (mfence) {
r = dma_fence_wait(mfence, false);
dma_fence_put(mfence);
pr_debug("sdma copy memory fence done\n");
}
return r;
}
unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
{
return (addr + adev->kfd.pgmap.range.start) >> PAGE_SHIFT;
}
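/*
* Initialize the ZONE_DEVICE page backing a VRAM pfn and take a
* reference on the range's svm_bo so the BO outlives the device page.
*/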
static void
svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
{
struct page *page;
page = pfn_to_page(pfn);
svm_range_bo_ref(prange->svm_bo);
page->zone_device_data = prange->svm_bo;
zone_device_page_init(page);
}
static void
svm_migrate_put_vram_page(struct amdgpu_device *adev, unsigned long addr)
{
struct page *page;
page = pfn_to_page(svm_migrate_addr_to_pfn(adev, addr));
unlock_page(page);
put_page(page);
}
static unsigned long
svm_migrate_addr(struct amdgpu_device *adev, struct page *page)
{
unsigned long addr;
addr = page_to_pfn(page) << PAGE_SHIFT;
return (addr - adev->kfd.pgmap.range.start);
}
static struct page *
svm_migrate_get_sys_page(struct vm_area_struct *vma, unsigned long addr)
{
struct page *page;
page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
if (page)
lock_page(page);
return page;
}
static void svm_migrate_put_sys_page(unsigned long addr)
{
struct page *page;
page = pfn_to_page(addr >> PAGE_SHIFT);
unlock_page(page);
put_page(page);
}
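/*
* Count source pages that migrate_vma_setup() collected and that can
* actually migrate (both MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE set).
*/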
static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
{
unsigned long cpages = 0;
unsigned long i;
for (i = 0; i < migrate->npages; i++) {
if (migrate->src[i] & MIGRATE_PFN_VALID &&
migrate->src[i] & MIGRATE_PFN_MIGRATE)
cpages++;
}
return cpages;
}
static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
{
unsigned long upages = 0;
unsigned long i;
for (i = 0; i < migrate->npages; i++) {
if (migrate->src[i] & MIGRATE_PFN_VALID &&
!(migrate->src[i] & MIGRATE_PFN_MIGRATE))
upages++;
}
return upages;
}
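/*
* Copy the collected system pages into the VRAM backing of @prange:
* dma-map each source page, then batch contiguous runs into as few
* svm_migrate_copy_memory_gart() calls as the VRAM cursor allows.
*/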
static int
svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
dma_addr_t *scratch, uint64_t ttm_res_offset)
{
uint64_t npages = migrate->cpages;
struct amdgpu_device *adev = node->adev;
struct device *dev = adev->dev;
struct amdgpu_res_cursor cursor;
dma_addr_t *src;
uint64_t *dst;
uint64_t i, j;
int r;
pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start,
prange->last, ttm_res_offset);
src = scratch;
dst = (uint64_t *)(scratch + npages);
amdgpu_res_first(prange->ttm_res, ttm_res_offset,
npages << PAGE_SHIFT, &cursor);
for (i = j = 0; i < npages; i++) {
struct page *spage;
dst[i] = cursor.start + (j << PAGE_SHIFT);
migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
svm_migrate_get_vram_page(prange, migrate->dst[i]);
migrate->dst[i] = migrate_pfn(migrate->dst[i]);
spage = migrate_pfn_to_page(migrate->src[i]);
if (spage && !is_zone_device_page(spage)) {
src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
DMA_TO_DEVICE);
r = dma_mapping_error(dev, src[i]);
if (r) {
dev_err(dev, "%s: fail %d dma_map_page\n",
__func__, r);
goto out_free_vram_pages;
}
} else {
if (j) {
r = svm_migrate_copy_memory_gart(
adev, src + i - j,
dst + i - j, j,
FROM_RAM_TO_VRAM,
mfence);
if (r)
goto out_free_vram_pages;
amdgpu_res_next(&cursor, (j + 1) << PAGE_SHIFT);
j = 0;
} else {
amdgpu_res_next(&cursor, PAGE_SIZE);
}
continue;
}
pr_debug_ratelimited("dma mapping src to 0x%llx, pfn 0x%lx\n",
src[i] >> PAGE_SHIFT, page_to_pfn(spage));
if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) {
r = svm_migrate_copy_memory_gart(adev, src + i - j,
dst + i - j, j + 1,
FROM_RAM_TO_VRAM,
mfence);
if (r)
goto out_free_vram_pages;
amdgpu_res_next(&cursor, (j + 1) * PAGE_SIZE);
j = 0;
} else {
j++;
}
}
r = svm_migrate_copy_memory_gart(adev, src + i - j, dst + i - j, j,
FROM_RAM_TO_VRAM, mfence);
out_free_vram_pages:
if (r) {
pr_debug("failed %d to copy memory to vram\n", r);
while (i--) {
svm_migrate_put_vram_page(adev, dst[i]);
migrate->dst[i] = 0;
}
}
#ifdef DEBUG_FORCE_MIXED_DOMAINS
for (i = 0, j = 0; i < npages; i += 4, j++) {
if (j & 1)
continue;
svm_migrate_put_vram_page(adev, dst[i]);
migrate->dst[i] = 0;
svm_migrate_put_vram_page(adev, dst[i + 1]);
migrate->dst[i + 1] = 0;
svm_migrate_put_vram_page(adev, dst[i + 2]);
migrate->dst[i + 2] = 0;
svm_migrate_put_vram_page(adev, dst[i + 3]);
migrate->dst[i + 3] = 0;
}
#endif
return r;
}
static long
svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
struct vm_area_struct *vma, uint64_t start,
uint64_t end, uint32_t trigger, uint64_t ttm_res_offset)
{
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
uint64_t npages = (end - start) >> PAGE_SHIFT;
struct amdgpu_device *adev = node->adev;
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
struct migrate_vma migrate = { 0 };
unsigned long cpages = 0;
dma_addr_t *scratch;
void *buf;
int r = -ENOMEM;
memset(&migrate, 0, sizeof(migrate));
migrate.vma = vma;
migrate.start = start;
migrate.end = end;
migrate.flags = MIGRATE_VMA_SELECT_SYSTEM;
migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
buf = kvcalloc(npages,
2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
GFP_KERNEL);
if (!buf)
goto out;
migrate.src = buf;
migrate.dst = migrate.src + npages;
scratch = (dma_addr_t *)(migrate.dst + npages);
kfd_smi_event_migration_start(node, p->lead_thread->pid,
start >> PAGE_SHIFT, end >> PAGE_SHIFT,
0, node->id, prange->prefetch_loc,
prange->preferred_loc, trigger);
r = migrate_vma_setup(&migrate);
if (r) {
dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
__func__, r, prange->start, prange->last);
goto out_free;
}
cpages = migrate.cpages;
if (!cpages) {
pr_debug("failed collect migrate sys pages [0x%lx 0x%lx]\n",
prange->start, prange->last);
goto out_free;
}
if (cpages != npages)
pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
cpages, npages);
else
pr_debug("0x%lx pages migrated\n", cpages);
r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset);
migrate_vma_pages(&migrate);
pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
svm_migrate_successful_pages(&migrate), cpages, migrate.npages);
svm_migrate_copy_done(adev, mfence);
migrate_vma_finalize(&migrate);
kfd_smi_event_migration_end(node, p->lead_thread->pid,
start >> PAGE_SHIFT, end >> PAGE_SHIFT,
0, node->id, trigger);
svm_range_dma_unmap(adev->dev, scratch, 0, npages);
out_free:
kvfree(buf);
out:
if (!r && cpages) {
pdd = svm_range_get_pdd_by_node(prange, node);
if (pdd)
WRITE_ONCE(pdd->page_in, pdd->page_in + cpages);
return cpages;
}
return r;
}
/**
* svm_migrate_ram_to_vram - migrate svm range from system to device
* @prange: range structure
* @best_loc: the device to migrate to
* @mm: the process mm structure
* @trigger: reason of migration
*
* Context: Process context, caller holds mmap read lock, svms lock, prange lock
*
* Return:
* 0 - OK, otherwise error code
*/
static int
svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
struct mm_struct *mm, uint32_t trigger)
{
unsigned long addr, start, end;
struct vm_area_struct *vma;
uint64_t ttm_res_offset;
struct kfd_node *node;
unsigned long cpages = 0;
long r = 0;
if (prange->actual_loc == best_loc) {
pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
prange->svms, prange->start, prange->last, best_loc);
return 0;
}
node = svm_range_get_node_by_id(prange, best_loc);
if (!node) {
pr_debug("failed to get kfd node by id 0x%x\n", best_loc);
return -ENODEV;
}
pr_debug("svms 0x%p [0x%lx 0x%lx] to gpu 0x%x\n", prange->svms,
prange->start, prange->last, best_loc);
start = prange->start << PAGE_SHIFT;
end = (prange->last + 1) << PAGE_SHIFT;
r = svm_range_vram_node_new(node, prange, true);
if (r) {
dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r);
return r;
}
ttm_res_offset = prange->offset << PAGE_SHIFT;
for (addr = start; addr < end;) {
unsigned long next;
vma = vma_lookup(mm, addr);
if (!vma)
break;
next = min(vma->vm_end, end);
r = svm_migrate_vma_to_vram(node, prange, vma, addr, next, trigger, ttm_res_offset);
if (r < 0) {
pr_debug("failed %ld to migrate\n", r);
break;
} else {
cpages += r;
}
ttm_res_offset += next - addr;
addr = next;
}
if (cpages) {
prange->actual_loc = best_loc;
svm_range_free_dma_mappings(prange, true);
} else {
svm_range_vram_node_free(prange);
}
return r < 0 ? r : 0;
}
static void svm_migrate_page_free(struct page *page)
{
struct svm_range_bo *svm_bo = page->zone_device_data;
if (svm_bo) {
pr_debug_ratelimited("ref: %d\n", kref_read(&svm_bo->kref));
svm_range_bo_unref_async(svm_bo);
}
}
static int
svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
dma_addr_t *scratch, uint64_t npages)
{
struct device *dev = adev->dev;
uint64_t *src;
dma_addr_t *dst;
struct page *dpage;
uint64_t i = 0, j;
uint64_t addr;
int r = 0;
pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
prange->last);
addr = prange->start << PAGE_SHIFT;
src = (uint64_t *)(scratch + npages);
dst = scratch;
for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
struct page *spage;
spage = migrate_pfn_to_page(migrate->src[i]);
if (!spage || !is_zone_device_page(spage)) {
pr_debug("invalid page. Could be in CPU already svms 0x%p [0x%lx 0x%lx]\n",
prange->svms, prange->start, prange->last);
if (j) {
r = svm_migrate_copy_memory_gart(adev, dst + i - j,
src + i - j, j,
FROM_VRAM_TO_RAM,
mfence);
if (r)
goto out_oom;
j = 0;
}
continue;
}
src[i] = svm_migrate_addr(adev, spage);
if (j > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
r = svm_migrate_copy_memory_gart(adev, dst + i - j,
src + i - j, j,
FROM_VRAM_TO_RAM,
mfence);
if (r)
goto out_oom;
j = 0;
}
dpage = svm_migrate_get_sys_page(migrate->vma, addr);
if (!dpage) {
pr_debug("failed get page svms 0x%p [0x%lx 0x%lx]\n",
prange->svms, prange->start, prange->last);
r = -ENOMEM;
goto out_oom;
}
dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE);
r = dma_mapping_error(dev, dst[i]);
if (r) {
dev_err(adev->dev, "%s: fail %d dma_map_page\n", __func__, r);
goto out_oom;
}
pr_debug_ratelimited("dma mapping dst to 0x%llx, pfn 0x%lx\n",
dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));
migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
j++;
}
r = svm_migrate_copy_memory_gart(adev, dst + i - j, src + i - j, j,
FROM_VRAM_TO_RAM, mfence);
out_oom:
if (r) {
pr_debug("failed %d copy to ram\n", r);
while (i--) {
svm_migrate_put_sys_page(dst[i]);
migrate->dst[i] = 0;
}
}
return r;
}
/**
* svm_migrate_vma_to_ram - migrate range inside one vma from device to system
*
* @prange: svm range structure
* @vma: vm_area_struct that range [start, end] belongs to
 * @start: range start virtual address in bytes
 * @end: range end virtual address in bytes
 * @node: kfd node device to migrate from
 * @trigger: reason of migration
 * @fault_page: the faulting page (vmf->page) when called from the
 *              svm_migrate_to_ram() CPU page fault handler, NULL otherwise
 *
 * Context: Process context, caller holds mmap read lock, prange->migrate_mutex
*
* Return:
* 0 - success with all pages migrated
* negative values - indicate error
* positive values - partial migration, number of pages not migrated
*/
static long
svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
struct vm_area_struct *vma, uint64_t start, uint64_t end,
uint32_t trigger, struct page *fault_page)
{
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
uint64_t npages = (end - start) >> PAGE_SHIFT;
unsigned long upages = npages;
unsigned long cpages = 0;
struct amdgpu_device *adev = node->adev;
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
struct migrate_vma migrate = { 0 };
dma_addr_t *scratch;
void *buf;
int r = -ENOMEM;
memset(&migrate, 0, sizeof(migrate));
migrate.vma = vma;
migrate.start = start;
migrate.end = end;
migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
if (adev->gmc.xgmi.connected_to_cpu)
migrate.flags = MIGRATE_VMA_SELECT_DEVICE_COHERENT;
else
migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
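	/* One buffer backs migrate.src[], migrate.dst[], the scratch dma address
	 * array and the VRAM source address array used by svm_migrate_copy_to_ram()
	 */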
buf = kvcalloc(npages,
2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
GFP_KERNEL);
if (!buf)
goto out;
migrate.src = buf;
migrate.dst = migrate.src + npages;
migrate.fault_page = fault_page;
scratch = (dma_addr_t *)(migrate.dst + npages);
kfd_smi_event_migration_start(node, p->lead_thread->pid,
start >> PAGE_SHIFT, end >> PAGE_SHIFT,
node->id, 0, prange->prefetch_loc,
prange->preferred_loc, trigger);
r = migrate_vma_setup(&migrate);
if (r) {
dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
__func__, r, prange->start, prange->last);
goto out_free;
}
cpages = migrate.cpages;
if (!cpages) {
pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
prange->start, prange->last);
upages = svm_migrate_unsuccessful_pages(&migrate);
goto out_free;
}
if (cpages != npages)
pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
cpages, npages);
else
pr_debug("0x%lx pages migrated\n", cpages);
r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
scratch, npages);
migrate_vma_pages(&migrate);
upages = svm_migrate_unsuccessful_pages(&migrate);
pr_debug("unsuccessful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
upages, cpages, migrate.npages);
svm_migrate_copy_done(adev, mfence);
migrate_vma_finalize(&migrate);
kfd_smi_event_migration_end(node, p->lead_thread->pid,
start >> PAGE_SHIFT, end >> PAGE_SHIFT,
node->id, 0, trigger);
svm_range_dma_unmap(adev->dev, scratch, 0, npages);
out_free:
kvfree(buf);
out:
if (!r && cpages) {
pdd = svm_range_get_pdd_by_node(prange, node);
if (pdd)
WRITE_ONCE(pdd->page_out, pdd->page_out + cpages);
}
return r ? r : upages;
}
/**
* svm_migrate_vram_to_ram - migrate svm range from device to system
* @prange: range structure
* @mm: process mm, use current->mm if NULL
* @trigger: reason of migration
 * @fault_page: the faulting page (vmf->page) when called from the
 *              svm_migrate_to_ram() CPU page fault handler, NULL otherwise
 *
 * Context: Process context, caller holds mmap read lock, prange->migrate_mutex
*
* Return:
* 0 - OK, otherwise error code
*/
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
uint32_t trigger, struct page *fault_page)
{
struct kfd_node *node;
struct vm_area_struct *vma;
unsigned long addr;
unsigned long start;
unsigned long end;
unsigned long upages = 0;
long r = 0;
if (!prange->actual_loc) {
pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
prange->start, prange->last);
return 0;
}
node = svm_range_get_node_by_id(prange, prange->actual_loc);
if (!node) {
pr_debug("failed to get kfd node by id 0x%x\n", prange->actual_loc);
return -ENODEV;
}
pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n",
prange->svms, prange, prange->start, prange->last,
prange->actual_loc);
start = prange->start << PAGE_SHIFT;
end = (prange->last + 1) << PAGE_SHIFT;
for (addr = start; addr < end;) {
unsigned long next;
vma = vma_lookup(mm, addr);
if (!vma) {
pr_debug("failed to find vma for prange %p\n", prange);
r = -EFAULT;
break;
}
next = min(vma->vm_end, end);
r = svm_migrate_vma_to_ram(node, prange, vma, addr, next, trigger,
fault_page);
if (r < 0) {
pr_debug("failed %ld to migrate prange %p\n", r, prange);
break;
} else {
upages += r;
}
addr = next;
}
if (r >= 0 && !upages) {
svm_range_vram_node_free(prange);
prange->actual_loc = 0;
}
return r < 0 ? r : 0;
}
/**
* svm_migrate_vram_to_vram - migrate svm range from device to device
* @prange: range structure
* @best_loc: the device to migrate to
* @mm: process mm, use current->mm if NULL
* @trigger: reason of migration
*
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
*
* Return:
* 0 - OK, otherwise error code
*/
static int
svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
struct mm_struct *mm, uint32_t trigger)
{
int r, retries = 3;
/*
* TODO: for both devices with PCIe large bar or on same xgmi hive, skip
* system memory as migration bridge
*/
pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);
do {
r = svm_migrate_vram_to_ram(prange, mm, trigger, NULL);
if (r)
return r;
} while (prange->actual_loc && --retries);
if (prange->actual_loc)
return -EDEADLK;
return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
}
int
svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
struct mm_struct *mm, uint32_t trigger)
{
if (!prange->actual_loc)
return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
else
return svm_migrate_vram_to_vram(prange, best_loc, mm, trigger);
}
/**
* svm_migrate_to_ram - CPU page fault handler
* @vmf: CPU vm fault vma, address
*
* Context: vm fault handler, caller holds the mmap read lock
*
* Return:
* 0 - OK
* VM_FAULT_SIGBUS - notice application to have SIGBUS page fault
*/
static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
{
unsigned long addr = vmf->address;
struct svm_range_bo *svm_bo;
enum svm_work_list_ops op;
struct svm_range *parent;
struct svm_range *prange;
struct kfd_process *p;
struct mm_struct *mm;
int r = 0;
svm_bo = vmf->page->zone_device_data;
if (!svm_bo) {
pr_debug("failed get device page at addr 0x%lx\n", addr);
return VM_FAULT_SIGBUS;
}
if (!mmget_not_zero(svm_bo->eviction_fence->mm)) {
pr_debug("addr 0x%lx of process mm is destroyed\n", addr);
return VM_FAULT_SIGBUS;
}
mm = svm_bo->eviction_fence->mm;
if (mm != vmf->vma->vm_mm)
pr_debug("addr 0x%lx is COW mapping in child process\n", addr);
p = kfd_lookup_process_by_mm(mm);
if (!p) {
pr_debug("failed find process at fault address 0x%lx\n", addr);
r = VM_FAULT_SIGBUS;
goto out_mmput;
}
if (READ_ONCE(p->svms.faulting_task) == current) {
pr_debug("skipping ram migration\n");
r = 0;
goto out_unref_process;
}
pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr);
addr >>= PAGE_SHIFT;
mutex_lock(&p->svms.lock);
prange = svm_range_from_addr(&p->svms, addr, &parent);
if (!prange) {
pr_debug("failed get range svms 0x%p addr 0x%lx\n", &p->svms, addr);
r = -EFAULT;
goto out_unlock_svms;
}
mutex_lock(&parent->migrate_mutex);
if (prange != parent)
mutex_lock_nested(&prange->migrate_mutex, 1);
if (!prange->actual_loc)
goto out_unlock_prange;
svm_range_lock(parent);
if (prange != parent)
mutex_lock_nested(&prange->lock, 1);
r = svm_range_split_by_granularity(p, mm, addr, parent, prange);
if (prange != parent)
mutex_unlock(&prange->lock);
svm_range_unlock(parent);
if (r) {
pr_debug("failed %d to split range by granularity\n", r);
goto out_unlock_prange;
}
r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm,
KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
vmf->page);
if (r)
pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n",
r, prange->svms, prange, prange->start, prange->last);
/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
if (p->xnack_enabled && parent == prange)
op = SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP;
else
op = SVM_OP_UPDATE_RANGE_NOTIFIER;
svm_range_add_list_work(&p->svms, parent, mm, op);
schedule_deferred_list_work(&p->svms);
out_unlock_prange:
if (prange != parent)
mutex_unlock(&prange->migrate_mutex);
mutex_unlock(&parent->migrate_mutex);
out_unlock_svms:
mutex_unlock(&p->svms.lock);
out_unref_process:
pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr);
kfd_unref_process(p);
out_mmput:
mmput(mm);
return r ? VM_FAULT_SIGBUS : 0;
}
static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {
.page_free = svm_migrate_page_free,
.migrate_to_ram = svm_migrate_to_ram,
};
/* Each VRAM page uses sizeof(struct page) of system memory */
#define SVM_HMM_PAGE_STRUCT_SIZE(size) ((size)/PAGE_SIZE * sizeof(struct page))
int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
{
struct amdgpu_kfd_dev *kfddev = &adev->kfd;
struct dev_pagemap *pgmap;
struct resource *res = NULL;
unsigned long size;
void *r;
/* Page migration works on gfx9 or newer */
if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 1))
return -EINVAL;
if (adev->gmc.is_app_apu)
return 0;
pgmap = &kfddev->pgmap;
memset(pgmap, 0, sizeof(*pgmap));
/* TODO: register all vram to HMM for now.
* should remove reserved size
*/
size = ALIGN(adev->gmc.real_vram_size, 2ULL << 20);
if (adev->gmc.xgmi.connected_to_cpu) {
pgmap->range.start = adev->gmc.aper_base;
pgmap->range.end = adev->gmc.aper_base + adev->gmc.aper_size - 1;
pgmap->type = MEMORY_DEVICE_COHERENT;
} else {
res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
if (IS_ERR(res))
return -ENOMEM;
pgmap->range.start = res->start;
pgmap->range.end = res->end;
pgmap->type = MEMORY_DEVICE_PRIVATE;
}
pgmap->nr_range = 1;
pgmap->ops = &svm_migrate_pgmap_ops;
pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
pgmap->flags = 0;
/* Device manager releases device-specific resources, memory region and
* pgmap when driver disconnects from device.
*/
r = devm_memremap_pages(adev->dev, pgmap);
if (IS_ERR(r)) {
pr_err("failed to register HMM device memory\n");
		if (pgmap->type == MEMORY_DEVICE_PRIVATE)
			devm_release_mem_region(adev->dev, res->start, resource_size(res));
		/* Disable SVM support capability */
		pgmap->type = 0;
return PTR_ERR(r);
}
pr_debug("reserve %ldMB system memory for VRAM pages struct\n",
SVM_HMM_PAGE_STRUCT_SIZE(size) >> 20);
amdgpu_amdkfd_reserve_system_mem(SVM_HMM_PAGE_STRUCT_SIZE(size));
pr_info("HMM registered %ldMB device memory\n", size >> 20);
return 0;
}
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_migrate.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2014-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/slab.h>
#include <linux/mutex.h>
#include "kfd_device_queue_manager.h"
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
unsigned int buffer_size_bytes)
{
unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);
WARN((temp * sizeof(uint32_t)) > buffer_size_bytes,
"Runlist IB overflow");
*wptr = temp;
}
static void pm_calc_rlib_size(struct packet_manager *pm,
unsigned int *rlib_size,
bool *over_subscription)
{
unsigned int process_count, queue_count, compute_queue_count, gws_queue_count;
unsigned int map_queue_size;
unsigned int max_proc_per_quantum = 1;
struct kfd_node *dev = pm->dqm->dev;
process_count = pm->dqm->processes_count;
queue_count = pm->dqm->active_queue_count;
compute_queue_count = pm->dqm->active_cp_queue_count;
gws_queue_count = pm->dqm->gws_queue_count;
/* check if there is over subscription
* Note: the arbitration between the number of VMIDs and
* hws_max_conc_proc has been done in
* kgd2kfd_device_init().
*/
*over_subscription = false;
if (dev->max_proc_per_quantum > 1)
max_proc_per_quantum = dev->max_proc_per_quantum;
if ((process_count > max_proc_per_quantum) ||
compute_queue_count > get_cp_queues_num(pm->dqm) ||
gws_queue_count > 1) {
*over_subscription = true;
pr_debug("Over subscribed runlist\n");
}
map_queue_size = pm->pmf->map_queues_size;
/* calculate run list ib allocation size */
*rlib_size = process_count * pm->pmf->map_process_size +
queue_count * map_queue_size;
/*
* Increase the allocation size in case we need a chained run list
 * due to oversubscription
*/
if (*over_subscription)
*rlib_size += pm->pmf->runlist_size;
pr_debug("runlist ib size %d\n", *rlib_size);
}
static int pm_allocate_runlist_ib(struct packet_manager *pm,
unsigned int **rl_buffer,
uint64_t *rl_gpu_buffer,
unsigned int *rl_buffer_size,
bool *is_over_subscription)
{
int retval;
if (WARN_ON(pm->allocated))
return -EINVAL;
pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
mutex_lock(&pm->lock);
retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
&pm->ib_buffer_obj);
if (retval) {
pr_err("Failed to allocate runlist IB\n");
goto out;
}
*(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
*rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;
memset(*rl_buffer, 0, *rl_buffer_size);
pm->allocated = true;
out:
mutex_unlock(&pm->lock);
return retval;
}
static int pm_create_runlist_ib(struct packet_manager *pm,
struct list_head *queues,
uint64_t *rl_gpu_addr,
size_t *rl_size_bytes)
{
unsigned int alloc_size_bytes;
unsigned int *rl_buffer, rl_wptr, i;
int retval, processes_mapped;
struct device_process_node *cur;
struct qcm_process_device *qpd;
struct queue *q;
struct kernel_queue *kq;
bool is_over_subscription;
rl_wptr = retval = processes_mapped = 0;
retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
&alloc_size_bytes, &is_over_subscription);
if (retval)
return retval;
*rl_size_bytes = alloc_size_bytes;
pm->ib_size_bytes = alloc_size_bytes;
pr_debug("Building runlist ib process count: %d queues count %d\n",
pm->dqm->processes_count, pm->dqm->active_queue_count);
/* build the run list ib packet */
list_for_each_entry(cur, queues, list) {
qpd = cur->qpd;
/* build map process packet */
if (processes_mapped >= pm->dqm->processes_count) {
pr_debug("Not enough space left in runlist IB\n");
pm_release_ib(pm);
return -ENOMEM;
}
retval = pm->pmf->map_process(pm, &rl_buffer[rl_wptr], qpd);
if (retval)
return retval;
processes_mapped++;
inc_wptr(&rl_wptr, pm->pmf->map_process_size,
alloc_size_bytes);
list_for_each_entry(kq, &qpd->priv_queue_list, list) {
if (!kq->queue->properties.is_active)
continue;
pr_debug("static_queue, mapping kernel q %d, is debug status %d\n",
kq->queue->queue, qpd->is_debug);
retval = pm->pmf->map_queues(pm,
&rl_buffer[rl_wptr],
kq->queue,
qpd->is_debug);
if (retval)
return retval;
inc_wptr(&rl_wptr,
pm->pmf->map_queues_size,
alloc_size_bytes);
}
list_for_each_entry(q, &qpd->queues_list, list) {
if (!q->properties.is_active)
continue;
pr_debug("static_queue, mapping user queue %d, is debug status %d\n",
q->queue, qpd->is_debug);
retval = pm->pmf->map_queues(pm,
&rl_buffer[rl_wptr],
q,
qpd->is_debug);
if (retval)
return retval;
inc_wptr(&rl_wptr,
pm->pmf->map_queues_size,
alloc_size_bytes);
}
}
pr_debug("Finished map process and queues to runlist\n");
if (is_over_subscription) {
if (!pm->is_over_subscription)
pr_warn("Runlist is getting oversubscribed. Expect reduced ROCm performance.\n");
retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
*rl_gpu_addr,
alloc_size_bytes / sizeof(uint32_t),
true);
}
pm->is_over_subscription = is_over_subscription;
for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
pr_debug("0x%2X ", rl_buffer[i]);
pr_debug("\n");
return retval;
}
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
{
switch (dqm->dev->adev->asic_type) {
case CHIP_KAVERI:
case CHIP_HAWAII:
/* PM4 packet structures on CIK are the same as on VI */
case CHIP_CARRIZO:
case CHIP_TONGA:
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_VEGAM:
pm->pmf = &kfd_vi_pm_funcs;
break;
default:
if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 2) ||
KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 3))
pm->pmf = &kfd_aldebaran_pm_funcs;
else if (KFD_GC_VERSION(dqm->dev) >= IP_VERSION(9, 0, 1))
pm->pmf = &kfd_v9_pm_funcs;
else {
WARN(1, "Unexpected ASIC family %u",
dqm->dev->adev->asic_type);
return -EINVAL;
}
}
pm->dqm = dqm;
mutex_init(&pm->lock);
pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
if (!pm->priv_queue) {
mutex_destroy(&pm->lock);
return -ENOMEM;
}
pm->allocated = false;
return 0;
}
void pm_uninit(struct packet_manager *pm, bool hanging)
{
mutex_destroy(&pm->lock);
kernel_queue_uninit(pm->priv_queue, hanging);
pm->priv_queue = NULL;
}
int pm_send_set_resources(struct packet_manager *pm,
struct scheduling_resources *res)
{
uint32_t *buffer, size;
int retval = 0;
size = pm->pmf->set_resources_size;
mutex_lock(&pm->lock);
kq_acquire_packet_buffer(pm->priv_queue,
size / sizeof(uint32_t),
(unsigned int **)&buffer);
if (!buffer) {
pr_err("Failed to allocate buffer on kernel queue\n");
retval = -ENOMEM;
goto out;
}
retval = pm->pmf->set_resources(pm, buffer, res);
if (!retval)
kq_submit_packet(pm->priv_queue);
else
kq_rollback_packet(pm->priv_queue);
out:
mutex_unlock(&pm->lock);
return retval;
}
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
{
uint64_t rl_gpu_ib_addr;
uint32_t *rl_buffer;
size_t rl_ib_size, packet_size_dwords;
int retval;
retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
&rl_ib_size);
if (retval)
goto fail_create_runlist_ib;
pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);
packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t);
mutex_lock(&pm->lock);
retval = kq_acquire_packet_buffer(pm->priv_queue,
packet_size_dwords, &rl_buffer);
if (retval)
goto fail_acquire_packet_buffer;
retval = pm->pmf->runlist(pm, rl_buffer, rl_gpu_ib_addr,
rl_ib_size / sizeof(uint32_t), false);
if (retval)
goto fail_create_runlist;
kq_submit_packet(pm->priv_queue);
mutex_unlock(&pm->lock);
return retval;
fail_create_runlist:
kq_rollback_packet(pm->priv_queue);
fail_acquire_packet_buffer:
mutex_unlock(&pm->lock);
fail_create_runlist_ib:
pm_release_ib(pm);
return retval;
}
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
uint64_t fence_value)
{
uint32_t *buffer, size;
int retval = 0;
if (WARN_ON(!fence_address))
return -EFAULT;
size = pm->pmf->query_status_size;
mutex_lock(&pm->lock);
kq_acquire_packet_buffer(pm->priv_queue,
size / sizeof(uint32_t), (unsigned int **)&buffer);
if (!buffer) {
pr_err("Failed to allocate buffer on kernel queue\n");
retval = -ENOMEM;
goto out;
}
retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value);
if (!retval)
kq_submit_packet(pm->priv_queue);
else
kq_rollback_packet(pm->priv_queue);
out:
mutex_unlock(&pm->lock);
return retval;
}
int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period)
{
int retval = 0;
uint32_t *buffer, size;
size = pm->pmf->set_grace_period_size;
mutex_lock(&pm->lock);
if (size) {
kq_acquire_packet_buffer(pm->priv_queue,
size / sizeof(uint32_t),
(unsigned int **)&buffer);
if (!buffer) {
pr_err("Failed to allocate buffer on kernel queue\n");
retval = -ENOMEM;
goto out;
}
retval = pm->pmf->set_grace_period(pm, buffer, grace_period);
if (!retval)
kq_submit_packet(pm->priv_queue);
else
kq_rollback_packet(pm->priv_queue);
}
out:
mutex_unlock(&pm->lock);
return retval;
}
int pm_send_unmap_queue(struct packet_manager *pm,
enum kfd_unmap_queues_filter filter,
uint32_t filter_param, bool reset)
{
uint32_t *buffer, size;
int retval = 0;
size = pm->pmf->unmap_queues_size;
mutex_lock(&pm->lock);
kq_acquire_packet_buffer(pm->priv_queue,
size / sizeof(uint32_t), (unsigned int **)&buffer);
if (!buffer) {
pr_err("Failed to allocate buffer on kernel queue\n");
retval = -ENOMEM;
goto out;
}
retval = pm->pmf->unmap_queues(pm, buffer, filter, filter_param, reset);
if (!retval)
kq_submit_packet(pm->priv_queue);
else
kq_rollback_packet(pm->priv_queue);
out:
mutex_unlock(&pm->lock);
return retval;
}
void pm_release_ib(struct packet_manager *pm)
{
mutex_lock(&pm->lock);
if (pm->allocated) {
kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
pm->allocated = false;
}
mutex_unlock(&pm->lock);
}
#if defined(CONFIG_DEBUG_FS)
int pm_debugfs_runlist(struct seq_file *m, void *data)
{
struct packet_manager *pm = data;
mutex_lock(&pm->lock);
if (!pm->allocated) {
seq_puts(m, " No active runlist\n");
goto out;
}
seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
pm->ib_buffer_obj->cpu_ptr, pm->ib_size_bytes, false);
out:
mutex_unlock(&pm->lock);
return 0;
}
int pm_debugfs_hang_hws(struct packet_manager *pm)
{
uint32_t *buffer, size;
int r = 0;
if (!pm->priv_queue)
return -EAGAIN;
size = pm->pmf->query_status_size;
mutex_lock(&pm->lock);
kq_acquire_packet_buffer(pm->priv_queue,
size / sizeof(uint32_t), (unsigned int **)&buffer);
if (!buffer) {
pr_err("Failed to allocate buffer on kernel queue\n");
r = -ENOMEM;
goto out;
}
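	/* Fill the packet with a bogus 0x55 pattern the HWS cannot parse, hanging it on purpose */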
memset(buffer, 0x55, size);
kq_submit_packet(pm->priv_queue);
pr_info("Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
buffer[0], buffer[1], buffer[2], buffer[3],
buffer[4], buffer[5], buffer[6]);
out:
mutex_unlock(&pm->lock);
return r;
}
#endif
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2014-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "kfd_device_queue_manager.h"
#include "cik_regs.h"
#include "oss/oss_2_4_sh_mask.h"
#include "gca/gfx_7_2_sh_mask.h"
static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
uint64_t alternate_aperture_size);
static int update_qpd_cik(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static void init_sdma_vm(struct device_queue_manager *dqm,
struct queue *q,
struct qcm_process_device *qpd);
void device_queue_manager_init_cik(
struct device_queue_manager_asic_ops *asic_ops)
{
asic_ops->set_cache_memory_policy = set_cache_memory_policy_cik;
asic_ops->update_qpd = update_qpd_cik;
asic_ops->init_sdma_vm = init_sdma_vm;
asic_ops->mqd_manager_init = mqd_manager_init_cik;
}
static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
{
/* In 64-bit mode, we can only control the top 3 bits of the LDS,
* scratch and GPUVM apertures.
* The hardware fills in the remaining 59 bits according to the
* following pattern:
* LDS: X0000000'00000000 - X0000001'00000000 (4GB)
* Scratch: X0000001'00000000 - X0000002'00000000 (4GB)
* GPUVM: Y0010000'00000000 - Y0020000'00000000 (1TB)
*
* (where X/Y is the configurable nybble with the low-bit 0)
*
* LDS and scratch will have the same top nybble programmed in the
* top 3 bits of SH_MEM_BASES.PRIVATE_BASE.
* GPUVM can have a different top nybble programmed in the
* top 3 bits of SH_MEM_BASES.SHARED_BASE.
* We don't bother to support different top nybbles
* for LDS/Scratch and GPUVM.
*/
WARN_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
top_address_nybble == 0);
return PRIVATE_BASE(top_address_nybble << 12) |
SHARED_BASE(top_address_nybble << 12);
}
static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
uint64_t alternate_aperture_size)
{
uint32_t default_mtype;
uint32_t ape1_mtype;
default_mtype = (default_policy == cache_policy_coherent) ?
MTYPE_NONCACHED :
MTYPE_CACHED;
ape1_mtype = (alternate_policy == cache_policy_coherent) ?
MTYPE_NONCACHED :
MTYPE_CACHED;
qpd->sh_mem_config = (qpd->sh_mem_config & PTR32)
| ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED)
| DEFAULT_MTYPE(default_mtype)
| APE1_MTYPE(ape1_mtype);
return true;
}
static int update_qpd_cik(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct kfd_process_device *pdd;
unsigned int temp;
pdd = qpd_to_pdd(qpd);
/* check if sh_mem_config register already configured */
if (qpd->sh_mem_config == 0) {
qpd->sh_mem_config =
ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) |
DEFAULT_MTYPE(MTYPE_NONCACHED) |
APE1_MTYPE(MTYPE_NONCACHED);
qpd->sh_mem_ape1_limit = 0;
qpd->sh_mem_ape1_base = 0;
}
/* On dGPU we're always in GPUVM64 addressing mode with 64-bit
* aperture addresses.
*/
temp = get_sh_mem_bases_nybble_64(pdd);
qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
pr_debug("is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
return 0;
}
static void init_sdma_vm(struct device_queue_manager *dqm,
struct queue *q,
struct qcm_process_device *qpd)
{
/* On dGPU we're always in GPUVM64 addressing mode with 64-bit
* aperture addresses.
*/
q->properties.sdma_vm_addr =
((get_sh_mem_bases_nybble_64(qpd_to_pdd(qpd))) <<
SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE__SHIFT) &
SDMA0_RLC0_VIRTUAL_ADDR__SHARED_BASE_MASK;
}
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2014-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/sched.h>
#include <linux/device.h>
#include "kfd_priv.h"
#include "amdgpu_amdkfd.h"
static int kfd_init(void)
{
int err;
/* Verify module parameters */
if ((sched_policy < KFD_SCHED_POLICY_HWS) ||
(sched_policy > KFD_SCHED_POLICY_NO_HWS)) {
pr_err("sched_policy has invalid value\n");
return -EINVAL;
}
/* Verify module parameters */
if ((max_num_of_queues_per_device < 1) ||
(max_num_of_queues_per_device >
KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) {
pr_err("max_num_of_queues_per_device must be between 1 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
return -EINVAL;
}
err = kfd_chardev_init();
if (err < 0)
goto err_ioctl;
err = kfd_topology_init();
if (err < 0)
goto err_topology;
err = kfd_process_create_wq();
if (err < 0)
goto err_create_wq;
/* Ignore the return value, so that we can continue
	 * to init the KFD, even if procfs isn't created
*/
kfd_procfs_init();
kfd_debugfs_init();
return 0;
err_create_wq:
kfd_topology_shutdown();
err_topology:
kfd_chardev_exit();
err_ioctl:
pr_err("KFD is disabled due to module initialization failure\n");
return err;
}
static void kfd_exit(void)
{
kfd_cleanup_processes();
kfd_debugfs_fini();
kfd_process_destroy_wq();
kfd_procfs_shutdown();
kfd_topology_shutdown();
kfd_chardev_exit();
}
int kgd2kfd_init(void)
{
return kfd_init();
}
void kgd2kfd_exit(void)
{
kfd_exit();
}
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_module.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2020-2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/types.h>
#include <linux/sched/task.h>
#include <linux/dynamic_debug.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/drm_exec.h>
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_hmm.h"
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"
#include "kfd_smi_events.h"
#ifdef dev_fmt
#undef dev_fmt
#endif
#define dev_fmt(fmt) "kfd_svm: %s: " fmt, __func__
#define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1
/* Long enough to ensure no retry fault comes after svm range is restored and
* page table is updated.
*/
#define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING (2UL * NSEC_PER_MSEC)
#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
#define dynamic_svm_range_dump(svms) \
_dynamic_func_call_no_desc("svm_range_dump", svm_range_debug_dump, svms)
#else
#define dynamic_svm_range_dump(svms) \
do { if (0) svm_range_debug_dump(svms); } while (0)
#endif
/* A giant svm range is split into smaller ranges based on this value. It is
 * the minimum over all dGPUs/APUs of 1/32 of the VRAM size, clamped between
 * 2MB and 1GB and rounded down to a power of two.
 */
static uint64_t max_svm_range_pages;
struct criu_svm_metadata {
struct list_head list;
struct kfd_criu_svm_range_priv_data data;
};
static void svm_range_evict_svm_bo_worker(struct work_struct *work);
static bool
svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
const struct mmu_notifier_range *range,
unsigned long cur_seq);
static int
svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
uint64_t *bo_s, uint64_t *bo_l);
static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
.invalidate = svm_range_cpu_invalidate_pagetables,
};
/**
* svm_range_unlink - unlink svm_range from lists and interval tree
* @prange: svm range structure to be removed
*
* Remove the svm_range from the svms and svm_bo lists and the svms
* interval tree.
*
* Context: The caller must hold svms->lock
*/
static void svm_range_unlink(struct svm_range *prange)
{
pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
prange, prange->start, prange->last);
if (prange->svm_bo) {
spin_lock(&prange->svm_bo->list_lock);
list_del(&prange->svm_bo_list);
spin_unlock(&prange->svm_bo->list_lock);
}
list_del(&prange->list);
if (prange->it_node.start != 0 && prange->it_node.last != 0)
interval_tree_remove(&prange->it_node, &prange->svms->objects);
}
static void
svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
{
pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
prange, prange->start, prange->last);
mmu_interval_notifier_insert_locked(&prange->notifier, mm,
prange->start << PAGE_SHIFT,
prange->npages << PAGE_SHIFT,
&svm_range_mn_ops);
}
/**
* svm_range_add_to_svms - add svm range to svms
* @prange: svm range structure to be added
*
* Add the svm range to svms interval tree and link list
*
* Context: The caller must hold svms->lock
*/
static void svm_range_add_to_svms(struct svm_range *prange)
{
pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
prange, prange->start, prange->last);
list_move_tail(&prange->list, &prange->svms->list);
prange->it_node.start = prange->start;
prange->it_node.last = prange->last;
interval_tree_insert(&prange->it_node, &prange->svms->objects);
}
static void svm_range_remove_notifier(struct svm_range *prange)
{
pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
prange->svms, prange,
prange->notifier.interval_tree.start >> PAGE_SHIFT,
prange->notifier.interval_tree.last >> PAGE_SHIFT);
if (prange->notifier.interval_tree.start != 0 &&
prange->notifier.interval_tree.last != 0)
mmu_interval_notifier_remove(&prange->notifier);
}
static bool
svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
{
return dma_addr && !dma_mapping_error(dev, dma_addr) &&
!(dma_addr & SVM_RANGE_VRAM_DOMAIN);
}
static int
svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
unsigned long offset, unsigned long npages,
unsigned long *hmm_pfns, uint32_t gpuidx)
{
enum dma_data_direction dir = DMA_BIDIRECTIONAL;
dma_addr_t *addr = prange->dma_addr[gpuidx];
struct device *dev = adev->dev;
struct page *page;
int i, r;
if (!addr) {
addr = kvcalloc(prange->npages, sizeof(*addr), GFP_KERNEL);
if (!addr)
return -ENOMEM;
prange->dma_addr[gpuidx] = addr;
}
addr += offset;
for (i = 0; i < npages; i++) {
if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
page = hmm_pfn_to_page(hmm_pfns[i]);
if (is_zone_device_page(page)) {
struct amdgpu_device *bo_adev = prange->svm_bo->node->adev;
addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
bo_adev->vm_manager.vram_base_offset -
bo_adev->kfd.pgmap.range.start;
addr[i] |= SVM_RANGE_VRAM_DOMAIN;
pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]);
continue;
}
addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
r = dma_mapping_error(dev, addr[i]);
if (r) {
dev_err(dev, "failed %d dma_map_page\n", r);
return r;
}
pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
addr[i] >> PAGE_SHIFT, page_to_pfn(page));
}
return 0;
}
static int
svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
unsigned long offset, unsigned long npages,
unsigned long *hmm_pfns)
{
struct kfd_process *p;
uint32_t gpuidx;
int r;
p = container_of(prange->svms, struct kfd_process, svms);
for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
struct kfd_process_device *pdd;
pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
pdd = kfd_process_device_from_gpuidx(p, gpuidx);
if (!pdd) {
pr_debug("failed to find device idx %d\n", gpuidx);
return -EINVAL;
}
r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
hmm_pfns, gpuidx);
if (r)
break;
}
return r;
}
void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
unsigned long offset, unsigned long npages)
{
enum dma_data_direction dir = DMA_BIDIRECTIONAL;
int i;
if (!dma_addr)
return;
for (i = offset; i < offset + npages; i++) {
if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
continue;
pr_debug_ratelimited("unmap 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
dma_addr[i] = 0;
}
}
void svm_range_free_dma_mappings(struct svm_range *prange, bool unmap_dma)
{
struct kfd_process_device *pdd;
dma_addr_t *dma_addr;
struct device *dev;
struct kfd_process *p;
uint32_t gpuidx;
p = container_of(prange->svms, struct kfd_process, svms);
for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
dma_addr = prange->dma_addr[gpuidx];
if (!dma_addr)
continue;
pdd = kfd_process_device_from_gpuidx(p, gpuidx);
if (!pdd) {
pr_debug("failed to find device idx %d\n", gpuidx);
continue;
}
dev = &pdd->dev->adev->pdev->dev;
if (unmap_dma)
svm_range_dma_unmap(dev, dma_addr, 0, prange->npages);
kvfree(dma_addr);
prange->dma_addr[gpuidx] = NULL;
}
}
static void svm_range_free(struct svm_range *prange, bool do_unmap)
{
uint64_t size = (prange->last - prange->start + 1) << PAGE_SHIFT;
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
prange->start, prange->last);
svm_range_vram_node_free(prange);
svm_range_free_dma_mappings(prange, do_unmap);
if (do_unmap && !p->xnack_enabled) {
pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size);
amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
}
mutex_destroy(&prange->lock);
mutex_destroy(&prange->migrate_mutex);
kfree(prange);
}
static void
svm_range_set_default_attributes(int32_t *location, int32_t *prefetch_loc,
uint8_t *granularity, uint32_t *flags)
{
*location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
*prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
*granularity = 9;
*flags =
KFD_IOCTL_SVM_FLAG_HOST_ACCESS | KFD_IOCTL_SVM_FLAG_COHERENT;
}
static struct
svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
uint64_t last, bool update_mem_usage)
{
uint64_t size = last - start + 1;
struct svm_range *prange;
struct kfd_process *p;
prange = kzalloc(sizeof(*prange), GFP_KERNEL);
if (!prange)
return NULL;
p = container_of(svms, struct kfd_process, svms);
if (!p->xnack_enabled && update_mem_usage &&
amdgpu_amdkfd_reserve_mem_limit(NULL, size << PAGE_SHIFT,
KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0)) {
pr_info("SVM mapping failed, exceeds resident system memory limit\n");
kfree(prange);
return NULL;
}
prange->npages = size;
prange->svms = svms;
prange->start = start;
prange->last = last;
INIT_LIST_HEAD(&prange->list);
INIT_LIST_HEAD(&prange->update_list);
INIT_LIST_HEAD(&prange->svm_bo_list);
INIT_LIST_HEAD(&prange->deferred_list);
INIT_LIST_HEAD(&prange->child_list);
atomic_set(&prange->invalid, 0);
prange->validate_timestamp = 0;
mutex_init(&prange->migrate_mutex);
mutex_init(&prange->lock);
if (p->xnack_enabled)
bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
MAX_GPU_INSTANCE);
svm_range_set_default_attributes(&prange->preferred_loc,
&prange->prefetch_loc,
&prange->granularity, &prange->flags);
pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last);
return prange;
}
static bool svm_bo_ref_unless_zero(struct svm_range_bo *svm_bo)
{
if (!svm_bo || !kref_get_unless_zero(&svm_bo->kref))
return false;
return true;
}
static void svm_range_bo_release(struct kref *kref)
{
struct svm_range_bo *svm_bo;
svm_bo = container_of(kref, struct svm_range_bo, kref);
pr_debug("svm_bo 0x%p\n", svm_bo);
spin_lock(&svm_bo->list_lock);
while (!list_empty(&svm_bo->range_list)) {
struct svm_range *prange =
list_first_entry(&svm_bo->range_list,
struct svm_range, svm_bo_list);
/* list_del_init tells a concurrent svm_range_vram_node_new when
* it's safe to reuse the svm_bo pointer and svm_bo_list head.
*/
list_del_init(&prange->svm_bo_list);
spin_unlock(&svm_bo->list_lock);
pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
prange->start, prange->last);
mutex_lock(&prange->lock);
prange->svm_bo = NULL;
mutex_unlock(&prange->lock);
spin_lock(&svm_bo->list_lock);
}
spin_unlock(&svm_bo->list_lock);
if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base)) {
/* We're not in the eviction worker.
* Signal the fence and synchronize with any
* pending eviction work.
*/
dma_fence_signal(&svm_bo->eviction_fence->base);
cancel_work_sync(&svm_bo->eviction_work);
}
dma_fence_put(&svm_bo->eviction_fence->base);
amdgpu_bo_unref(&svm_bo->bo);
kfree(svm_bo);
}
static void svm_range_bo_wq_release(struct work_struct *work)
{
struct svm_range_bo *svm_bo;
svm_bo = container_of(work, struct svm_range_bo, release_work);
svm_range_bo_release(&svm_bo->kref);
}
static void svm_range_bo_release_async(struct kref *kref)
{
struct svm_range_bo *svm_bo;
svm_bo = container_of(kref, struct svm_range_bo, kref);
pr_debug("svm_bo 0x%p\n", svm_bo);
INIT_WORK(&svm_bo->release_work, svm_range_bo_wq_release);
schedule_work(&svm_bo->release_work);
}
void svm_range_bo_unref_async(struct svm_range_bo *svm_bo)
{
kref_put(&svm_bo->kref, svm_range_bo_release_async);
}
static void svm_range_bo_unref(struct svm_range_bo *svm_bo)
{
if (svm_bo)
kref_put(&svm_bo->kref, svm_range_bo_release);
}
static bool
svm_range_validate_svm_bo(struct kfd_node *node, struct svm_range *prange)
{
mutex_lock(&prange->lock);
if (!prange->svm_bo) {
mutex_unlock(&prange->lock);
return false;
}
if (prange->ttm_res) {
/* We still have a reference, all is well */
mutex_unlock(&prange->lock);
return true;
}
if (svm_bo_ref_unless_zero(prange->svm_bo)) {
/*
* Migrate from GPU to GPU, remove range from source svm_bo->node
* range list, and return false to allocate svm_bo from destination
* node.
*/
if (prange->svm_bo->node != node) {
mutex_unlock(&prange->lock);
spin_lock(&prange->svm_bo->list_lock);
list_del_init(&prange->svm_bo_list);
spin_unlock(&prange->svm_bo->list_lock);
svm_range_bo_unref(prange->svm_bo);
return false;
}
if (READ_ONCE(prange->svm_bo->evicting)) {
struct dma_fence *f;
struct svm_range_bo *svm_bo;
/* The BO is getting evicted,
* we need to get a new one
*/
mutex_unlock(&prange->lock);
svm_bo = prange->svm_bo;
f = dma_fence_get(&svm_bo->eviction_fence->base);
svm_range_bo_unref(prange->svm_bo);
/* wait for the fence to avoid long spin-loop
* at list_empty_careful
*/
dma_fence_wait(f, false);
dma_fence_put(f);
} else {
/* The BO was still around and we got
* a new reference to it
*/
mutex_unlock(&prange->lock);
pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
prange->svms, prange->start, prange->last);
prange->ttm_res = prange->svm_bo->bo->tbo.resource;
return true;
}
} else {
mutex_unlock(&prange->lock);
}
/* We need a new svm_bo. Spin-loop to wait for concurrent
* svm_range_bo_release to finish removing this range from
* its range list. After this, it is safe to reuse the
* svm_bo pointer and svm_bo_list head.
*/
while (!list_empty_careful(&prange->svm_bo_list))
;
return false;
}
static struct svm_range_bo *svm_range_bo_new(void)
{
struct svm_range_bo *svm_bo;
svm_bo = kzalloc(sizeof(*svm_bo), GFP_KERNEL);
if (!svm_bo)
return NULL;
kref_init(&svm_bo->kref);
INIT_LIST_HEAD(&svm_bo->range_list);
spin_lock_init(&svm_bo->list_lock);
return svm_bo;
}
int
svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
bool clear)
{
struct amdgpu_bo_param bp;
struct svm_range_bo *svm_bo;
struct amdgpu_bo_user *ubo;
struct amdgpu_bo *bo;
struct kfd_process *p;
struct mm_struct *mm;
int r;
p = container_of(prange->svms, struct kfd_process, svms);
pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms,
prange->start, prange->last);
if (svm_range_validate_svm_bo(node, prange))
return 0;
svm_bo = svm_range_bo_new();
if (!svm_bo) {
pr_debug("failed to alloc svm bo\n");
return -ENOMEM;
}
mm = get_task_mm(p->lead_thread);
if (!mm) {
pr_debug("failed to get mm\n");
kfree(svm_bo);
return -ESRCH;
}
svm_bo->node = node;
svm_bo->eviction_fence =
amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
mm,
svm_bo);
mmput(mm);
INIT_WORK(&svm_bo->eviction_work, svm_range_evict_svm_bo_worker);
svm_bo->evicting = 0;
memset(&bp, 0, sizeof(bp));
bp.size = prange->npages * PAGE_SIZE;
bp.byte_align = PAGE_SIZE;
bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
bp.flags |= clear ? AMDGPU_GEM_CREATE_VRAM_CLEARED : 0;
bp.flags |= AMDGPU_GEM_CREATE_DISCARDABLE;
bp.type = ttm_bo_type_device;
bp.resv = NULL;
if (node->xcp)
bp.xcp_id_plus1 = node->xcp->id + 1;
r = amdgpu_bo_create_user(node->adev, &bp, &ubo);
if (r) {
pr_debug("failed %d to create bo\n", r);
goto create_bo_failed;
}
bo = &ubo->bo;
pr_debug("alloc bo at offset 0x%lx size 0x%lx on partition %d\n",
bo->tbo.resource->start << PAGE_SHIFT, bp.size,
bp.xcp_id_plus1 - 1);
r = amdgpu_bo_reserve(bo, true);
if (r) {
pr_debug("failed %d to reserve bo\n", r);
goto reserve_bo_failed;
}
if (clear) {
r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
if (r) {
pr_debug("failed %d to sync bo\n", r);
amdgpu_bo_unreserve(bo);
goto reserve_bo_failed;
}
}
r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
if (r) {
pr_debug("failed %d to reserve bo\n", r);
amdgpu_bo_unreserve(bo);
goto reserve_bo_failed;
}
amdgpu_bo_fence(bo, &svm_bo->eviction_fence->base, true);
amdgpu_bo_unreserve(bo);
svm_bo->bo = bo;
prange->svm_bo = svm_bo;
prange->ttm_res = bo->tbo.resource;
prange->offset = 0;
spin_lock(&svm_bo->list_lock);
list_add(&prange->svm_bo_list, &svm_bo->range_list);
spin_unlock(&svm_bo->list_lock);
return 0;
reserve_bo_failed:
amdgpu_bo_unref(&bo);
create_bo_failed:
dma_fence_put(&svm_bo->eviction_fence->base);
kfree(svm_bo);
prange->ttm_res = NULL;
return r;
}
void svm_range_vram_node_free(struct svm_range *prange)
{
svm_range_bo_unref(prange->svm_bo);
prange->ttm_res = NULL;
}
struct kfd_node *
svm_range_get_node_by_id(struct svm_range *prange, uint32_t gpu_id)
{
struct kfd_process *p;
struct kfd_process_device *pdd;
p = container_of(prange->svms, struct kfd_process, svms);
pdd = kfd_process_device_data_by_id(p, gpu_id);
if (!pdd) {
pr_debug("failed to get kfd process device by id 0x%x\n", gpu_id);
return NULL;
}
return pdd->dev;
}
struct kfd_process_device *
svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node)
{
struct kfd_process *p;
p = container_of(prange->svms, struct kfd_process, svms);
return kfd_get_process_device_data(node, p);
}
static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
{
struct ttm_operation_ctx ctx = { false, false };
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}
static int
svm_range_check_attr(struct kfd_process *p,
uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
{
uint32_t i;
for (i = 0; i < nattr; i++) {
uint32_t val = attrs[i].value;
int gpuidx = MAX_GPU_INSTANCE;
switch (attrs[i].type) {
case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
val != KFD_IOCTL_SVM_LOCATION_UNDEFINED)
gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
break;
case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM)
gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
break;
case KFD_IOCTL_SVM_ATTR_ACCESS:
case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
break;
case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
break;
case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
break;
case KFD_IOCTL_SVM_ATTR_GRANULARITY:
break;
default:
pr_debug("unknown attr type 0x%x\n", attrs[i].type);
return -EINVAL;
}
if (gpuidx < 0) {
pr_debug("no GPU 0x%x found\n", val);
return -EINVAL;
} else if (gpuidx < MAX_GPU_INSTANCE &&
!test_bit(gpuidx, p->svms.bitmap_supported)) {
pr_debug("GPU 0x%x not supported\n", val);
return -EINVAL;
}
}
return 0;
}
static void
svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
bool *update_mapping)
{
uint32_t i;
int gpuidx;
for (i = 0; i < nattr; i++) {
switch (attrs[i].type) {
case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
prange->preferred_loc = attrs[i].value;
break;
case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
prange->prefetch_loc = attrs[i].value;
break;
case KFD_IOCTL_SVM_ATTR_ACCESS:
case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
if (!p->xnack_enabled)
*update_mapping = true;
gpuidx = kfd_process_gpuidx_from_gpuid(p,
attrs[i].value);
if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
bitmap_clear(prange->bitmap_access, gpuidx, 1);
bitmap_clear(prange->bitmap_aip, gpuidx, 1);
} else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
bitmap_set(prange->bitmap_access, gpuidx, 1);
bitmap_clear(prange->bitmap_aip, gpuidx, 1);
} else {
bitmap_clear(prange->bitmap_access, gpuidx, 1);
bitmap_set(prange->bitmap_aip, gpuidx, 1);
}
break;
case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
*update_mapping = true;
prange->flags |= attrs[i].value;
break;
case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
*update_mapping = true;
prange->flags &= ~attrs[i].value;
break;
case KFD_IOCTL_SVM_ATTR_GRANULARITY:
prange->granularity = attrs[i].value;
break;
default:
WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
}
}
}
static bool
svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
{
uint32_t i;
int gpuidx;
for (i = 0; i < nattr; i++) {
switch (attrs[i].type) {
case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
if (prange->preferred_loc != attrs[i].value)
return false;
break;
case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
/* Prefetch should always trigger a migration even
* if the value of the attribute didn't change.
*/
return false;
case KFD_IOCTL_SVM_ATTR_ACCESS:
case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
gpuidx = kfd_process_gpuidx_from_gpuid(p,
attrs[i].value);
if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
if (test_bit(gpuidx, prange->bitmap_access) ||
test_bit(gpuidx, prange->bitmap_aip))
return false;
} else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
if (!test_bit(gpuidx, prange->bitmap_access))
return false;
} else {
if (!test_bit(gpuidx, prange->bitmap_aip))
return false;
}
break;
case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
if ((prange->flags & attrs[i].value) != attrs[i].value)
return false;
break;
case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
if ((prange->flags & attrs[i].value) != 0)
return false;
break;
case KFD_IOCTL_SVM_ATTR_GRANULARITY:
if (prange->granularity != attrs[i].value)
return false;
break;
default:
WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
}
}
return !prange->is_error_flag;
}
/**
* svm_range_debug_dump - print all range information from svms
* @svms: svm range list header
*
* debug output svm range start, end, prefetch location from svms
* interval tree and link list
*
* Context: The caller must hold svms->lock
*/
static void svm_range_debug_dump(struct svm_range_list *svms)
{
struct interval_tree_node *node;
struct svm_range *prange;
pr_debug("dump svms 0x%p list\n", svms);
pr_debug("range\tstart\tpage\tend\t\tlocation\n");
list_for_each_entry(prange, &svms->list, list) {
pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
prange, prange->start, prange->npages,
prange->start + prange->npages - 1,
prange->actual_loc);
}
pr_debug("dump svms 0x%p interval tree\n", svms);
pr_debug("range\tstart\tpage\tend\t\tlocation\n");
node = interval_tree_iter_first(&svms->objects, 0, ~0ULL);
while (node) {
prange = container_of(node, struct svm_range, it_node);
pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
prange, prange->start, prange->npages,
prange->start + prange->npages - 1,
prange->actual_loc);
node = interval_tree_iter_next(node, 0, ~0ULL);
}
}
static void *
svm_range_copy_array(void *psrc, size_t size, uint64_t num_elements,
uint64_t offset)
{
unsigned char *dst;
dst = kvmalloc_array(num_elements, size, GFP_KERNEL);
if (!dst)
return NULL;
memcpy(dst, (unsigned char *)psrc + offset, num_elements * size);
return (void *)dst;
}
static int
svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src)
{
int i;
for (i = 0; i < MAX_GPU_INSTANCE; i++) {
if (!src->dma_addr[i])
continue;
dst->dma_addr[i] = svm_range_copy_array(src->dma_addr[i],
sizeof(*src->dma_addr[i]), src->npages, 0);
if (!dst->dma_addr[i])
return -ENOMEM;
}
return 0;
}
static int
svm_range_split_array(void *ppnew, void *ppold, size_t size,
uint64_t old_start, uint64_t old_n,
uint64_t new_start, uint64_t new_n)
{
unsigned char *new, *old, *pold;
uint64_t d;
if (!ppold)
return 0;
pold = *(unsigned char **)ppold;
if (!pold)
return 0;
d = (new_start - old_start) * size;
new = svm_range_copy_array(pold, size, new_n, d);
if (!new)
return -ENOMEM;
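	/* The old (remaining) range keeps the tail of the array if the new range took the head, otherwise the front */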
d = (new_start == old_start) ? new_n * size : 0;
old = svm_range_copy_array(pold, size, old_n, d);
if (!old) {
kvfree(new);
return -ENOMEM;
}
kvfree(pold);
*(void **)ppold = old;
*(void **)ppnew = new;
return 0;
}
static int
svm_range_split_pages(struct svm_range *new, struct svm_range *old,
uint64_t start, uint64_t last)
{
uint64_t npages = last - start + 1;
int i, r;
for (i = 0; i < MAX_GPU_INSTANCE; i++) {
r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i],
sizeof(*old->dma_addr[i]), old->start,
npages, new->start, new->npages);
if (r)
return r;
}
return 0;
}
static int
svm_range_split_nodes(struct svm_range *new, struct svm_range *old,
uint64_t start, uint64_t last)
{
uint64_t npages = last - start + 1;
pr_debug("svms 0x%p new prange 0x%p start 0x%lx [0x%llx 0x%llx]\n",
new->svms, new, new->start, start, last);
if (new->start == old->start) {
new->offset = old->offset;
old->offset += new->npages;
} else {
new->offset = old->offset + npages;
}
new->svm_bo = svm_range_bo_ref(old->svm_bo);
new->ttm_res = old->ttm_res;
spin_lock(&new->svm_bo->list_lock);
list_add(&new->svm_bo_list, &new->svm_bo->range_list);
spin_unlock(&new->svm_bo->list_lock);
return 0;
}
/**
* svm_range_split_adjust - split range and adjust
*
* @new: new range
* @old: the old range
* @start: the old range adjust to start address in pages
* @last: the old range adjust to last address in pages
*
* Copy system memory dma_addr or vram ttm_res in old range to new
* range from new_start up to size new->npages, the remaining old range is from
* start to last
*
* Return:
* 0 - OK, -ENOMEM - out of memory
*/
static int
svm_range_split_adjust(struct svm_range *new, struct svm_range *old,
uint64_t start, uint64_t last)
{
int r;
pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n",
new->svms, new->start, old->start, old->last, start, last);
if (new->start < old->start ||
new->last > old->last) {
WARN_ONCE(1, "invalid new range start or last\n");
return -EINVAL;
}
r = svm_range_split_pages(new, old, start, last);
if (r)
return r;
if (old->actual_loc && old->ttm_res) {
r = svm_range_split_nodes(new, old, start, last);
if (r)
return r;
}
old->npages = last - start + 1;
old->start = start;
old->last = last;
new->flags = old->flags;
new->preferred_loc = old->preferred_loc;
new->prefetch_loc = old->prefetch_loc;
new->actual_loc = old->actual_loc;
new->granularity = old->granularity;
new->mapped_to_gpu = old->mapped_to_gpu;
bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
return 0;
}
/**
* svm_range_split - split a range in 2 ranges
*
* @prange: the svm range to split
* @start: the remaining range start address in pages
* @last: the remaining range last address in pages
* @new: the result new range generated
*
* Two cases only:
* case 1: if start == prange->start
* prange ==> prange[start, last]
* new range [last + 1, prange->last]
*
* case 2: if last == prange->last
* prange ==> prange[start, last]
* new range [prange->start, start - 1]
*
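* For example, splitting prange [0x1000 0x1fff] with start = 0x1000 and
* last = 0x17ff keeps prange as [0x1000 0x17ff] and returns a new range
* [0x1800 0x1fff] (case 1).
*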
* Return:
* 0 - OK, -ENOMEM - out of memory, -EINVAL - invalid start, last
*/
static int
svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
struct svm_range **new)
{
uint64_t old_start = prange->start;
uint64_t old_last = prange->last;
struct svm_range_list *svms;
int r = 0;
pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms,
old_start, old_last, start, last);
if (old_start != start && old_last != last)
return -EINVAL;
if (start < old_start || last > old_last)
return -EINVAL;
svms = prange->svms;
if (old_start == start)
*new = svm_range_new(svms, last + 1, old_last, false);
else
*new = svm_range_new(svms, old_start, start - 1, false);
if (!*new)
return -ENOMEM;
r = svm_range_split_adjust(*new, prange, start, last);
if (r) {
pr_debug("failed %d split [0x%llx 0x%llx] to [0x%llx 0x%llx]\n",
r, old_start, old_last, start, last);
svm_range_free(*new, false);
*new = NULL;
}
return r;
}
static int
svm_range_split_tail(struct svm_range *prange,
uint64_t new_last, struct list_head *insert_list)
{
struct svm_range *tail;
int r = svm_range_split(prange, prange->start, new_last, &tail);
if (!r)
list_add(&tail->list, insert_list);
return r;
}
static int
svm_range_split_head(struct svm_range *prange,
uint64_t new_start, struct list_head *insert_list)
{
struct svm_range *head;
int r = svm_range_split(prange, new_start, prange->last, &head);
if (!r)
list_add(&head->list, insert_list);
return r;
}
static void
svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
struct svm_range *pchild, enum svm_work_list_ops op)
{
pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
pchild, pchild->start, pchild->last, prange, op);
pchild->work_item.mm = mm;
pchild->work_item.op = op;
list_add_tail(&pchild->child_list, &prange->child_list);
}
/**
* svm_range_split_by_granularity - collect ranges within granularity boundary
*
* @p: the process with svms list
* @mm: mm structure
* @addr: the vm fault address in pages, to split the prange
* @parent: parent range if prange is from child list
* @prange: prange to split
*
* Trims @prange to be a single aligned block of prange->granularity if
* possible. The head and tail are added to the child_list in @parent.
*
* Context: caller must hold mmap_read_lock and prange->lock
*
* Return:
* 0 - OK, otherwise error code
*/
int
svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
unsigned long addr, struct svm_range *parent,
struct svm_range *prange)
{
struct svm_range *head, *tail;
unsigned long start, last, size;
int r;
/* Align the split range start and size to the granularity size so that a
* single PTE can be used for the whole range. This reduces the number of
* PTE updates and the L1 TLB space used for translation.
*/
size = 1UL << prange->granularity;
start = ALIGN_DOWN(addr, size);
last = ALIGN(addr + 1, size) - 1;
pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n",
prange->svms, prange->start, prange->last, start, last, size);
if (start > prange->start) {
r = svm_range_split(prange, start, prange->last, &head);
if (r)
return r;
svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE);
}
if (last < prange->last) {
r = svm_range_split(prange, prange->start, last, &tail);
if (r)
return r;
svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
}
/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) {
prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP;
pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n",
prange, prange->start, prange->last,
SVM_OP_ADD_RANGE_AND_MAP);
}
return 0;
}
static bool
svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b)
{
return (node_a->adev == node_b->adev ||
amdgpu_xgmi_same_hive(node_a->adev, node_b->adev));
}
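/* svm_range_get_pte_flags - compute the GPU PTE flags for mapping @prange in
* @domain (system memory or VRAM) on @node. The MTYPE, SNOOP and SYSTEM bits
* depend on the ASIC generation, the coherence flags of the range, and whether
* the memory is local VRAM, VRAM of another GPU in the same XGMI hive, or
* system memory.
*/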
static uint64_t
svm_range_get_pte_flags(struct kfd_node *node,
struct svm_range *prange, int domain)
{
struct kfd_node *bo_node;
uint32_t flags = prange->flags;
uint32_t mapping_flags = 0;
uint64_t pte_flags;
bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT;
bool uncached = false; /*flags & KFD_IOCTL_SVM_FLAG_UNCACHED;*/
unsigned int mtype_local;
if (domain == SVM_RANGE_VRAM_DOMAIN)
bo_node = prange->svm_bo->node;
switch (node->adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 4, 1):
if (domain == SVM_RANGE_VRAM_DOMAIN) {
if (bo_node == node) {
mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
} else {
mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
if (svm_nodes_in_same_hive(node, bo_node))
snoop = true;
}
} else {
mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
}
break;
case IP_VERSION(9, 4, 2):
if (domain == SVM_RANGE_VRAM_DOMAIN) {
if (bo_node == node) {
mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
if (node->adev->gmc.xgmi.connected_to_cpu)
snoop = true;
} else {
mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
if (svm_nodes_in_same_hive(node, bo_node))
snoop = true;
}
} else {
mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
}
break;
case IP_VERSION(9, 4, 3):
mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC :
(amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW);
snoop = true;
if (uncached) {
mapping_flags |= AMDGPU_VM_MTYPE_UC;
} else if (domain == SVM_RANGE_VRAM_DOMAIN) {
/* local HBM region close to partition */
if (bo_node->adev == node->adev &&
(!bo_node->xcp || !node->xcp || bo_node->xcp->mem_id == node->xcp->mem_id))
mapping_flags |= mtype_local;
/* local HBM region far from partition or remote XGMI GPU */
else if (svm_nodes_in_same_hive(bo_node, node))
mapping_flags |= AMDGPU_VM_MTYPE_NC;
/* PCIe P2P */
else
mapping_flags |= AMDGPU_VM_MTYPE_UC;
/* system memory accessed by the APU */
} else if (node->adev->flags & AMD_IS_APU) {
/* On NUMA systems, locality is determined per-page
* in amdgpu_gmc_override_vm_pte_flags
*/
if (num_possible_nodes() <= 1)
mapping_flags |= mtype_local;
else
mapping_flags |= AMDGPU_VM_MTYPE_NC;
/* system memory accessed by the dGPU */
} else {
mapping_flags |= AMDGPU_VM_MTYPE_UC;
}
break;
default:
mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
}
mapping_flags |= AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO)
mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE;
if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
pte_flags = AMDGPU_PTE_VALID;
pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
pte_flags |= amdgpu_gem_va_map_flags(node->adev, mapping_flags);
return pte_flags;
}
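/* svm_range_unmap_from_gpu - clear the GPU page table entries for
* [start, last] in one VM by updating the range with a zero PTE value through
* amdgpu_vm_update_range.
*/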
static int
svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
uint64_t start, uint64_t last,
struct dma_fence **fence)
{
uint64_t init_pte_value = 0;
pr_debug("[0x%llx 0x%llx]\n", start, last);
return amdgpu_vm_update_range(adev, vm, false, true, true, NULL, start,
last, init_pte_value, 0, 0, NULL, NULL,
fence);
}
static int
svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
unsigned long last, uint32_t trigger)
{
DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
struct kfd_process_device *pdd;
struct dma_fence *fence = NULL;
struct kfd_process *p;
uint32_t gpuidx;
int r = 0;
if (!prange->mapped_to_gpu) {
pr_debug("prange 0x%p [0x%lx 0x%lx] not mapped to GPU\n",
prange, prange->start, prange->last);
return 0;
}
if (prange->start == start && prange->last == last) {
pr_debug("unmap svms 0x%p prange 0x%p\n", prange->svms, prange);
prange->mapped_to_gpu = false;
}
bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
MAX_GPU_INSTANCE);
p = container_of(prange->svms, struct kfd_process, svms);
for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
pr_debug("unmap from gpu idx 0x%x\n", gpuidx);
pdd = kfd_process_device_from_gpuidx(p, gpuidx);
if (!pdd) {
pr_debug("failed to find device idx %d\n", gpuidx);
return -EINVAL;
}
kfd_smi_event_unmap_from_gpu(pdd->dev, p->lead_thread->pid,
start, last, trigger);
r = svm_range_unmap_from_gpu(pdd->dev->adev,
drm_priv_to_vm(pdd->drm_priv),
start, last, &fence);
if (r)
break;
if (fence) {
r = dma_fence_wait(fence, false);
dma_fence_put(fence);
fence = NULL;
if (r)
break;
}
kfd_flush_tlb(pdd, TLB_FLUSH_HEAVYWEIGHT);
}
return r;
}
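/* svm_range_map_to_gpu - map @npages pages of @prange, starting at @offset,
* into one GPU VM. Contiguous pages that live in the same memory domain are
* batched into a single amdgpu_vm_update_range call; the page directories are
* updated afterwards and the last VM update fence is returned through @fence
* when requested.
*/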
static int
svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
unsigned long offset, unsigned long npages, bool readonly,
dma_addr_t *dma_addr, struct amdgpu_device *bo_adev,
struct dma_fence **fence, bool flush_tlb)
{
struct amdgpu_device *adev = pdd->dev->adev;
struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
uint64_t pte_flags;
unsigned long last_start;
int last_domain;
int r = 0;
int64_t i, j;
last_start = prange->start + offset;
pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
last_start, last_start + npages - 1, readonly);
for (i = offset; i < offset + npages; i++) {
last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
/* Collect all pages in the same address range and memory domain
* that can be mapped with a single call to update mapping.
*/
if (i < offset + npages - 1 &&
last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
continue;
pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
last_start, prange->start + i, last_domain ? "GPU" : "CPU");
pte_flags = svm_range_get_pte_flags(pdd->dev, prange, last_domain);
if (readonly)
pte_flags &= ~AMDGPU_PTE_WRITEABLE;
pr_debug("svms 0x%p map [0x%lx 0x%llx] vram %d PTE 0x%llx\n",
prange->svms, last_start, prange->start + i,
(last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
pte_flags);
/* In dGPU mode the same vm_manager allocates VRAM for different
* memory partitions based on fpfn/lpfn, so the same
* vm_manager.vram_base_offset is used regardless of the memory
* partition.
*/
r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL,
last_start, prange->start + i,
pte_flags,
(last_start - prange->start) << PAGE_SHIFT,
bo_adev ? bo_adev->vm_manager.vram_base_offset : 0,
NULL, dma_addr, &vm->last_update);
for (j = last_start - prange->start; j <= i; j++)
dma_addr[j] |= last_domain;
if (r) {
pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
goto out;
}
last_start = prange->start + i + 1;
}
r = amdgpu_vm_update_pdes(adev, vm, false);
if (r) {
pr_debug("failed %d to update directories 0x%lx\n", r,
prange->start);
goto out;
}
if (fence)
*fence = dma_fence_get(vm->last_update);
out:
return r;
}
static int
svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
unsigned long npages, bool readonly,
unsigned long *bitmap, bool wait, bool flush_tlb)
{
struct kfd_process_device *pdd;
struct amdgpu_device *bo_adev = NULL;
struct kfd_process *p;
struct dma_fence *fence = NULL;
uint32_t gpuidx;
int r = 0;
if (prange->svm_bo && prange->ttm_res)
bo_adev = prange->svm_bo->node->adev;
p = container_of(prange->svms, struct kfd_process, svms);
for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
pdd = kfd_process_device_from_gpuidx(p, gpuidx);
if (!pdd) {
pr_debug("failed to find device idx %d\n", gpuidx);
return -EINVAL;
}
pdd = kfd_bind_process_to_device(pdd->dev, p);
if (IS_ERR(pdd))
return -EINVAL;
if (bo_adev && pdd->dev->adev != bo_adev &&
!amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
pr_debug("cannot map to device idx %d\n", gpuidx);
continue;
}
r = svm_range_map_to_gpu(pdd, prange, offset, npages, readonly,
prange->dma_addr[gpuidx],
bo_adev, wait ? &fence : NULL,
flush_tlb);
if (r)
break;
if (fence) {
r = dma_fence_wait(fence, false);
dma_fence_put(fence);
fence = NULL;
if (r) {
pr_debug("failed %d to dma fence wait\n", r);
break;
}
}
kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
}
return r;
}
struct svm_validate_context {
struct kfd_process *process;
struct svm_range *prange;
bool intr;
DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
struct drm_exec exec;
};
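/* svm_range_reserve_bos - lock the page directory BOs of every GPU VM in
* ctx->bitmap with drm_exec and validate their page table BOs, so the GPU page
* tables can be updated safely. On failure the drm_exec context is already
* finalized and the caller must not call svm_range_unreserve_bos.
*/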
static int svm_range_reserve_bos(struct svm_validate_context *ctx, bool intr)
{
struct kfd_process_device *pdd;
struct amdgpu_vm *vm;
uint32_t gpuidx;
int r;
drm_exec_init(&ctx->exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT: 0);
drm_exec_until_all_locked(&ctx->exec) {
for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
if (!pdd) {
pr_debug("failed to find device idx %d\n", gpuidx);
r = -EINVAL;
goto unreserve_out;
}
vm = drm_priv_to_vm(pdd->drm_priv);
r = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
drm_exec_retry_on_contention(&ctx->exec);
if (unlikely(r)) {
pr_debug("failed %d to reserve bo\n", r);
goto unreserve_out;
}
}
}
for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
if (!pdd) {
pr_debug("failed to find device idx %d\n", gpuidx);
r = -EINVAL;
goto unreserve_out;
}
r = amdgpu_vm_validate_pt_bos(pdd->dev->adev,
drm_priv_to_vm(pdd->drm_priv),
svm_range_bo_validate, NULL);
if (r) {
pr_debug("failed %d validate pt bos\n", r);
goto unreserve_out;
}
}
return 0;
unreserve_out:
drm_exec_fini(&ctx->exec);
return r;
}
static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
{
drm_exec_fini(&ctx->exec);
}
static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
{
struct kfd_process_device *pdd;
pdd = kfd_process_device_from_gpuidx(p, gpuidx);
if (!pdd)
return NULL;
return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
}
/*
* Validation+GPU mapping with concurrent invalidation (MMU notifiers)
*
* To prevent concurrent destruction or change of range attributes, the
* svm_read_lock must be held. The caller must not hold the svm_write_lock
* because that would block concurrent evictions and lead to deadlocks. To
* serialize concurrent migrations or validations of the same range, the
* prange->migrate_mutex must be held.
*
* For VRAM ranges, the SVM BO must be allocated and valid (protected by its
* eviction fence).
*
* The following sequence ensures race-free validation and GPU mapping:
*
* 1. Reserve page table (and SVM BO if range is in VRAM)
* 2. hmm_range_fault to get page addresses (if system memory)
* 3. DMA-map pages (if system memory)
* 4-a. Take notifier lock
* 4-b. Check that pages still valid (mmu_interval_read_retry)
* 4-c. Check that the range was not split or otherwise invalidated
* 4-d. Update GPU page table
* 4-e. Release notifier lock
* 5. Release page table (and SVM BO) reservation
*/
static int svm_range_validate_and_map(struct mm_struct *mm,
struct svm_range *prange, int32_t gpuidx,
bool intr, bool wait, bool flush_tlb)
{
struct svm_validate_context *ctx;
unsigned long start, end, addr;
struct kfd_process *p;
void *owner;
int32_t idx;
int r = 0;
ctx = kzalloc(sizeof(struct svm_validate_context), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->process = container_of(prange->svms, struct kfd_process, svms);
ctx->prange = prange;
ctx->intr = intr;
if (gpuidx < MAX_GPU_INSTANCE) {
bitmap_zero(ctx->bitmap, MAX_GPU_INSTANCE);
bitmap_set(ctx->bitmap, gpuidx, 1);
} else if (ctx->process->xnack_enabled) {
bitmap_copy(ctx->bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
/* If the range was prefetched to a GPU, or a GPU retry fault
* migrated it to a GPU that has the ACCESS attribute for the
* range, create the mapping on that GPU.
*/
if (prange->actual_loc) {
gpuidx = kfd_process_gpuidx_from_gpuid(ctx->process,
prange->actual_loc);
if (gpuidx < 0) {
WARN_ONCE(1, "failed get device by id 0x%x\n",
prange->actual_loc);
r = -EINVAL;
goto free_ctx;
}
if (test_bit(gpuidx, prange->bitmap_access))
bitmap_set(ctx->bitmap, gpuidx, 1);
}
} else {
bitmap_or(ctx->bitmap, prange->bitmap_access,
prange->bitmap_aip, MAX_GPU_INSTANCE);
}
if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
if (!prange->mapped_to_gpu ||
bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
r = 0;
goto free_ctx;
}
}
if (prange->actual_loc && !prange->ttm_res) {
/* This should never happen. actual_loc gets set by
* svm_migrate_ram_to_vram after allocating a BO.
*/
WARN_ONCE(1, "VRAM BO missing during validation\n");
r = -EINVAL;
goto free_ctx;
}
r = svm_range_reserve_bos(ctx, intr);
if (r)
goto free_ctx;
p = container_of(prange->svms, struct kfd_process, svms);
owner = kfd_svm_page_owner(p, find_first_bit(ctx->bitmap,
MAX_GPU_INSTANCE));
for_each_set_bit(idx, ctx->bitmap, MAX_GPU_INSTANCE) {
if (kfd_svm_page_owner(p, idx) != owner) {
owner = NULL;
break;
}
}
start = prange->start << PAGE_SHIFT;
end = (prange->last + 1) << PAGE_SHIFT;
for (addr = start; addr < end && !r; ) {
struct hmm_range *hmm_range;
struct vm_area_struct *vma;
unsigned long next;
unsigned long offset;
unsigned long npages;
bool readonly;
vma = vma_lookup(mm, addr);
if (!vma) {
r = -EFAULT;
goto unreserve_out;
}
readonly = !(vma->vm_flags & VM_WRITE);
next = min(vma->vm_end, end);
npages = (next - addr) >> PAGE_SHIFT;
WRITE_ONCE(p->svms.faulting_task, current);
r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
readonly, owner, NULL,
&hmm_range);
WRITE_ONCE(p->svms.faulting_task, NULL);
if (r) {
pr_debug("failed %d to get svm range pages\n", r);
if (r == -EBUSY)
r = -EAGAIN;
goto unreserve_out;
}
offset = (addr - start) >> PAGE_SHIFT;
r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
hmm_range->hmm_pfns);
if (r) {
pr_debug("failed %d to dma map range\n", r);
goto unreserve_out;
}
svm_range_lock(prange);
if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
pr_debug("hmm update the range, need validate again\n");
r = -EAGAIN;
goto unlock_out;
}
if (!list_empty(&prange->child_list)) {
pr_debug("range split by unmap in parallel, validate again\n");
r = -EAGAIN;
goto unlock_out;
}
r = svm_range_map_to_gpus(prange, offset, npages, readonly,
ctx->bitmap, wait, flush_tlb);
unlock_out:
svm_range_unlock(prange);
addr = next;
}
if (addr == end) {
prange->validated_once = true;
prange->mapped_to_gpu = true;
}
unreserve_out:
svm_range_unreserve_bos(ctx);
prange->is_error_flag = !!r;
if (!r)
prange->validate_timestamp = ktime_get_boottime();
free_ctx:
kfree(ctx);
return r;
}
/**
* svm_range_list_lock_and_flush_work - flush pending deferred work
*
* @svms: the svm range list
* @mm: the mm structure
*
* Context: Returns with mmap write lock held, pending deferred work flushed
*
*/
void
svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
struct mm_struct *mm)
{
retry_flush_work:
flush_work(&svms->deferred_list_work);
mmap_write_lock(mm);
if (list_empty(&svms->deferred_range_list))
return;
mmap_write_unlock(mm);
pr_debug("retry flush\n");
goto retry_flush_work;
}
static void svm_range_restore_work(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct amdkfd_process_info *process_info;
struct svm_range_list *svms;
struct svm_range *prange;
struct kfd_process *p;
struct mm_struct *mm;
int evicted_ranges;
int invalid;
int r;
svms = container_of(dwork, struct svm_range_list, restore_work);
evicted_ranges = atomic_read(&svms->evicted_ranges);
if (!evicted_ranges)
return;
pr_debug("restore svm ranges\n");
p = container_of(svms, struct kfd_process, svms);
process_info = p->kgd_process_info;
/* Keep a reference on mm while svm_range_validate_and_map maps the ranges */
mm = get_task_mm(p->lead_thread);
if (!mm) {
pr_debug("svms 0x%p process mm gone\n", svms);
return;
}
mutex_lock(&process_info->lock);
svm_range_list_lock_and_flush_work(svms, mm);
mutex_lock(&svms->lock);
evicted_ranges = atomic_read(&svms->evicted_ranges);
list_for_each_entry(prange, &svms->list, list) {
invalid = atomic_read(&prange->invalid);
if (!invalid)
continue;
pr_debug("restoring svms 0x%p prange 0x%p [0x%lx %lx] inv %d\n",
prange->svms, prange, prange->start, prange->last,
invalid);
/*
* If the range is migrating, wait for the migration to finish.
*/
mutex_lock(&prange->migrate_mutex);
r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
false, true, false);
if (r)
pr_debug("failed %d to map 0x%lx to gpus\n", r,
prange->start);
mutex_unlock(&prange->migrate_mutex);
if (r)
goto out_reschedule;
if (atomic_cmpxchg(&prange->invalid, invalid, 0) != invalid)
goto out_reschedule;
}
if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) !=
evicted_ranges)
goto out_reschedule;
evicted_ranges = 0;
r = kgd2kfd_resume_mm(mm);
if (r) {
/* No recovery from this failure. Probably the CP is
* hanging. No point trying again.
*/
pr_debug("failed %d to resume KFD\n", r);
}
pr_debug("restore svm ranges successfully\n");
out_reschedule:
mutex_unlock(&svms->lock);
mmap_write_unlock(mm);
mutex_unlock(&process_info->lock);
/* If validation failed, reschedule another attempt */
if (evicted_ranges) {
pr_debug("reschedule to restore svm range\n");
schedule_delayed_work(&svms->restore_work,
msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
kfd_smi_event_queue_restore_rescheduled(mm);
}
mmput(mm);
}
/**
* svm_range_evict - evict svm range
* @prange: svm range structure
* @mm: current process mm_struct
* @start: first page of the address range to evict, in pages
* @last: last page of the address range to evict, in pages
* @event: mmu notifier event when range is evicted or migrated
*
* Stop all queues of the process to make sure the GPU does not access the
* memory, then return to let the CPU evict the buffer and proceed with the CPU
* page table update.
*
* No lock is needed to synchronize the CPU page table invalidation with GPU
* execution. If an invalidation happens while the restore work is running, the
* restore work restarts so that the latest CPU page mapping is picked up and
* mapped to the GPU before the queues are started again.
*/
static int
svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
unsigned long start, unsigned long last,
enum mmu_notifier_event event)
{
struct svm_range_list *svms = prange->svms;
struct svm_range *pchild;
struct kfd_process *p;
int r = 0;
p = container_of(svms, struct kfd_process, svms);
pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
svms, prange->start, prange->last, start, last);
if (!p->xnack_enabled ||
(prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) {
int evicted_ranges;
bool mapped = prange->mapped_to_gpu;
list_for_each_entry(pchild, &prange->child_list, child_list) {
if (!pchild->mapped_to_gpu)
continue;
mapped = true;
mutex_lock_nested(&pchild->lock, 1);
if (pchild->start <= last && pchild->last >= start) {
pr_debug("increment pchild invalid [0x%lx 0x%lx]\n",
pchild->start, pchild->last);
atomic_inc(&pchild->invalid);
}
mutex_unlock(&pchild->lock);
}
if (!mapped)
return r;
if (prange->start <= last && prange->last >= start)
atomic_inc(&prange->invalid);
evicted_ranges = atomic_inc_return(&svms->evicted_ranges);
if (evicted_ranges != 1)
return r;
pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n",
prange->svms, prange->start, prange->last);
/* First eviction, stop the queues */
r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM);
if (r)
pr_debug("failed to quiesce KFD\n");
pr_debug("schedule to restore svm %p ranges\n", svms);
schedule_delayed_work(&svms->restore_work,
msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
} else {
unsigned long s, l;
uint32_t trigger;
if (event == MMU_NOTIFY_MIGRATE)
trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE;
else
trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY;
pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n",
prange->svms, start, last);
list_for_each_entry(pchild, &prange->child_list, child_list) {
mutex_lock_nested(&pchild->lock, 1);
s = max(start, pchild->start);
l = min(last, pchild->last);
if (l >= s)
svm_range_unmap_from_gpus(pchild, s, l, trigger);
mutex_unlock(&pchild->lock);
}
s = max(start, prange->start);
l = min(last, prange->last);
if (l >= s)
svm_range_unmap_from_gpus(prange, s, l, trigger);
}
return r;
}
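/* svm_range_clone - create a duplicate of @old covering the same interval,
* copying its DMA address arrays, attributes and access bitmaps, and sharing
* its VRAM BO (with an extra reference) if it has one. Used by svm_range_add
* to build a transactional update without touching the original ranges.
*/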
static struct svm_range *svm_range_clone(struct svm_range *old)
{
struct svm_range *new;
new = svm_range_new(old->svms, old->start, old->last, false);
if (!new)
return NULL;
if (svm_range_copy_dma_addrs(new, old)) {
svm_range_free(new, false);
return NULL;
}
if (old->svm_bo) {
new->ttm_res = old->ttm_res;
new->offset = old->offset;
new->svm_bo = svm_range_bo_ref(old->svm_bo);
spin_lock(&new->svm_bo->list_lock);
list_add(&new->svm_bo_list, &new->svm_bo->range_list);
spin_unlock(&new->svm_bo->list_lock);
}
new->flags = old->flags;
new->preferred_loc = old->preferred_loc;
new->prefetch_loc = old->prefetch_loc;
new->actual_loc = old->actual_loc;
new->granularity = old->granularity;
new->mapped_to_gpu = old->mapped_to_gpu;
bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
return new;
}
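/* svm_range_set_max_pages - derive the maximum number of pages per svm range
* from the smallest memory partition of @adev: partition size >> 17 (one page
* per 128KB of VRAM), clamped to [512, 256K] pages and rounded down to a power
* of two. The global max_svm_range_pages keeps the minimum across all GPUs in
* the system.
*
* As a worked example (assuming 4KB pages): a 64GB partition gives
* 64GB >> 17 = 512K pages, clamped to 256K pages, so svm_range_split_new
* splits new ranges into chunks of at most 1GB.
*/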
void svm_range_set_max_pages(struct amdgpu_device *adev)
{
uint64_t max_pages;
uint64_t pages, _pages;
uint64_t min_pages = 0;
int i, id;
for (i = 0; i < adev->kfd.dev->num_nodes; i++) {
if (adev->kfd.dev->nodes[i]->xcp)
id = adev->kfd.dev->nodes[i]->xcp->id;
else
id = -1;
pages = KFD_XCP_MEMORY_SIZE(adev, id) >> 17;
pages = clamp(pages, 1ULL << 9, 1ULL << 18);
pages = rounddown_pow_of_two(pages);
min_pages = min_not_zero(min_pages, pages);
}
do {
max_pages = READ_ONCE(max_svm_range_pages);
_pages = min_not_zero(max_pages, min_pages);
} while (cmpxchg(&max_svm_range_pages, max_pages, _pages) != max_pages);
}
static int
svm_range_split_new(struct svm_range_list *svms, uint64_t start, uint64_t last,
uint64_t max_pages, struct list_head *insert_list,
struct list_head *update_list)
{
struct svm_range *prange;
uint64_t l;
pr_debug("max_svm_range_pages 0x%llx adding [0x%llx 0x%llx]\n",
max_pages, start, last);
while (last >= start) {
l = min(last, ALIGN_DOWN(start + max_pages, max_pages) - 1);
prange = svm_range_new(svms, start, l, true);
if (!prange)
return -ENOMEM;
list_add(&prange->list, insert_list);
list_add(&prange->update_list, update_list);
start = l + 1;
}
return 0;
}
/**
* svm_range_add - add svm range and handle overlap
* @p: the process whose svms the range is added to
* @start: range start address, in pages
* @size: range size, in pages
* @nattr: number of attributes
* @attrs: array of attributes
* @update_list: output, the ranges need validate and update GPU mapping
* @insert_list: output, the ranges need insert to svms
* @remove_list: output, the ranges are replaced and need remove from svms
*
* Check if the virtual address range has overlap with any existing ranges,
* split partly overlapping ranges and add new ranges in the gaps. All changes
* should be applied to the range_list and interval tree transactionally. If
* any range split or allocation fails, the entire update fails. Therefore any
* existing overlapping svm_ranges are cloned and the original svm_ranges left
* unchanged.
*
* If the transaction succeeds, the caller can update and insert clones and
* new ranges, then free the originals.
*
* Otherwise the caller can free the clones and new ranges, while the old
* svm_ranges remain unchanged.
*
* Context: Process context, caller must hold svms->lock
*
* Return:
* 0 - OK, otherwise error code
*/
static int
svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
struct list_head *update_list, struct list_head *insert_list,
struct list_head *remove_list)
{
unsigned long last = start + size - 1UL;
struct svm_range_list *svms = &p->svms;
struct interval_tree_node *node;
struct svm_range *prange;
struct svm_range *tmp;
struct list_head new_list;
int r = 0;
pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last);
INIT_LIST_HEAD(update_list);
INIT_LIST_HEAD(insert_list);
INIT_LIST_HEAD(remove_list);
INIT_LIST_HEAD(&new_list);
node = interval_tree_iter_first(&svms->objects, start, last);
while (node) {
struct interval_tree_node *next;
unsigned long next_start;
pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start,
node->last);
prange = container_of(node, struct svm_range, it_node);
next = interval_tree_iter_next(node, start, last);
next_start = min(node->last, last) + 1;
if (svm_range_is_same_attrs(p, prange, nattr, attrs)) {
/* nothing to do */
} else if (node->start < start || node->last > last) {
/* node intersects the update range and its attributes
* will change. Clone and split it, apply updates only
* to the overlapping part
*/
struct svm_range *old = prange;
prange = svm_range_clone(old);
if (!prange) {
r = -ENOMEM;
goto out;
}
list_add(&old->update_list, remove_list);
list_add(&prange->list, insert_list);
list_add(&prange->update_list, update_list);
if (node->start < start) {
pr_debug("change old range start\n");
r = svm_range_split_head(prange, start,
insert_list);
if (r)
goto out;
}
if (node->last > last) {
pr_debug("change old range last\n");
r = svm_range_split_tail(prange, last,
insert_list);
if (r)
goto out;
}
} else {
/* The node is contained within start..last,
* just update it
*/
list_add(&prange->update_list, update_list);
}
/* insert a new node if needed */
if (node->start > start) {
r = svm_range_split_new(svms, start, node->start - 1,
READ_ONCE(max_svm_range_pages),
&new_list, update_list);
if (r)
goto out;
}
node = next;
start = next_start;
}
/* add a final range at the end if needed */
if (start <= last)
r = svm_range_split_new(svms, start, last,
READ_ONCE(max_svm_range_pages),
&new_list, update_list);
out:
if (r) {
list_for_each_entry_safe(prange, tmp, insert_list, list)
svm_range_free(prange, false);
list_for_each_entry_safe(prange, tmp, &new_list, list)
svm_range_free(prange, true);
} else {
list_splice(&new_list, insert_list);
}
return r;
}
static void
svm_range_update_notifier_and_interval_tree(struct mm_struct *mm,
struct svm_range *prange)
{
unsigned long start;
unsigned long last;
start = prange->notifier.interval_tree.start >> PAGE_SHIFT;
last = prange->notifier.interval_tree.last >> PAGE_SHIFT;
if (prange->start == start && prange->last == last)
return;
pr_debug("up notifier 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
prange->svms, prange, start, last, prange->start,
prange->last);
if (start != 0 && last != 0) {
interval_tree_remove(&prange->it_node, &prange->svms->objects);
svm_range_remove_notifier(prange);
}
prange->it_node.start = prange->start;
prange->it_node.last = prange->last;
interval_tree_insert(&prange->it_node, &prange->svms->objects);
svm_range_add_notifier_locked(mm, prange);
}
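/* svm_range_handle_list_op - execute one deferred work item for @prange:
* unlink and free it, update its notifier and interval tree node, or add it to
* svms and register its notifier, depending on prange->work_item.op.
*/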
static void
svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange,
struct mm_struct *mm)
{
switch (prange->work_item.op) {
case SVM_OP_NULL:
pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n",
svms, prange, prange->start, prange->last);
break;
case SVM_OP_UNMAP_RANGE:
pr_debug("remove 0x%p prange 0x%p [0x%lx 0x%lx]\n",
svms, prange, prange->start, prange->last);
svm_range_unlink(prange);
svm_range_remove_notifier(prange);
svm_range_free(prange, true);
break;
case SVM_OP_UPDATE_RANGE_NOTIFIER:
pr_debug("update notifier 0x%p prange 0x%p [0x%lx 0x%lx]\n",
svms, prange, prange->start, prange->last);
svm_range_update_notifier_and_interval_tree(mm, prange);
break;
case SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP:
pr_debug("update and map 0x%p prange 0x%p [0x%lx 0x%lx]\n",
svms, prange, prange->start, prange->last);
svm_range_update_notifier_and_interval_tree(mm, prange);
/* TODO: implement deferred validation and mapping */
break;
case SVM_OP_ADD_RANGE:
pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange,
prange->start, prange->last);
svm_range_add_to_svms(prange);
svm_range_add_notifier_locked(mm, prange);
break;
case SVM_OP_ADD_RANGE_AND_MAP:
pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms,
prange, prange->start, prange->last);
svm_range_add_to_svms(prange);
svm_range_add_notifier_locked(mm, prange);
/* TODO: implement deferred validation and mapping */
break;
default:
WARN_ONCE(1, "Unknown prange 0x%p work op %d\n", prange,
prange->work_item.op);
}
}
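/* svm_range_drain_retry_fault - wait until all retry faults that are already
* queued in the interrupt rings of every supported GPU have been processed, so
* that no stale fault can reference a range that is about to be freed.
*/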
static void svm_range_drain_retry_fault(struct svm_range_list *svms)
{
struct kfd_process_device *pdd;
struct kfd_process *p;
int drain;
uint32_t i;
p = container_of(svms, struct kfd_process, svms);
restart:
drain = atomic_read(&svms->drain_pagefaults);
if (!drain)
return;
for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
pdd = p->pdds[i];
if (!pdd)
continue;
pr_debug("drain retry fault gpu %d svms %p\n", i, svms);
amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
pdd->dev->adev->irq.retry_cam_enabled ?
&pdd->dev->adev->irq.ih :
&pdd->dev->adev->irq.ih1);
if (pdd->dev->adev->irq.retry_cam_enabled)
amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
&pdd->dev->adev->irq.ih_soft);
pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
}
if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain)
goto restart;
}
static void svm_range_deferred_list_work(struct work_struct *work)
{
struct svm_range_list *svms;
struct svm_range *prange;
struct mm_struct *mm;
svms = container_of(work, struct svm_range_list, deferred_list_work);
pr_debug("enter svms 0x%p\n", svms);
spin_lock(&svms->deferred_list_lock);
while (!list_empty(&svms->deferred_range_list)) {
prange = list_first_entry(&svms->deferred_range_list,
struct svm_range, deferred_list);
spin_unlock(&svms->deferred_list_lock);
pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
prange->start, prange->last, prange->work_item.op);
mm = prange->work_item.mm;
retry:
mmap_write_lock(mm);
/* Checking for the need to drain retry faults must be inside
* mmap write lock to serialize with munmap notifiers.
*/
if (unlikely(atomic_read(&svms->drain_pagefaults))) {
mmap_write_unlock(mm);
svm_range_drain_retry_fault(svms);
goto retry;
}
/* Removal from deferred_list must be done inside the mmap write lock,
* to handle two race cases:
* 1. unmap_from_cpu may change work_item.op and add the range to
* deferred_list again, causing a use-after-free bug.
* 2. svm_range_list_lock_and_flush_work may hold the mmap write
* lock and continue because deferred_list is empty, while the
* deferred_list work is actually waiting for the mmap lock.
*/
spin_lock(&svms->deferred_list_lock);
list_del_init(&prange->deferred_list);
spin_unlock(&svms->deferred_list_lock);
mutex_lock(&svms->lock);
mutex_lock(&prange->migrate_mutex);
while (!list_empty(&prange->child_list)) {
struct svm_range *pchild;
pchild = list_first_entry(&prange->child_list,
struct svm_range, child_list);
pr_debug("child prange 0x%p op %d\n", pchild,
pchild->work_item.op);
list_del_init(&pchild->child_list);
svm_range_handle_list_op(svms, pchild, mm);
}
mutex_unlock(&prange->migrate_mutex);
svm_range_handle_list_op(svms, prange, mm);
mutex_unlock(&svms->lock);
mmap_write_unlock(mm);
/* Pairs with mmget in svm_range_add_list_work */
mmput(mm);
spin_lock(&svms->deferred_list_lock);
}
spin_unlock(&svms->deferred_list_lock);
pr_debug("exit svms 0x%p\n", svms);
}
void
svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
struct mm_struct *mm, enum svm_work_list_ops op)
{
spin_lock(&svms->deferred_list_lock);
/* if prange is on the deferred list */
if (!list_empty(&prange->deferred_list)) {
pr_debug("update exist prange 0x%p work op %d\n", prange, op);
WARN_ONCE(prange->work_item.mm != mm, "unmatch mm\n");
if (op != SVM_OP_NULL &&
prange->work_item.op != SVM_OP_UNMAP_RANGE)
prange->work_item.op = op;
} else {
prange->work_item.op = op;
/* Pairs with mmput in deferred_list_work */
mmget(mm);
prange->work_item.mm = mm;
list_add_tail(&prange->deferred_list,
&prange->svms->deferred_range_list);
pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
prange, prange->start, prange->last, op);
}
spin_unlock(&svms->deferred_list_lock);
}
void schedule_deferred_list_work(struct svm_range_list *svms)
{
spin_lock(&svms->deferred_list_lock);
if (!list_empty(&svms->deferred_range_list))
schedule_work(&svms->deferred_list_work);
spin_unlock(&svms->deferred_list_lock);
}
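/* svm_range_unmap_split - split @prange around a CPU unmap of [start, last].
* The unmapped piece is added to @parent's child list with SVM_OP_UNMAP_RANGE
* so the deferred work frees it; if the unmap falls in the middle of the
* range, the remaining tail piece is added with SVM_OP_ADD_RANGE so it gets
* re-inserted into svms.
*/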
static void
svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
struct svm_range *prange, unsigned long start,
unsigned long last)
{
struct svm_range *head;
struct svm_range *tail;
if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
pr_debug("prange 0x%p [0x%lx 0x%lx] is already freed\n", prange,
prange->start, prange->last);
return;
}
if (start > prange->last || last < prange->start)
return;
head = tail = prange;
if (start > prange->start)
svm_range_split(prange, prange->start, start - 1, &tail);
if (last < tail->last)
svm_range_split(tail, last + 1, tail->last, &head);
if (head != prange && tail != prange) {
svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
} else if (tail != prange) {
svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE);
} else if (head != prange) {
svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
} else if (parent != prange) {
prange->work_item.op = SVM_OP_UNMAP_RANGE;
}
}
static void
svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
unsigned long start, unsigned long last)
{
uint32_t trigger = KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU;
struct svm_range_list *svms;
struct svm_range *pchild;
struct kfd_process *p;
unsigned long s, l;
bool unmap_parent;
p = kfd_lookup_process_by_mm(mm);
if (!p)
return;
svms = &p->svms;
pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
prange, prange->start, prange->last, start, last);
/* Make sure pending page faults are drained in the deferred worker
* before the range is freed to avoid straggler interrupts on
* unmapped memory causing "phantom faults".
*/
atomic_inc(&svms->drain_pagefaults);
unmap_parent = start <= prange->start && last >= prange->last;
list_for_each_entry(pchild, &prange->child_list, child_list) {
mutex_lock_nested(&pchild->lock, 1);
s = max(start, pchild->start);
l = min(last, pchild->last);
if (l >= s)
svm_range_unmap_from_gpus(pchild, s, l, trigger);
svm_range_unmap_split(mm, prange, pchild, start, last);
mutex_unlock(&pchild->lock);
}
s = max(start, prange->start);
l = min(last, prange->last);
if (l >= s)
svm_range_unmap_from_gpus(prange, s, l, trigger);
svm_range_unmap_split(mm, prange, prange, start, last);
if (unmap_parent)
svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
else
svm_range_add_list_work(svms, prange, mm,
SVM_OP_UPDATE_RANGE_NOTIFIER);
schedule_deferred_list_work(svms);
kfd_unref_process(p);
}
/**
* svm_range_cpu_invalidate_pagetables - interval notifier callback
* @mni: mmu_interval_notifier struct
* @range: mmu_notifier_range struct
* @cur_seq: value to pass to mmu_interval_set_seq()
*
* If the event is MMU_NOTIFY_UNMAP, the invalidation comes from a CPU unmap of
* the range; otherwise it comes from migration or a CPU page invalidation
* callback.
*
* For an unmap event, unmap the range from the GPUs, remove the prange from
* svms in a deferred work thread, and split the prange if only part of it is
* unmapped.
*
* For an invalidation event, if GPU retry faults are not enabled, evict the
* queues, then schedule svm_range_restore_work to update the GPU mapping and
* resume the queues. If GPU retry faults are enabled, unmap the svm range from
* the GPU; the retry fault will update the GPU mapping to recover.
*
* Context: mmap lock and notifier_invalidate_start lock are held for an
* invalidate event; the prange lock is held if this is from migration
*/
static bool
svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
const struct mmu_notifier_range *range,
unsigned long cur_seq)
{
struct svm_range *prange;
unsigned long start;
unsigned long last;
if (range->event == MMU_NOTIFY_RELEASE)
return true;
if (!mmget_not_zero(mni->mm))
return true;
start = mni->interval_tree.start;
last = mni->interval_tree.last;
start = max(start, range->start) >> PAGE_SHIFT;
last = min(last, range->end - 1) >> PAGE_SHIFT;
pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n",
start, last, range->start >> PAGE_SHIFT,
(range->end - 1) >> PAGE_SHIFT,
mni->interval_tree.start >> PAGE_SHIFT,
mni->interval_tree.last >> PAGE_SHIFT, range->event);
prange = container_of(mni, struct svm_range, notifier);
svm_range_lock(prange);
mmu_interval_set_seq(mni, cur_seq);
switch (range->event) {
case MMU_NOTIFY_UNMAP:
svm_range_unmap_from_cpu(mni->mm, prange, start, last);
break;
default:
svm_range_evict(prange, mni->mm, start, last, range->event);
break;
}
svm_range_unlock(prange);
mmput(mni->mm);
return true;
}
/**
* svm_range_from_addr - find svm range from fault address
* @svms: svm range list header
* @addr: address to search range interval tree, in pages
* @parent: parent range if range is on child list
*
* Context: The caller must hold svms->lock
*
* Return: the svm_range found or NULL
*/
struct svm_range *
svm_range_from_addr(struct svm_range_list *svms, unsigned long addr,
struct svm_range **parent)
{
struct interval_tree_node *node;
struct svm_range *prange;
struct svm_range *pchild;
node = interval_tree_iter_first(&svms->objects, addr, addr);
if (!node)
return NULL;
prange = container_of(node, struct svm_range, it_node);
pr_debug("address 0x%lx prange [0x%lx 0x%lx] node [0x%lx 0x%lx]\n",
addr, prange->start, prange->last, node->start, node->last);
if (addr >= prange->start && addr <= prange->last) {
if (parent)
*parent = prange;
return prange;
}
list_for_each_entry(pchild, &prange->child_list, child_list)
if (addr >= pchild->start && addr <= pchild->last) {
pr_debug("found address 0x%lx pchild [0x%lx 0x%lx]\n",
addr, pchild->start, pchild->last);
if (parent)
*parent = prange;
return pchild;
}
return NULL;
}
/* svm_range_best_restore_location - decide the best fault restore location
* @prange: svm range structure
* @node: the KFD node on which the vm fault happened
* @gpuidx: output, the gpu index of the faulting node within the process
*
* This is only called when xnack is on, to decide the best location to restore
* the range mapping after a GPU vm fault. The caller uses the best location to
* migrate the range if the actual location is not the best location, then
* updates the GPU page table mapping to the best location.
*
* If the preferred location is accessible by the faulting GPU, use it.
* If the faulting gpu index is set in the range ACCESSIBLE bitmap, the best
* location is the faulting GPU.
* If the faulting gpu index is set in the range ACCESSIBLE_IN_PLACE bitmap:
* if the range's actual location is the CPU, the best location is the CPU;
* if the faulting GPU is in the same XGMI hive as the actual location GPU,
* the best location is the actual location.
* Otherwise the GPU has no access and the best location is -1.
*
* Return:
* -1 if the faulting GPU has no access
* 0 for CPU, or a GPU id otherwise
*/
static int32_t
svm_range_best_restore_location(struct svm_range *prange,
struct kfd_node *node,
int32_t *gpuidx)
{
struct kfd_node *bo_node, *preferred_node;
struct kfd_process *p;
uint32_t gpuid;
int r;
p = container_of(prange->svms, struct kfd_process, svms);
r = kfd_process_gpuid_from_node(p, node, &gpuid, gpuidx);
if (r < 0) {
pr_debug("failed to get gpuid from kgd\n");
return -1;
}
if (node->adev->gmc.is_app_apu)
return 0;
if (prange->preferred_loc == gpuid ||
prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) {
return prange->preferred_loc;
} else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
preferred_node = svm_range_get_node_by_id(prange, prange->preferred_loc);
if (preferred_node && svm_nodes_in_same_hive(node, preferred_node))
return prange->preferred_loc;
/* fall through */
}
if (test_bit(*gpuidx, prange->bitmap_access))
return gpuid;
if (test_bit(*gpuidx, prange->bitmap_aip)) {
if (!prange->actual_loc)
return 0;
bo_node = svm_range_get_node_by_id(prange, prange->actual_loc);
if (bo_node && svm_nodes_in_same_hive(node, bo_node))
return prange->actual_loc;
else
return 0;
}
return -1;
}
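/* svm_range_get_range_boundaries - compute the [start, last] page interval of
* the new range to create for a fault at @addr: limited by the containing VMA,
* aligned to a 512-page (2MB with 4KB pages) window around the fault address,
* and clipped so it does not overlap any existing svm range. Also reports
* whether the VMA is the initial heap or stack.
*/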
static int
svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
unsigned long *start, unsigned long *last,
bool *is_heap_stack)
{
struct vm_area_struct *vma;
struct interval_tree_node *node;
unsigned long start_limit, end_limit;
vma = vma_lookup(p->mm, addr << PAGE_SHIFT);
if (!vma) {
pr_debug("VMA does not exist in address [0x%llx]\n", addr);
return -EFAULT;
}
*is_heap_stack = vma_is_initial_heap(vma) || vma_is_initial_stack(vma);
start_limit = max(vma->vm_start >> PAGE_SHIFT,
(unsigned long)ALIGN_DOWN(addr, 2UL << 8));
end_limit = min(vma->vm_end >> PAGE_SHIFT,
(unsigned long)ALIGN(addr + 1, 2UL << 8));
/* First range that starts after the fault address */
node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
if (node) {
end_limit = min(end_limit, node->start);
/* Last range that ends before the fault address */
node = container_of(rb_prev(&node->rb),
struct interval_tree_node, rb);
} else {
/* Last range must end before addr because
* there was no range after addr
*/
node = container_of(rb_last(&p->svms.objects.rb_root),
struct interval_tree_node, rb);
}
if (node) {
if (node->last >= addr) {
WARN(1, "Overlap with prev node and page fault addr\n");
return -EFAULT;
}
start_limit = max(start_limit, node->last + 1);
}
*start = start_limit;
*last = end_limit - 1;
pr_debug("vma [0x%lx 0x%lx] range [0x%lx 0x%lx] is_heap_stack %d\n",
vma->vm_start >> PAGE_SHIFT, vma->vm_end >> PAGE_SHIFT,
*start, *last, *is_heap_stack);
return 0;
}
static int
svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last,
uint64_t *bo_s, uint64_t *bo_l)
{
struct amdgpu_bo_va_mapping *mapping;
struct interval_tree_node *node;
struct amdgpu_bo *bo = NULL;
unsigned long userptr;
uint32_t i;
int r;
for (i = 0; i < p->n_pdds; i++) {
struct amdgpu_vm *vm;
if (!p->pdds[i]->drm_priv)
continue;
vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
r = amdgpu_bo_reserve(vm->root.bo, false);
if (r)
return r;
/* Check userptr by searching entire vm->va interval tree */
node = interval_tree_iter_first(&vm->va, 0, ~0ULL);
while (node) {
mapping = container_of((struct rb_node *)node,
struct amdgpu_bo_va_mapping, rb);
bo = mapping->bo_va->base.bo;
if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
start << PAGE_SHIFT,
last << PAGE_SHIFT,
&userptr)) {
node = interval_tree_iter_next(node, 0, ~0ULL);
continue;
}
pr_debug("[0x%llx 0x%llx] already userptr mapped\n",
start, last);
if (bo_s && bo_l) {
*bo_s = userptr >> PAGE_SHIFT;
*bo_l = *bo_s + bo->tbo.ttm->num_pages - 1;
}
amdgpu_bo_unreserve(vm->root.bo);
return -EADDRINUSE;
}
amdgpu_bo_unreserve(vm->root.bo);
}
return 0;
}
static struct
svm_range *svm_range_create_unregistered_range(struct kfd_node *node,
struct kfd_process *p,
struct mm_struct *mm,
int64_t addr)
{
struct svm_range *prange = NULL;
unsigned long start, last;
uint32_t gpuid, gpuidx;
bool is_heap_stack;
uint64_t bo_s = 0;
uint64_t bo_l = 0;
int r;
if (svm_range_get_range_boundaries(p, addr, &start, &last,
&is_heap_stack))
return NULL;
r = svm_range_check_vm(p, start, last, &bo_s, &bo_l);
if (r != -EADDRINUSE)
r = svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l);
if (r == -EADDRINUSE) {
if (addr >= bo_s && addr <= bo_l)
return NULL;
/* Create a one-page svm range if the 2MB-aligned range overlaps a mapping */
start = addr;
last = addr;
}
prange = svm_range_new(&p->svms, start, last, true);
if (!prange) {
pr_debug("Failed to create prange in address [0x%llx]\n", addr);
return NULL;
}
if (kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx)) {
pr_debug("failed to get gpuid from kgd\n");
svm_range_free(prange, true);
return NULL;
}
if (is_heap_stack)
prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM;
svm_range_add_to_svms(prange);
svm_range_add_notifier_locked(mm, prange);
return prange;
}
/* svm_range_skip_recover - decide if prange can be recovered
* @prange: svm range structure
*
* The GPU vm retry fault handler skips recovering the range in these cases:
* 1. prange is on the deferred list to be removed after unmap; it is a stale
* fault, and the deferred list work will drain it before freeing the prange.
* 2. prange is on the deferred list to add its interval notifier after a split.
* 3. prange is a child range split from a parent prange; recover it later,
* after its interval notifier is added.
*
* Return: true to skip recover, false to recover
*/
static bool svm_range_skip_recover(struct svm_range *prange)
{
struct svm_range_list *svms = prange->svms;
spin_lock(&svms->deferred_list_lock);
if (list_empty(&prange->deferred_list) &&
list_empty(&prange->child_list)) {
spin_unlock(&svms->deferred_list_lock);
return false;
}
spin_unlock(&svms->deferred_list_lock);
if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] unmapped\n",
svms, prange, prange->start, prange->last);
return true;
}
if (prange->work_item.op == SVM_OP_ADD_RANGE_AND_MAP ||
prange->work_item.op == SVM_OP_ADD_RANGE) {
pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] not added yet\n",
svms, prange, prange->start, prange->last);
return true;
}
return false;
}
static void
svm_range_count_fault(struct kfd_node *node, struct kfd_process *p,
int32_t gpuidx)
{
struct kfd_process_device *pdd;
/* The fault is on a different page of the same range,
* or the fault is skipped to be recovered later,
* or the fault is on an invalid virtual address.
*/
if (gpuidx == MAX_GPU_INSTANCE) {
uint32_t gpuid;
int r;
r = kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx);
if (r < 0)
return;
}
/* The fault is recovered,
* or the fault cannot be recovered because the GPU has no access to the range.
*/
pdd = kfd_process_device_from_gpuidx(p, gpuidx);
if (pdd)
WRITE_ONCE(pdd->faults, pdd->faults + 1);
}
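/* svm_fault_allowed - check that the VMA protection flags permit the faulting
* access: VM_READ is always required, and VM_WRITE as well for a write fault.
*/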
static bool
svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
{
unsigned long requested = VM_READ;
if (write_fault)
requested |= VM_WRITE;
pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested,
vma->vm_flags);
return (vma->vm_flags & requested) == requested;
}
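/* svm_range_restore_pages - GPU retry fault handler entry point for SVM.
* Looks up the faulting process and range (creating an unregistered range if
* none exists), decides the best restore location, migrates the range there if
* needed, and finally validates and maps it on the faulting GPU. Returns 0 for
* faults that can safely be dropped (stale faults, draining, process gone).
*/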
int
svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
uint32_t vmid, uint32_t node_id,
uint64_t addr, bool write_fault)
{
struct mm_struct *mm = NULL;
struct svm_range_list *svms;
struct svm_range *prange;
struct kfd_process *p;
ktime_t timestamp = ktime_get_boottime();
struct kfd_node *node;
int32_t best_loc;
int32_t gpuidx = MAX_GPU_INSTANCE;
bool write_locked = false;
struct vm_area_struct *vma;
bool migration = false;
int r = 0;
if (!KFD_IS_SVM_API_SUPPORTED(adev)) {
pr_debug("device does not support SVM\n");
return -EFAULT;
}
p = kfd_lookup_process_by_pasid(pasid);
if (!p) {
pr_debug("kfd process not founded pasid 0x%x\n", pasid);
return 0;
}
svms = &p->svms;
pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
if (atomic_read(&svms->drain_pagefaults)) {
pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
r = 0;
goto out;
}
if (!p->xnack_enabled) {
pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
r = -EFAULT;
goto out;
}
/* p->lead_thread is available as kfd_process_wq_release flush the work
* before releasing task ref.
*/
mm = get_task_mm(p->lead_thread);
if (!mm) {
pr_debug("svms 0x%p failed to get mm\n", svms);
r = 0;
goto out;
}
node = kfd_node_by_irq_ids(adev, node_id, vmid);
if (!node) {
pr_debug("kfd node does not exist node_id: %d, vmid: %d\n", node_id,
vmid);
r = -EFAULT;
goto out;
}
mmap_read_lock(mm);
retry_write_locked:
mutex_lock(&svms->lock);
prange = svm_range_from_addr(svms, addr, NULL);
if (!prange) {
pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
svms, addr);
if (!write_locked) {
/* Need the write lock to create new range with MMU notifier.
* Also flush pending deferred work to make sure the interval
* tree is up to date before we add a new range
*/
mutex_unlock(&svms->lock);
mmap_read_unlock(mm);
mmap_write_lock(mm);
write_locked = true;
goto retry_write_locked;
}
prange = svm_range_create_unregistered_range(node, p, mm, addr);
if (!prange) {
pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
svms, addr);
mmap_write_downgrade(mm);
r = -EFAULT;
goto out_unlock_svms;
}
}
if (write_locked)
mmap_write_downgrade(mm);
mutex_lock(&prange->migrate_mutex);
if (svm_range_skip_recover(prange)) {
amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
r = 0;
goto out_unlock_range;
}
/* skip duplicate vm fault on different pages of same range */
if (ktime_before(timestamp, ktime_add_ns(prange->validate_timestamp,
AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING))) {
pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
svms, prange->start, prange->last);
r = 0;
goto out_unlock_range;
}
/* __do_munmap removed the VMA; return success as we are handling a
* stale retry fault.
*/
vma = vma_lookup(mm, addr << PAGE_SHIFT);
if (!vma) {
pr_debug("address 0x%llx VMA is removed\n", addr);
r = 0;
goto out_unlock_range;
}
if (!svm_fault_allowed(vma, write_fault)) {
pr_debug("fault addr 0x%llx no %s permission\n", addr,
write_fault ? "write" : "read");
r = -EPERM;
goto out_unlock_range;
}
best_loc = svm_range_best_restore_location(prange, node, &gpuidx);
if (best_loc == -1) {
pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n",
svms, prange->start, prange->last);
r = -EACCES;
goto out_unlock_range;
}
pr_debug("svms %p [0x%lx 0x%lx] best restore 0x%x, actual loc 0x%x\n",
svms, prange->start, prange->last, best_loc,
prange->actual_loc);
kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr,
write_fault, timestamp);
if (prange->actual_loc != best_loc) {
migration = true;
if (best_loc) {
r = svm_migrate_to_vram(prange, best_loc, mm,
KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
if (r) {
pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n",
r, addr);
/* Fallback to system memory if migration to
* VRAM failed
*/
if (prange->actual_loc)
r = svm_migrate_vram_to_ram(prange, mm,
KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
NULL);
else
r = 0;
}
} else {
r = svm_migrate_vram_to_ram(prange, mm,
KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
NULL);
}
if (r) {
pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
r, svms, prange->start, prange->last);
goto out_unlock_range;
}
}
r = svm_range_validate_and_map(mm, prange, gpuidx, false, false, false);
if (r)
pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
r, svms, prange->start, prange->last);
kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr,
migration);
out_unlock_range:
mutex_unlock(&prange->migrate_mutex);
out_unlock_svms:
mutex_unlock(&svms->lock);
mmap_read_unlock(mm);
svm_range_count_fault(node, p, gpuidx);
mmput(mm);
out:
kfd_unref_process(p);
if (r == -EAGAIN) {
pr_debug("recover vm fault later\n");
amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid);
r = 0;
}
return r;
}
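/* svm_range_switch_xnack_reserve_mem - adjust the userptr memory accounting
* when the process switches its xnack mode: reserve the size of every svm
* range (and its children) when turning xnack off, or unreserve it when
* turning xnack on. On a reservation failure everything reserved so far is
* rolled back and the mode is left unchanged.
*/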
int
svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled)
{
struct svm_range *prange, *pchild;
uint64_t reserved_size = 0;
uint64_t size;
int r = 0;
pr_debug("switching xnack from %d to %d\n", p->xnack_enabled, xnack_enabled);
mutex_lock(&p->svms.lock);
list_for_each_entry(prange, &p->svms.list, list) {
svm_range_lock(prange);
list_for_each_entry(pchild, &prange->child_list, child_list) {
size = (pchild->last - pchild->start + 1) << PAGE_SHIFT;
if (xnack_enabled) {
amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
} else {
r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
if (r)
goto out_unlock;
reserved_size += size;
}
}
size = (prange->last - prange->start + 1) << PAGE_SHIFT;
if (xnack_enabled) {
amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
} else {
r = amdgpu_amdkfd_reserve_mem_limit(NULL, size,
KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
if (r)
goto out_unlock;
reserved_size += size;
}
out_unlock:
svm_range_unlock(prange);
if (r)
break;
}
if (r)
amdgpu_amdkfd_unreserve_mem_limit(NULL, reserved_size,
KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
else
/* Changing the xnack mode must be done inside the svms lock to avoid
* racing with svm_range_deferred_list_work unreserving memory in
* parallel.
*/
p->xnack_enabled = xnack_enabled;
mutex_unlock(&p->svms.lock);
return r;
}
void svm_range_list_fini(struct kfd_process *p)
{
struct svm_range *prange;
struct svm_range *next;
pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms);
cancel_delayed_work_sync(&p->svms.restore_work);
/* Ensure list work is finished before process is destroyed */
flush_work(&p->svms.deferred_list_work);
/*
* Ensure no retry faults come in afterwards, as the page fault handler
* will no longer find the kfd process or be able to take the mm lock to
* recover the fault.
*/
atomic_inc(&p->svms.drain_pagefaults);
svm_range_drain_retry_fault(&p->svms);
list_for_each_entry_safe(prange, next, &p->svms.list, list) {
svm_range_unlink(prange);
svm_range_remove_notifier(prange);
svm_range_free(prange, true);
}
mutex_destroy(&p->svms.lock);
pr_debug("pasid 0x%x svms 0x%p done\n", p->pasid, &p->svms);
}
int svm_range_list_init(struct kfd_process *p)
{
struct svm_range_list *svms = &p->svms;
int i;
svms->objects = RB_ROOT_CACHED;
mutex_init(&svms->lock);
INIT_LIST_HEAD(&svms->list);
atomic_set(&svms->evicted_ranges, 0);
atomic_set(&svms->drain_pagefaults, 0);
INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
INIT_LIST_HEAD(&svms->deferred_range_list);
INIT_LIST_HEAD(&svms->criu_svm_metadata_list);
spin_lock_init(&svms->deferred_list_lock);
for (i = 0; i < p->n_pdds; i++)
if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev->adev))
bitmap_set(svms->bitmap_supported, i, 1);
return 0;
}
/**
* svm_range_check_vm - check if virtual address range mapped already
* @p: current kfd_process
* @start: range start address, in pages
* @last: range last address, in pages
* @bo_s: mapping start address in pages if address range already mapped
* @bo_l: mapping last address in pages if address range already mapped
*
 * The purpose is to avoid clashing with virtual address ranges already
 * allocated by the kfd_ioctl_alloc_memory_of_gpu ioctl.
 * Each pdd in the kfd_process is checked.
*
* Context: Process context
*
* Return 0 - OK, if the range is not mapped.
* Otherwise error code:
* -EADDRINUSE - if address is mapped already by kfd_ioctl_alloc_memory_of_gpu
* -ERESTARTSYS - A wait for the buffer to become unreserved was interrupted by
* a signal. Release all buffer reservations and return to user-space.
*/
static int
svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
uint64_t *bo_s, uint64_t *bo_l)
{
struct amdgpu_bo_va_mapping *mapping;
struct interval_tree_node *node;
uint32_t i;
int r;
for (i = 0; i < p->n_pdds; i++) {
struct amdgpu_vm *vm;
if (!p->pdds[i]->drm_priv)
continue;
vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
r = amdgpu_bo_reserve(vm->root.bo, false);
if (r)
return r;
node = interval_tree_iter_first(&vm->va, start, last);
if (node) {
pr_debug("range [0x%llx 0x%llx] already TTM mapped\n",
start, last);
mapping = container_of((struct rb_node *)node,
struct amdgpu_bo_va_mapping, rb);
if (bo_s && bo_l) {
*bo_s = mapping->start;
*bo_l = mapping->last;
}
amdgpu_bo_unreserve(vm->root.bo);
return -EADDRINUSE;
}
amdgpu_bo_unreserve(vm->root.bo);
}
return 0;
}
/**
* svm_range_is_valid - check if virtual address range is valid
* @p: current kfd_process
* @start: range start address, in pages
* @size: range size, in pages
*
 * A valid virtual address range is one that belongs to one or more VMAs.
*
* Context: Process context
*
* Return:
* 0 - OK, otherwise error code
*/
static int
svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size)
{
const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
struct vm_area_struct *vma;
unsigned long end;
unsigned long start_unchg = start;
start <<= PAGE_SHIFT;
end = start + (size << PAGE_SHIFT);
do {
vma = vma_lookup(p->mm, start);
if (!vma || (vma->vm_flags & device_vma))
return -EFAULT;
start = min(end, vma->vm_end);
} while (start < end);
return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT, NULL,
NULL);
}
/**
* svm_range_best_prefetch_location - decide the best prefetch location
* @prange: svm range structure
*
* For xnack off:
 * If the range maps to a single GPU, the best prefetch location is
 * prefetch_loc, which can be CPU or GPU.
 *
 * If the range is ACCESS or ACCESS_IN_PLACE by mGPUs, the best prefetch
 * location is the prefetch_loc GPU only if the mGPUs are connected in the
 * same XGMI hive; otherwise the best prefetch location is always CPU,
 * because a GPU cannot coherently map the VRAM of other GPUs, even with a
 * large-BAR PCIe connection.
 *
 * For xnack on:
 * If the range is not ACCESS_IN_PLACE by mGPUs, the best prefetch location
 * is prefetch_loc; access from other GPUs will generate a vm fault and
 * trigger migration.
 *
 * If the range is ACCESS_IN_PLACE by mGPUs, the best prefetch location is
 * the prefetch_loc GPU only if the mGPUs are connected in the same XGMI
 * hive; otherwise the best prefetch location is always CPU.
*
* Context: Process context
*
* Return:
 * 0 for CPU, or the GPU id otherwise
*/
static uint32_t
svm_range_best_prefetch_location(struct svm_range *prange)
{
DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
uint32_t best_loc = prange->prefetch_loc;
struct kfd_process_device *pdd;
struct kfd_node *bo_node;
struct kfd_process *p;
uint32_t gpuidx;
p = container_of(prange->svms, struct kfd_process, svms);
if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED)
goto out;
bo_node = svm_range_get_node_by_id(prange, best_loc);
if (!bo_node) {
WARN_ONCE(1, "failed to get valid kfd node at id%x\n", best_loc);
best_loc = 0;
goto out;
}
if (bo_node->adev->gmc.is_app_apu) {
best_loc = 0;
goto out;
}
if (p->xnack_enabled)
bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
else
bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
MAX_GPU_INSTANCE);
for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
pdd = kfd_process_device_from_gpuidx(p, gpuidx);
if (!pdd) {
pr_debug("failed to get device by idx 0x%x\n", gpuidx);
continue;
}
if (pdd->dev->adev == bo_node->adev)
continue;
if (!svm_nodes_in_same_hive(pdd->dev, bo_node)) {
best_loc = 0;
break;
}
}
out:
pr_debug("xnack %d svms 0x%p [0x%lx 0x%lx] best loc 0x%x\n",
p->xnack_enabled, &p->svms, prange->start, prange->last,
best_loc);
return best_loc;
}
/**
 * svm_range_trigger_migration - start page migration if prefetch loc changed
* @mm: current process mm_struct
* @prange: svm range structure
* @migrated: output, true if migration is triggered
*
 * If the range's prefetch_loc is a GPU and its actual loc is CPU (0), migrate
 * the range from RAM to VRAM.
 * If the range's prefetch_loc is CPU (0) and its actual loc is a GPU, migrate
 * the range from VRAM to RAM.
 *
 * If GPU vm fault retry is not enabled, migration interacts with the MMU
 * notifier and restore work:
 * 1. migrate_vma_setup invalidates pages, the MMU notifier callback
 *    svm_range_evict stops all queues and schedules restore work
 * 2. svm_range_restore_work waits for migration to finish by
 *    a. svm_range_validate_vram taking prange->migrate_mutex
 *    b. svm_range_validate_ram HMM get pages waiting until the CPU fault
 *       handler returns
 * 3. restore work updates the GPU mappings and resumes all queues.
*
* Context: Process context
*
* Return:
* 0 - OK, otherwise - error code of migration
*/
static int
svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
bool *migrated)
{
uint32_t best_loc;
int r = 0;
*migrated = false;
best_loc = svm_range_best_prefetch_location(prange);
if (best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
best_loc == prange->actual_loc)
return 0;
if (!best_loc) {
r = svm_migrate_vram_to_ram(prange, mm,
KFD_MIGRATE_TRIGGER_PREFETCH, NULL);
*migrated = !r;
return r;
}
r = svm_migrate_to_vram(prange, best_loc, mm, KFD_MIGRATE_TRIGGER_PREFETCH);
*migrated = !r;
return r;
}
int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
{
if (!fence)
return -EINVAL;
if (dma_fence_is_signaled(&fence->base))
return 0;
if (fence->svm_bo) {
WRITE_ONCE(fence->svm_bo->evicting, 1);
schedule_work(&fence->svm_bo->eviction_work);
}
return 0;
}
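/* svm_range_evict_svm_bo_worker - evict a VRAM-backed svm_bo
 *
 * Migrates every range still attached to the svm_bo back to system memory,
 * retrying a few times per range, then signals the eviction fence and drops
 * the worker's reference to the svm_bo.
 */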
static void svm_range_evict_svm_bo_worker(struct work_struct *work)
{
struct svm_range_bo *svm_bo;
struct mm_struct *mm;
int r = 0;
svm_bo = container_of(work, struct svm_range_bo, eviction_work);
if (!svm_bo_ref_unless_zero(svm_bo))
return; /* svm_bo was freed while eviction was pending */
if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
mm = svm_bo->eviction_fence->mm;
} else {
svm_range_bo_unref(svm_bo);
return;
}
mmap_read_lock(mm);
spin_lock(&svm_bo->list_lock);
while (!list_empty(&svm_bo->range_list) && !r) {
struct svm_range *prange =
list_first_entry(&svm_bo->range_list,
struct svm_range, svm_bo_list);
int retries = 3;
list_del_init(&prange->svm_bo_list);
spin_unlock(&svm_bo->list_lock);
pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
prange->start, prange->last);
mutex_lock(&prange->migrate_mutex);
do {
r = svm_migrate_vram_to_ram(prange, mm,
KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL);
} while (!r && prange->actual_loc && --retries);
if (!r && prange->actual_loc)
pr_info_once("Migration failed during eviction\n");
if (!prange->actual_loc) {
mutex_lock(&prange->lock);
prange->svm_bo = NULL;
mutex_unlock(&prange->lock);
}
mutex_unlock(&prange->migrate_mutex);
spin_lock(&svm_bo->list_lock);
}
spin_unlock(&svm_bo->list_lock);
mmap_read_unlock(mm);
mmput(mm);
dma_fence_signal(&svm_bo->eviction_fence->base);
/* This is the last reference to svm_bo, after svm_range_vram_node_free
* has been called in svm_migrate_vram_to_ram
*/
WARN_ONCE(!r && kref_read(&svm_bo->kref) != 1, "This was not the last reference\n");
svm_range_bo_unref(svm_bo);
}
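/* svm_range_set_attr - apply SVM attributes to a virtual address range
 *
 * Validates the requested attributes and address range, adds new ranges and
 * splits or removes overlapping ones as a transaction, then triggers
 * prefetch migrations and revalidates/maps the updated ranges to the GPUs
 * as needed.
 */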
static int
svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
uint64_t start, uint64_t size, uint32_t nattr,
struct kfd_ioctl_svm_attribute *attrs)
{
struct amdkfd_process_info *process_info = p->kgd_process_info;
struct list_head update_list;
struct list_head insert_list;
struct list_head remove_list;
struct svm_range_list *svms;
struct svm_range *prange;
struct svm_range *next;
bool update_mapping = false;
bool flush_tlb;
int r = 0;
pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
p->pasid, &p->svms, start, start + size - 1, size);
r = svm_range_check_attr(p, nattr, attrs);
if (r)
return r;
svms = &p->svms;
mutex_lock(&process_info->lock);
svm_range_list_lock_and_flush_work(svms, mm);
r = svm_range_is_valid(p, start, size);
if (r) {
pr_debug("invalid range r=%d\n", r);
mmap_write_unlock(mm);
goto out;
}
mutex_lock(&svms->lock);
/* Add new range and split existing ranges as needed */
r = svm_range_add(p, start, size, nattr, attrs, &update_list,
&insert_list, &remove_list);
if (r) {
mutex_unlock(&svms->lock);
mmap_write_unlock(mm);
goto out;
}
/* Apply changes as a transaction */
list_for_each_entry_safe(prange, next, &insert_list, list) {
svm_range_add_to_svms(prange);
svm_range_add_notifier_locked(mm, prange);
}
list_for_each_entry(prange, &update_list, update_list) {
svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping);
/* TODO: unmap ranges from GPU that lost access */
}
list_for_each_entry_safe(prange, next, &remove_list, update_list) {
pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
prange->svms, prange, prange->start,
prange->last);
svm_range_unlink(prange);
svm_range_remove_notifier(prange);
svm_range_free(prange, false);
}
mmap_write_downgrade(mm);
/* Trigger migrations and revalidate and map to GPUs as needed. If
* this fails we may be left with partially completed actions. There
* is no clean way of rolling back to the previous state in such a
* case because the rollback wouldn't be guaranteed to work either.
*/
list_for_each_entry(prange, &update_list, update_list) {
bool migrated;
mutex_lock(&prange->migrate_mutex);
r = svm_range_trigger_migration(mm, prange, &migrated);
if (r)
goto out_unlock_range;
if (migrated && (!p->xnack_enabled ||
(prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) &&
prange->mapped_to_gpu) {
pr_debug("restore_work will update mappings of GPUs\n");
mutex_unlock(&prange->migrate_mutex);
continue;
}
if (!migrated && !update_mapping) {
mutex_unlock(&prange->migrate_mutex);
continue;
}
flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;
r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
true, true, flush_tlb);
if (r)
pr_debug("failed %d to map svm range\n", r);
out_unlock_range:
mutex_unlock(&prange->migrate_mutex);
if (r)
break;
}
dynamic_svm_range_dump(svms);
mutex_unlock(&svms->lock);
mmap_read_unlock(mm);
out:
mutex_unlock(&process_info->lock);
pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
&p->svms, start, start + size - 1, r);
return r;
}
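/* svm_range_get_attr - query SVM attributes of a virtual address range
 *
 * Walks all ranges overlapping the requested interval and reports the common
 * preferred/prefetch location, accessibility, flags and minimum granularity.
 * Per-range values that differ are reported as undefined.
 */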
static int
svm_range_get_attr(struct kfd_process *p, struct mm_struct *mm,
uint64_t start, uint64_t size, uint32_t nattr,
struct kfd_ioctl_svm_attribute *attrs)
{
DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
bool get_preferred_loc = false;
bool get_prefetch_loc = false;
bool get_granularity = false;
bool get_accessible = false;
bool get_flags = false;
uint64_t last = start + size - 1UL;
uint8_t granularity = 0xff;
struct interval_tree_node *node;
struct svm_range_list *svms;
struct svm_range *prange;
uint32_t prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
uint32_t location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
uint32_t flags_and = 0xffffffff;
uint32_t flags_or = 0;
int gpuidx;
uint32_t i;
int r = 0;
pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
start + size - 1, nattr);
/* Flush pending deferred work to avoid racing with deferred actions from
* previous memory map changes (e.g. munmap). Concurrent memory map changes
* can still race with get_attr because we don't hold the mmap lock. But that
* would be a race condition in the application anyway, and undefined
* behaviour is acceptable in that case.
*/
flush_work(&p->svms.deferred_list_work);
mmap_read_lock(mm);
r = svm_range_is_valid(p, start, size);
mmap_read_unlock(mm);
if (r) {
pr_debug("invalid range r=%d\n", r);
return r;
}
for (i = 0; i < nattr; i++) {
switch (attrs[i].type) {
case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
get_preferred_loc = true;
break;
case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
get_prefetch_loc = true;
break;
case KFD_IOCTL_SVM_ATTR_ACCESS:
get_accessible = true;
break;
case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
get_flags = true;
break;
case KFD_IOCTL_SVM_ATTR_GRANULARITY:
get_granularity = true;
break;
case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
fallthrough;
default:
pr_debug("get invalid attr type 0x%x\n", attrs[i].type);
return -EINVAL;
}
}
svms = &p->svms;
mutex_lock(&svms->lock);
node = interval_tree_iter_first(&svms->objects, start, last);
if (!node) {
pr_debug("range attrs not found return default values\n");
svm_range_set_default_attributes(&location, &prefetch_loc,
&granularity, &flags_and);
flags_or = flags_and;
if (p->xnack_enabled)
bitmap_copy(bitmap_access, svms->bitmap_supported,
MAX_GPU_INSTANCE);
else
bitmap_zero(bitmap_access, MAX_GPU_INSTANCE);
bitmap_zero(bitmap_aip, MAX_GPU_INSTANCE);
goto fill_values;
}
bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE);
bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE);
while (node) {
struct interval_tree_node *next;
prange = container_of(node, struct svm_range, it_node);
next = interval_tree_iter_next(node, start, last);
if (get_preferred_loc) {
if (prange->preferred_loc ==
KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
(location != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
location != prange->preferred_loc)) {
location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
get_preferred_loc = false;
} else {
location = prange->preferred_loc;
}
}
if (get_prefetch_loc) {
if (prange->prefetch_loc ==
KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
(prefetch_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
prefetch_loc != prange->prefetch_loc)) {
prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
get_prefetch_loc = false;
} else {
prefetch_loc = prange->prefetch_loc;
}
}
if (get_accessible) {
bitmap_and(bitmap_access, bitmap_access,
prange->bitmap_access, MAX_GPU_INSTANCE);
bitmap_and(bitmap_aip, bitmap_aip,
prange->bitmap_aip, MAX_GPU_INSTANCE);
}
if (get_flags) {
flags_and &= prange->flags;
flags_or |= prange->flags;
}
if (get_granularity && prange->granularity < granularity)
granularity = prange->granularity;
node = next;
}
fill_values:
mutex_unlock(&svms->lock);
for (i = 0; i < nattr; i++) {
switch (attrs[i].type) {
case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
attrs[i].value = location;
break;
case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
attrs[i].value = prefetch_loc;
break;
case KFD_IOCTL_SVM_ATTR_ACCESS:
gpuidx = kfd_process_gpuidx_from_gpuid(p,
attrs[i].value);
if (gpuidx < 0) {
pr_debug("invalid gpuid %x\n", attrs[i].value);
return -EINVAL;
}
if (test_bit(gpuidx, bitmap_access))
attrs[i].type = KFD_IOCTL_SVM_ATTR_ACCESS;
else if (test_bit(gpuidx, bitmap_aip))
attrs[i].type =
KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE;
else
attrs[i].type = KFD_IOCTL_SVM_ATTR_NO_ACCESS;
break;
case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
attrs[i].value = flags_and;
break;
case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
attrs[i].value = ~flags_or;
break;
case KFD_IOCTL_SVM_ATTR_GRANULARITY:
attrs[i].value = (uint32_t)granularity;
break;
}
}
return 0;
}
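/* kfd_criu_resume_svm - restore SVM ranges collected during CRIU restore
 *
 * Replays the attributes saved on the criu_svm_metadata_list through
 * svm_range_set_attr, appending a CLR_FLAGS attribute and substituting a
 * dummy attribute for prefetch locations that were undefined at checkpoint
 * time.
 */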
int kfd_criu_resume_svm(struct kfd_process *p)
{
struct kfd_ioctl_svm_attribute *set_attr_new, *set_attr = NULL;
int nattr_common = 4, nattr_accessibility = 1;
struct criu_svm_metadata *criu_svm_md = NULL;
struct svm_range_list *svms = &p->svms;
struct criu_svm_metadata *next = NULL;
uint32_t set_flags = 0xffffffff;
int i, j, num_attrs, ret = 0;
uint64_t set_attr_size;
struct mm_struct *mm;
if (list_empty(&svms->criu_svm_metadata_list)) {
pr_debug("No SVM data from CRIU restore stage 2\n");
return ret;
}
mm = get_task_mm(p->lead_thread);
if (!mm) {
pr_err("failed to get mm for the target process\n");
return -ESRCH;
}
num_attrs = nattr_common + (nattr_accessibility * p->n_pdds);
i = j = 0;
list_for_each_entry(criu_svm_md, &svms->criu_svm_metadata_list, list) {
pr_debug("criu_svm_md[%d]\n\tstart: 0x%llx size: 0x%llx (npages)\n",
i, criu_svm_md->data.start_addr, criu_svm_md->data.size);
for (j = 0; j < num_attrs; j++) {
pr_debug("\ncriu_svm_md[%d]->attrs[%d].type : 0x%x\ncriu_svm_md[%d]->attrs[%d].value : 0x%x\n",
i, j, criu_svm_md->data.attrs[j].type,
i, j, criu_svm_md->data.attrs[j].value);
switch (criu_svm_md->data.attrs[j].type) {
/* During Checkpoint operation, the query for
* KFD_IOCTL_SVM_ATTR_PREFETCH_LOC attribute might
* return KFD_IOCTL_SVM_LOCATION_UNDEFINED if they were
* not used by the range which was checkpointed. Care
* must be taken to not restore with an invalid value
* otherwise the gpuidx value will be invalid and
* set_attr would eventually fail so just replace those
* with another dummy attribute such as
* KFD_IOCTL_SVM_ATTR_SET_FLAGS.
*/
case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
if (criu_svm_md->data.attrs[j].value ==
KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
criu_svm_md->data.attrs[j].type =
KFD_IOCTL_SVM_ATTR_SET_FLAGS;
criu_svm_md->data.attrs[j].value = 0;
}
break;
case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
set_flags = criu_svm_md->data.attrs[j].value;
break;
default:
break;
}
}
/* CLR_FLAGS is not available via get_attr during checkpoint but
* it needs to be inserted before restoring the ranges so
* allocate extra space for it before calling set_attr
*/
set_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
(num_attrs + 1);
set_attr_new = krealloc(set_attr, set_attr_size,
GFP_KERNEL);
if (!set_attr_new) {
ret = -ENOMEM;
goto exit;
}
set_attr = set_attr_new;
memcpy(set_attr, criu_svm_md->data.attrs, num_attrs *
sizeof(struct kfd_ioctl_svm_attribute));
set_attr[num_attrs].type = KFD_IOCTL_SVM_ATTR_CLR_FLAGS;
set_attr[num_attrs].value = ~set_flags;
ret = svm_range_set_attr(p, mm, criu_svm_md->data.start_addr,
criu_svm_md->data.size, num_attrs + 1,
set_attr);
if (ret) {
pr_err("CRIU: failed to set range attributes\n");
goto exit;
}
i++;
}
exit:
kfree(set_attr);
list_for_each_entry_safe(criu_svm_md, next, &svms->criu_svm_metadata_list, list) {
pr_debug("freeing criu_svm_md[]\n\tstart: 0x%llx\n",
criu_svm_md->data.start_addr);
kfree(criu_svm_md);
}
mmput(mm);
return ret;
}
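/* kfd_criu_restore_svm - read one checkpointed SVM range from CRIU data
 *
 * Copies a single kfd_criu_svm_range_priv_data object from user space and
 * queues it on the criu_svm_metadata_list; the range itself is recreated
 * later by kfd_criu_resume_svm.
 */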
int kfd_criu_restore_svm(struct kfd_process *p,
uint8_t __user *user_priv_ptr,
uint64_t *priv_data_offset,
uint64_t max_priv_data_size)
{
uint64_t svm_priv_data_size, svm_object_md_size, svm_attrs_size;
int nattr_common = 4, nattr_accessibility = 1;
struct criu_svm_metadata *criu_svm_md = NULL;
struct svm_range_list *svms = &p->svms;
uint32_t num_devices;
int ret = 0;
num_devices = p->n_pdds;
/* Handle one SVM range object at a time. The number of GPUs is assumed to
 * be the same on the restore node; this must be checked earlier, while
 * evaluating the topology.
 */
svm_attrs_size = sizeof(struct kfd_ioctl_svm_attribute) *
(nattr_common + nattr_accessibility * num_devices);
svm_object_md_size = sizeof(struct criu_svm_metadata) + svm_attrs_size;
svm_priv_data_size = sizeof(struct kfd_criu_svm_range_priv_data) +
svm_attrs_size;
criu_svm_md = kzalloc(svm_object_md_size, GFP_KERNEL);
if (!criu_svm_md) {
pr_err("failed to allocate memory to store svm metadata\n");
return -ENOMEM;
}
if (*priv_data_offset + svm_priv_data_size > max_priv_data_size) {
ret = -EINVAL;
goto exit;
}
ret = copy_from_user(&criu_svm_md->data, user_priv_ptr + *priv_data_offset,
svm_priv_data_size);
if (ret) {
ret = -EFAULT;
goto exit;
}
*priv_data_offset += svm_priv_data_size;
list_add_tail(&criu_svm_md->list, &svms->criu_svm_metadata_list);
return 0;
exit:
kfree(criu_svm_md);
return ret;
}
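/* svm_range_get_info - report the number of SVM ranges and the buffer size
 * needed to checkpoint their private data, based on the per-process and
 * per-GPU attribute counts.
 */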
int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
uint64_t *svm_priv_data_size)
{
uint64_t total_size, accessibility_size, common_attr_size;
int nattr_common = 4, nattr_accessibility = 1;
int num_devices = p->n_pdds;
struct svm_range_list *svms;
struct svm_range *prange;
uint32_t count = 0;
*svm_priv_data_size = 0;
svms = &p->svms;
if (!svms)
return -EINVAL;
mutex_lock(&svms->lock);
list_for_each_entry(prange, &svms->list, list) {
pr_debug("prange: 0x%p start: 0x%lx\t npages: 0x%llx\t end: 0x%llx\n",
prange, prange->start, prange->npages,
prange->start + prange->npages - 1);
count++;
}
mutex_unlock(&svms->lock);
*num_svm_ranges = count;
/* Only the accessibility attributes need to be queried for each GPU
 * individually; the remaining ones span the entire process regardless of
 * the GPU nodes. Of the remaining attributes,
 * KFD_IOCTL_SVM_ATTR_CLR_FLAGS need not be saved.
*
* KFD_IOCTL_SVM_ATTR_PREFERRED_LOC
* KFD_IOCTL_SVM_ATTR_PREFETCH_LOC
* KFD_IOCTL_SVM_ATTR_SET_FLAGS
* KFD_IOCTL_SVM_ATTR_GRANULARITY
*
 * ** ACCESSIBILITY ATTRIBUTES **
* (Considered as one, type is altered during query, value is gpuid)
* KFD_IOCTL_SVM_ATTR_ACCESS
* KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE
* KFD_IOCTL_SVM_ATTR_NO_ACCESS
*/
if (*num_svm_ranges > 0) {
common_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
nattr_common;
accessibility_size = sizeof(struct kfd_ioctl_svm_attribute) *
nattr_accessibility * num_devices;
total_size = sizeof(struct kfd_criu_svm_range_priv_data) +
common_attr_size + accessibility_size;
*svm_priv_data_size = *num_svm_ranges * total_size;
}
pr_debug("num_svm_ranges %u total_priv_size %llu\n", *num_svm_ranges,
*svm_priv_data_size);
return 0;
}
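/* kfd_criu_checkpoint_svm - write the private data of every SVM range to
 * the CRIU checkpoint buffer
 *
 * For each range, queries the common and per-GPU accessibility attributes
 * with svm_range_get_attr and copies the result to user space.
 */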
int kfd_criu_checkpoint_svm(struct kfd_process *p,
uint8_t __user *user_priv_data,
uint64_t *priv_data_offset)
{
struct kfd_criu_svm_range_priv_data *svm_priv = NULL;
struct kfd_ioctl_svm_attribute *query_attr = NULL;
uint64_t svm_priv_data_size, query_attr_size = 0;
int index, nattr_common = 4, ret = 0;
struct svm_range_list *svms;
int num_devices = p->n_pdds;
struct svm_range *prange;
struct mm_struct *mm;
svms = &p->svms;
if (!svms)
return -EINVAL;
mm = get_task_mm(p->lead_thread);
if (!mm) {
pr_err("failed to get mm for the target process\n");
return -ESRCH;
}
query_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
(nattr_common + num_devices);
query_attr = kzalloc(query_attr_size, GFP_KERNEL);
if (!query_attr) {
ret = -ENOMEM;
goto exit;
}
query_attr[0].type = KFD_IOCTL_SVM_ATTR_PREFERRED_LOC;
query_attr[1].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
query_attr[2].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
query_attr[3].type = KFD_IOCTL_SVM_ATTR_GRANULARITY;
for (index = 0; index < num_devices; index++) {
struct kfd_process_device *pdd = p->pdds[index];
query_attr[index + nattr_common].type =
KFD_IOCTL_SVM_ATTR_ACCESS;
query_attr[index + nattr_common].value = pdd->user_gpu_id;
}
svm_priv_data_size = sizeof(*svm_priv) + query_attr_size;
svm_priv = kzalloc(svm_priv_data_size, GFP_KERNEL);
if (!svm_priv) {
ret = -ENOMEM;
goto exit_query;
}
index = 0;
list_for_each_entry(prange, &svms->list, list) {
svm_priv->object_type = KFD_CRIU_OBJECT_TYPE_SVM_RANGE;
svm_priv->start_addr = prange->start;
svm_priv->size = prange->npages;
memcpy(&svm_priv->attrs, query_attr, query_attr_size);
pr_debug("CRIU: prange: 0x%p start: 0x%lx\t npages: 0x%llx end: 0x%llx\t size: 0x%llx\n",
prange, prange->start, prange->npages,
prange->start + prange->npages - 1,
prange->npages * PAGE_SIZE);
ret = svm_range_get_attr(p, mm, svm_priv->start_addr,
svm_priv->size,
(nattr_common + num_devices),
svm_priv->attrs);
if (ret) {
pr_err("CRIU: failed to obtain range attributes\n");
goto exit_priv;
}
if (copy_to_user(user_priv_data + *priv_data_offset, svm_priv,
svm_priv_data_size)) {
pr_err("Failed to copy svm priv to user\n");
ret = -EFAULT;
goto exit_priv;
}
*priv_data_offset += svm_priv_data_size;
}
exit_priv:
kfree(svm_priv);
exit_query:
kfree(query_attr);
exit:
mmput(mm);
return ret;
}
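/* svm_ioctl - entry point for the KFD SVM ioctl
 *
 * Converts the byte-based start address and size to page units and
 * dispatches to svm_range_set_attr or svm_range_get_attr.
 */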
int
svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
uint64_t size, uint32_t nattrs, struct kfd_ioctl_svm_attribute *attrs)
{
struct mm_struct *mm = current->mm;
int r;
start >>= PAGE_SHIFT;
size >>= PAGE_SHIFT;
switch (op) {
case KFD_IOCTL_SVM_OP_SET_ATTR:
r = svm_range_set_attr(p, mm, start, size, nattrs, attrs);
break;
case KFD_IOCTL_SVM_OP_GET_ATTR:
r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);
break;
default:
r = -EINVAL;
break;
}
return r;
}
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_svm.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2014-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"
#include "mes_api_def.h"
#include "kfd_debug.h"
/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
u32 pasid, unsigned int vmid);
static int execute_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_unmap_queues_filter filter,
uint32_t filter_param,
uint32_t grace_period);
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_unmap_queues_filter filter,
uint32_t filter_param,
uint32_t grace_period,
bool reset);
static int map_queues_cpsch(struct device_queue_manager *dqm);
static void deallocate_sdma_queue(struct device_queue_manager *dqm,
struct queue *q);
static inline void deallocate_hqd(struct device_queue_manager *dqm,
struct queue *q);
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
static int allocate_sdma_queue(struct device_queue_manager *dqm,
struct queue *q, const uint32_t *restore_sdma_id);
static void kfd_process_hw_exception(struct work_struct *work);
static inline
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
if (type == KFD_QUEUE_TYPE_SDMA || type == KFD_QUEUE_TYPE_SDMA_XGMI)
return KFD_MQD_TYPE_SDMA;
return KFD_MQD_TYPE_CP;
}
static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
{
int i;
int pipe_offset = (mec * dqm->dev->kfd->shared_resources.num_pipe_per_mec
+ pipe) * dqm->dev->kfd->shared_resources.num_queue_per_pipe;
/* queue is available for KFD usage if bit is 1 */
for (i = 0; i < dqm->dev->kfd->shared_resources.num_queue_per_pipe; ++i)
if (test_bit(pipe_offset + i,
dqm->dev->kfd->shared_resources.cp_queue_bitmap))
return true;
return false;
}
unsigned int get_cp_queues_num(struct device_queue_manager *dqm)
{
return bitmap_weight(dqm->dev->kfd->shared_resources.cp_queue_bitmap,
KGD_MAX_QUEUES);
}
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
{
return dqm->dev->kfd->shared_resources.num_queue_per_pipe;
}
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
{
return dqm->dev->kfd->shared_resources.num_pipe_per_mec;
}
static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm)
{
return kfd_get_num_sdma_engines(dqm->dev) +
kfd_get_num_xgmi_sdma_engines(dqm->dev);
}
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
{
return kfd_get_num_sdma_engines(dqm->dev) *
dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
}
unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm)
{
return kfd_get_num_xgmi_sdma_engines(dqm->dev) *
dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
}
static void init_sdma_bitmaps(struct device_queue_manager *dqm)
{
bitmap_zero(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES);
bitmap_set(dqm->sdma_bitmap, 0, get_num_sdma_queues(dqm));
bitmap_zero(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES);
bitmap_set(dqm->xgmi_sdma_bitmap, 0, get_num_xgmi_sdma_queues(dqm));
/* Mask out the reserved queues */
bitmap_andnot(dqm->sdma_bitmap, dqm->sdma_bitmap,
dqm->dev->kfd->device_info.reserved_sdma_queues_bitmap,
KFD_MAX_SDMA_QUEUES);
}
void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
uint32_t xcc_mask = dqm->dev->xcc_mask;
int xcc_id;
for_each_inst(xcc_id, xcc_mask)
dqm->dev->kfd2kgd->program_sh_mem_settings(
dqm->dev->adev, qpd->vmid, qpd->sh_mem_config,
qpd->sh_mem_ape1_base, qpd->sh_mem_ape1_limit,
qpd->sh_mem_bases, xcc_id);
}
static void kfd_hws_hang(struct device_queue_manager *dqm)
{
/*
* Issue a GPU reset if HWS is unresponsive
*/
dqm->is_hws_hang = true;
/* It's possible we're detecting a HWS hang in the
* middle of a GPU reset. No need to schedule another
* reset in this case.
*/
if (!dqm->is_resetting)
schedule_work(&dqm->hw_exception_work);
}
static int convert_to_mes_queue_type(int queue_type)
{
int mes_queue_type;
switch (queue_type) {
case KFD_QUEUE_TYPE_COMPUTE:
mes_queue_type = MES_QUEUE_TYPE_COMPUTE;
break;
case KFD_QUEUE_TYPE_SDMA:
mes_queue_type = MES_QUEUE_TYPE_SDMA;
break;
default:
WARN(1, "Invalid queue type %d", queue_type);
mes_queue_type = -EINVAL;
break;
}
return mes_queue_type;
}
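/* add_queue_mes - add a user queue to the MES hardware scheduler
 *
 * Fills a mes_add_queue_input from the queue and process properties and
 * submits it under the MES lock. Schedules a GPU reset via kfd_hws_hang()
 * if MES fails to add the queue.
 */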
static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd)
{
struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev;
struct kfd_process_device *pdd = qpd_to_pdd(qpd);
struct mes_add_queue_input queue_input;
int r, queue_type;
uint64_t wptr_addr_off;
if (dqm->is_hws_hang)
return -EIO;
memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));
queue_input.process_id = qpd->pqm->process->pasid;
queue_input.page_table_base_addr = qpd->page_table_base;
queue_input.process_va_start = 0;
queue_input.process_va_end = adev->vm_manager.max_pfn - 1;
/* MES unit for quantum is 100ns */
queue_input.process_quantum = KFD_MES_PROCESS_QUANTUM; /* Equivalent to 10ms. */
queue_input.process_context_addr = pdd->proc_ctx_gpu_addr;
queue_input.gang_quantum = KFD_MES_GANG_QUANTUM; /* Equivalent to 1ms */
queue_input.gang_context_addr = q->gang_ctx_gpu_addr;
queue_input.inprocess_gang_priority = q->properties.priority;
queue_input.gang_global_priority_level =
AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
queue_input.doorbell_offset = q->properties.doorbell_off;
queue_input.mqd_addr = q->gart_mqd_addr;
queue_input.wptr_addr = (uint64_t)q->properties.write_ptr;
if (q->wptr_bo) {
wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1);
queue_input.wptr_mc_addr = amdgpu_bo_gpu_offset(q->wptr_bo) + wptr_addr_off;
}
queue_input.is_kfd_process = 1;
queue_input.is_aql_queue = (q->properties.format == KFD_QUEUE_FORMAT_AQL);
queue_input.queue_size = q->properties.queue_size >> 2;
queue_input.paging = false;
queue_input.tba_addr = qpd->tba_addr;
queue_input.tma_addr = qpd->tma_addr;
queue_input.trap_en = !kfd_dbg_has_cwsr_workaround(q->device);
queue_input.skip_process_ctx_clear = qpd->pqm->process->debug_trap_enabled ||
kfd_dbg_has_ttmps_always_setup(q->device);
queue_type = convert_to_mes_queue_type(q->properties.type);
if (queue_type < 0) {
pr_err("Queue type not supported with MES, queue:%d\n",
q->properties.type);
return -EINVAL;
}
queue_input.queue_type = (uint32_t)queue_type;
queue_input.exclusively_scheduled = q->properties.is_gws;
amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
amdgpu_mes_unlock(&adev->mes);
if (r) {
pr_err("failed to add hardware queue to MES, doorbell=0x%x\n",
q->properties.doorbell_off);
pr_err("MES might be in unrecoverable state, issue a GPU reset\n");
kfd_hws_hang(dqm);
}
return r;
}
static int remove_queue_mes(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd)
{
struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev;
int r;
struct mes_remove_queue_input queue_input;
if (dqm->is_hws_hang)
return -EIO;
memset(&queue_input, 0x0, sizeof(struct mes_remove_queue_input));
queue_input.doorbell_offset = q->properties.doorbell_off;
queue_input.gang_context_addr = q->gang_ctx_gpu_addr;
amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
amdgpu_mes_unlock(&adev->mes);
if (r) {
pr_err("failed to remove hardware queue from MES, doorbell=0x%x\n",
q->properties.doorbell_off);
pr_err("MES might be in unrecoverable state, issue a GPU reset\n");
kfd_hws_hang(dqm);
}
return r;
}
static int remove_all_queues_mes(struct device_queue_manager *dqm)
{
struct device_process_node *cur;
struct qcm_process_device *qpd;
struct queue *q;
int retval = 0;
list_for_each_entry(cur, &dqm->queues, list) {
qpd = cur->qpd;
list_for_each_entry(q, &qpd->queues_list, list) {
if (q->properties.is_active) {
retval = remove_queue_mes(dqm, q, qpd);
if (retval) {
pr_err("%s: Failed to remove queue %d for dev %d",
__func__,
q->properties.queue_id,
dqm->dev->id);
return retval;
}
}
}
}
return retval;
}
static void increment_queue_count(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
{
dqm->active_queue_count++;
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
q->properties.type == KFD_QUEUE_TYPE_DIQ)
dqm->active_cp_queue_count++;
if (q->properties.is_gws) {
dqm->gws_queue_count++;
qpd->mapped_gws_queue = true;
}
}
static void decrement_queue_count(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
{
dqm->active_queue_count--;
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
q->properties.type == KFD_QUEUE_TYPE_DIQ)
dqm->active_cp_queue_count--;
if (q->properties.is_gws) {
dqm->gws_queue_count--;
qpd->mapped_gws_queue = false;
}
}
/*
* Allocate a doorbell ID to this queue.
 * If doorbell_id is passed in, make sure the requested ID is valid, then allocate it.
*/
static int allocate_doorbell(struct qcm_process_device *qpd,
struct queue *q,
uint32_t const *restore_id)
{
struct kfd_node *dev = qpd->dqm->dev;
if (!KFD_IS_SOC15(dev)) {
/* On pre-SOC15 chips we need to use the queue ID to
* preserve the user mode ABI.
*/
if (restore_id && *restore_id != q->properties.queue_id)
return -EINVAL;
q->doorbell_id = q->properties.queue_id;
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
/* For SDMA queues on SOC15 with 8-byte doorbell, use static
* doorbell assignments based on the engine and queue id.
* The doobell index distance between RLC (2*i) and (2*i+1)
* for a SDMA engine is 512.
*/
uint32_t *idx_offset = dev->kfd->shared_resources.sdma_doorbell_idx;
/*
* q->properties.sdma_engine_id corresponds to the virtual
* sdma engine number. However, for doorbell allocation,
* we need the physical sdma engine id in order to get the
* correct doorbell offset.
*/
uint32_t valid_id = idx_offset[qpd->dqm->dev->node_id *
get_num_all_sdma_engines(qpd->dqm) +
q->properties.sdma_engine_id]
+ (q->properties.sdma_queue_id & 1)
* KFD_QUEUE_DOORBELL_MIRROR_OFFSET
+ (q->properties.sdma_queue_id >> 1);
if (restore_id && *restore_id != valid_id)
return -EINVAL;
q->doorbell_id = valid_id;
} else {
/* For CP queues on SOC15 */
if (restore_id) {
/* make sure that ID is free */
if (__test_and_set_bit(*restore_id, qpd->doorbell_bitmap))
return -EINVAL;
q->doorbell_id = *restore_id;
} else {
/* or reserve a free doorbell ID */
unsigned int found;
found = find_first_zero_bit(qpd->doorbell_bitmap,
KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
pr_debug("No doorbells available");
return -EBUSY;
}
set_bit(found, qpd->doorbell_bitmap);
q->doorbell_id = found;
}
}
q->properties.doorbell_off = amdgpu_doorbell_index_on_bar(dev->adev,
qpd->proc_doorbells,
q->doorbell_id);
return 0;
}
static void deallocate_doorbell(struct qcm_process_device *qpd,
struct queue *q)
{
unsigned int old;
struct kfd_node *dev = qpd->dqm->dev;
if (!KFD_IS_SOC15(dev) ||
q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
return;
old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
WARN_ON(!old);
}
static void program_trap_handler_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
uint32_t xcc_mask = dqm->dev->xcc_mask;
int xcc_id;
if (dqm->dev->kfd2kgd->program_trap_handler_settings)
for_each_inst(xcc_id, xcc_mask)
dqm->dev->kfd2kgd->program_trap_handler_settings(
dqm->dev->adev, qpd->vmid, qpd->tba_addr,
qpd->tma_addr, xcc_id);
}
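/* allocate_vmid - assign a free KFD VMID to a process (no-HWS mode)
 *
 * Picks the first unused VMID in the KFD range, programs the PASID-VMID
 * mapping, SH_MEM registers, trap handler and page table base, and flushes
 * the VM context.
 */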
static int allocate_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
{
int allocated_vmid = -1, i;
for (i = dqm->dev->vm_info.first_vmid_kfd;
i <= dqm->dev->vm_info.last_vmid_kfd; i++) {
if (!dqm->vmid_pasid[i]) {
allocated_vmid = i;
break;
}
}
if (allocated_vmid < 0) {
pr_err("no more vmid to allocate\n");
return -ENOSPC;
}
pr_debug("vmid allocated: %d\n", allocated_vmid);
dqm->vmid_pasid[allocated_vmid] = q->process->pasid;
set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid);
qpd->vmid = allocated_vmid;
q->properties.vmid = allocated_vmid;
program_sh_mem_settings(dqm, qpd);
if (KFD_IS_SOC15(dqm->dev) && dqm->dev->kfd->cwsr_enabled)
program_trap_handler_settings(dqm, qpd);
/* qpd->page_table_base is set earlier when register_process()
* is called, i.e. when the first queue is created.
*/
dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->adev,
qpd->vmid,
qpd->page_table_base);
/* invalidate the VM context after pasid and vmid mapping is set up */
kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);
if (dqm->dev->kfd2kgd->set_scratch_backing_va)
dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->adev,
qpd->sh_hidden_private_base, qpd->vmid);
return 0;
}
static int flush_texture_cache_nocpsch(struct kfd_node *kdev,
struct qcm_process_device *qpd)
{
const struct packet_manager_funcs *pmf = qpd->dqm->packet_mgr.pmf;
int ret;
if (!qpd->ib_kaddr)
return -ENOMEM;
ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
if (ret)
return ret;
return amdgpu_amdkfd_submit_ib(kdev->adev, KGD_ENGINE_MEC1, qpd->vmid,
qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
pmf->release_mem_size / sizeof(uint32_t));
}
static void deallocate_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
{
/* On GFX v7, CP doesn't flush TC at dequeue */
if (q->device->adev->asic_type == CHIP_HAWAII)
if (flush_texture_cache_nocpsch(q->device, qpd))
pr_err("Failed to flush TC\n");
kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);
/* Release the vmid mapping */
set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
dqm->vmid_pasid[qpd->vmid] = 0;
qpd->vmid = 0;
q->properties.vmid = 0;
}
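/* create_queue_nocpsch - create a user queue without the HW scheduler
 *
 * Allocates a VMID for the process's first queue, an HQD slot or SDMA
 * queue, a doorbell and an MQD, then loads the MQD onto the hardware if the
 * queue is active.
 */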
static int create_queue_nocpsch(struct device_queue_manager *dqm,
struct queue *q,
struct qcm_process_device *qpd,
const struct kfd_criu_queue_priv_data *qd,
const void *restore_mqd, const void *restore_ctl_stack)
{
struct mqd_manager *mqd_mgr;
int retval;
dqm_lock(dqm);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
pr_warn("Can't create new usermode queue because %d queues were already created\n",
dqm->total_queue_count);
retval = -EPERM;
goto out_unlock;
}
if (list_empty(&qpd->queues_list)) {
retval = allocate_vmid(dqm, qpd, q);
if (retval)
goto out_unlock;
}
q->properties.vmid = qpd->vmid;
/*
* Eviction state logic: mark all queues as evicted, even ones
* not currently active. Restoring inactive queues later only
* updates the is_evicted flag but is a no-op otherwise.
*/
q->properties.is_evicted = !!qpd->evicted;
q->properties.tba_addr = qpd->tba_addr;
q->properties.tma_addr = qpd->tma_addr;
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
retval = allocate_hqd(dqm, q);
if (retval)
goto deallocate_vmid;
pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
q->pipe, q->queue);
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL);
if (retval)
goto deallocate_vmid;
dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
}
retval = allocate_doorbell(qpd, q, qd ? &qd->doorbell_id : NULL);
if (retval)
goto out_deallocate_hqd;
/* Temporarily release dqm lock to avoid a circular lock dependency */
dqm_unlock(dqm);
q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
dqm_lock(dqm);
if (!q->mqd_mem_obj) {
retval = -ENOMEM;
goto out_deallocate_doorbell;
}
if (qd)
mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr,
&q->properties, restore_mqd, restore_ctl_stack,
qd->ctl_stack_size);
else
mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (q->properties.is_active) {
if (!dqm->sched_running) {
WARN_ONCE(1, "Load non-HWS mqd while stopped\n");
goto add_queue_to_list;
}
if (WARN(q->process->mm != current->mm,
"should only run in user thread"))
retval = -EFAULT;
else
retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
q->queue, &q->properties, current->mm);
if (retval)
goto out_free_mqd;
}
add_queue_to_list:
list_add(&q->list, &qpd->queues_list);
qpd->queue_count++;
if (q->properties.is_active)
increment_queue_count(dqm, qpd, q);
/*
* Unconditionally increment this counter, regardless of the queue's
* type or whether the queue is active.
*/
dqm->total_queue_count++;
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
goto out_unlock;
out_free_mqd:
mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
out_deallocate_doorbell:
deallocate_doorbell(qpd, q);
out_deallocate_hqd:
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
deallocate_hqd(dqm, q);
else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
deallocate_sdma_queue(dqm, q);
deallocate_vmid:
if (list_empty(&qpd->queues_list))
deallocate_vmid(dqm, qpd, q);
out_unlock:
dqm_unlock(dqm);
return retval;
}
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
bool set;
int pipe, bit, i;
set = false;
for (pipe = dqm->next_pipe_to_allocate, i = 0;
i < get_pipes_per_mec(dqm);
pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {
if (!is_pipe_enabled(dqm, 0, pipe))
continue;
if (dqm->allocated_queues[pipe] != 0) {
bit = ffs(dqm->allocated_queues[pipe]) - 1;
dqm->allocated_queues[pipe] &= ~(1 << bit);
q->pipe = pipe;
q->queue = bit;
set = true;
break;
}
}
if (!set)
return -EBUSY;
pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
/* horizontal hqd allocation */
dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);
return 0;
}
static inline void deallocate_hqd(struct device_queue_manager *dqm,
struct queue *q)
{
dqm->allocated_queues[q->pipe] |= (1 << q->queue);
}
#define SQ_IND_CMD_CMD_KILL 0x00000003
#define SQ_IND_CMD_MODE_BROADCAST 0x00000001
static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process *p)
{
int status = 0;
unsigned int vmid;
uint16_t queried_pasid;
union SQ_CMD_BITS reg_sq_cmd;
union GRBM_GFX_INDEX_BITS reg_gfx_index;
struct kfd_process_device *pdd;
int first_vmid_to_scan = dev->vm_info.first_vmid_kfd;
int last_vmid_to_scan = dev->vm_info.last_vmid_kfd;
uint32_t xcc_mask = dev->xcc_mask;
int xcc_id;
reg_sq_cmd.u32All = 0;
reg_gfx_index.u32All = 0;
pr_debug("Killing all process wavefronts\n");
if (!dev->kfd2kgd->get_atc_vmid_pasid_mapping_info) {
pr_err("no vmid-pasid mapping supported\n");
return -EOPNOTSUPP;
}
/* Scan all registers in the range ATC_VMID8_PASID_MAPPING ..
* ATC_VMID15_PASID_MAPPING
* to check which VMID the current process is mapped to.
*/
for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) {
status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info
(dev->adev, vmid, &queried_pasid);
if (status && queried_pasid == p->pasid) {
pr_debug("Killing wave fronts of vmid %d and pasid 0x%x\n",
vmid, p->pasid);
break;
}
}
if (vmid > last_vmid_to_scan) {
pr_err("Didn't find vmid for pasid 0x%x\n", p->pasid);
return -EFAULT;
}
/* Take the VMID for that process in a safe way using the PDD */
pdd = kfd_get_process_device_data(dev, p);
if (!pdd)
return -EFAULT;
reg_gfx_index.bits.sh_broadcast_writes = 1;
reg_gfx_index.bits.se_broadcast_writes = 1;
reg_gfx_index.bits.instance_broadcast_writes = 1;
reg_sq_cmd.bits.mode = SQ_IND_CMD_MODE_BROADCAST;
reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_KILL;
reg_sq_cmd.bits.vm_id = vmid;
for_each_inst(xcc_id, xcc_mask)
dev->kfd2kgd->wave_control_execute(
dev->adev, reg_gfx_index.u32All,
reg_sq_cmd.u32All, xcc_id);
return 0;
}
/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
 * to avoid unsynchronized access
*/
static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
{
int retval;
struct mqd_manager *mqd_mgr;
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
deallocate_hqd(dqm, q);
else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
deallocate_sdma_queue(dqm, q);
else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
deallocate_sdma_queue(dqm, q);
else {
pr_debug("q->properties.type %d is invalid\n",
q->properties.type);
return -EINVAL;
}
dqm->total_queue_count--;
deallocate_doorbell(qpd, q);
if (!dqm->sched_running) {
WARN_ONCE(1, "Destroy non-HWS queue while stopped\n");
return 0;
}
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
KFD_UNMAP_LATENCY_MS,
q->pipe, q->queue);
if (retval == -ETIME)
qpd->reset_wavefronts = true;
list_del(&q->list);
if (list_empty(&qpd->queues_list)) {
if (qpd->reset_wavefronts) {
pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
dqm->dev);
/* dbgdev_wave_reset_wavefronts has to be called before
* deallocate_vmid(), i.e. when vmid is still in use.
*/
dbgdev_wave_reset_wavefronts(dqm->dev,
qpd->pqm->process);
qpd->reset_wavefronts = false;
}
deallocate_vmid(dqm, qpd, q);
}
qpd->queue_count--;
if (q->properties.is_active)
decrement_queue_count(dqm, qpd, q);
return retval;
}
static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
{
int retval;
uint64_t sdma_val = 0;
struct kfd_process_device *pdd = qpd_to_pdd(qpd);
struct mqd_manager *mqd_mgr =
dqm->mqd_mgrs[get_mqd_type_from_queue_type(q->properties.type)];
/* Get the SDMA queue stats */
if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
(q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
&sdma_val);
if (retval)
pr_err("Failed to read SDMA queue counter for queue: %d\n",
q->properties.queue_id);
}
dqm_lock(dqm);
retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
if (!retval)
pdd->sdma_past_activity_counter += sdma_val;
dqm_unlock(dqm);
mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
return retval;
}
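/* update_queue - update the MQD of an existing queue
 *
 * Unmaps or destroys the queue first if it is currently mapped, applies the
 * MQD update, adjusts the active/GWS queue counters and re-maps or reloads
 * the queue as required by the scheduling mode.
 */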
static int update_queue(struct device_queue_manager *dqm, struct queue *q,
struct mqd_update_info *minfo)
{
int retval = 0;
struct mqd_manager *mqd_mgr;
struct kfd_process_device *pdd;
bool prev_active = false;
dqm_lock(dqm);
pdd = kfd_get_process_device_data(q->device, q->process);
if (!pdd) {
retval = -ENODEV;
goto out_unlock;
}
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
/* Save previous activity state for counters */
prev_active = q->properties.is_active;
/* Make sure the queue is unmapped before updating the MQD */
if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
if (!dqm->dev->kfd->shared_resources.enable_mes)
retval = unmap_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false);
else if (prev_active)
retval = remove_queue_mes(dqm, q, &pdd->qpd);
if (retval) {
pr_err("unmap queue failed\n");
goto out_unlock;
}
} else if (prev_active &&
(q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
if (!dqm->sched_running) {
WARN_ONCE(1, "Update non-HWS queue while stopped\n");
goto out_unlock;
}
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
(dqm->dev->kfd->cwsr_enabled ?
KFD_PREEMPT_TYPE_WAVEFRONT_SAVE :
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN),
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
if (retval) {
pr_err("destroy mqd failed\n");
goto out_unlock;
}
}
mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties, minfo);
/*
* check active state vs. the previous state and modify
* counter accordingly. map_queues_cpsch uses the
* dqm->active_queue_count to determine whether a new runlist must be
* uploaded.
*/
if (q->properties.is_active && !prev_active) {
increment_queue_count(dqm, &pdd->qpd, q);
} else if (!q->properties.is_active && prev_active) {
decrement_queue_count(dqm, &pdd->qpd, q);
} else if (q->gws && !q->properties.is_gws) {
if (q->properties.is_active) {
dqm->gws_queue_count++;
pdd->qpd.mapped_gws_queue = true;
}
q->properties.is_gws = true;
} else if (!q->gws && q->properties.is_gws) {
if (q->properties.is_active) {
dqm->gws_queue_count--;
pdd->qpd.mapped_gws_queue = false;
}
q->properties.is_gws = false;
}
if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
if (!dqm->dev->kfd->shared_resources.enable_mes)
retval = map_queues_cpsch(dqm);
else if (q->properties.is_active)
retval = add_queue_mes(dqm, q, &pdd->qpd);
} else if (q->properties.is_active &&
(q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
if (WARN(q->process->mm != current->mm,
"should only run in user thread"))
retval = -EFAULT;
else
retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
q->pipe, q->queue,
&q->properties, current->mm);
}
out_unlock:
dqm_unlock(dqm);
return retval;
}
/* suspend_single_queue does not lock the dqm like the
* evict_process_queues_cpsch or evict_process_queues_nocpsch. You should
* lock the dqm before calling, and unlock after calling.
*
* The reason we don't lock the dqm is because this function may be
* called on multiple queues in a loop, so rather than locking/unlocking
* multiple times, we will just keep the dqm locked for all of the calls.
*/
static int suspend_single_queue(struct device_queue_manager *dqm,
struct kfd_process_device *pdd,
struct queue *q)
{
bool is_new;
if (q->properties.is_suspended)
return 0;
pr_debug("Suspending PASID %u queue [%i]\n",
pdd->process->pasid,
q->properties.queue_id);
is_new = q->properties.exception_status & KFD_EC_MASK(EC_QUEUE_NEW);
if (is_new || q->properties.is_being_destroyed) {
pr_debug("Suspend: skip %s queue id %i\n",
is_new ? "new" : "destroyed",
q->properties.queue_id);
return -EBUSY;
}
q->properties.is_suspended = true;
if (q->properties.is_active) {
if (dqm->dev->kfd->shared_resources.enable_mes) {
int r = remove_queue_mes(dqm, q, &pdd->qpd);
if (r)
return r;
}
decrement_queue_count(dqm, &pdd->qpd, q);
q->properties.is_active = false;
}
return 0;
}
/* resume_single_queue does not lock the dqm like the functions
* restore_process_queues_cpsch or restore_process_queues_nocpsch. You should
* lock the dqm before calling, and unlock after calling.
*
* The reason we don't lock the dqm is because this function may be
* called on multiple queues in a loop, so rather than locking/unlocking
* multiple times, we will just keep the dqm locked for all of the calls.
*/
static int resume_single_queue(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
{
struct kfd_process_device *pdd;
if (!q->properties.is_suspended)
return 0;
pdd = qpd_to_pdd(qpd);
pr_debug("Restoring from suspend PASID %u queue [%i]\n",
pdd->process->pasid,
q->properties.queue_id);
q->properties.is_suspended = false;
if (QUEUE_IS_ACTIVE(q->properties)) {
if (dqm->dev->kfd->shared_resources.enable_mes) {
int r = add_queue_mes(dqm, q, &pdd->qpd);
if (r)
return r;
}
q->properties.is_active = true;
increment_queue_count(dqm, qpd, q);
}
return 0;
}
static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct queue *q;
struct mqd_manager *mqd_mgr;
struct kfd_process_device *pdd;
int retval, ret = 0;
dqm_lock(dqm);
if (qpd->evicted++ > 0) /* already evicted, do nothing */
goto out;
pdd = qpd_to_pdd(qpd);
pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
pdd->process->pasid);
pdd->last_evict_timestamp = get_jiffies_64();
/* Mark all queues as evicted. Deactivate all active queues on
* the qpd.
*/
list_for_each_entry(q, &qpd->queues_list, list) {
q->properties.is_evicted = true;
if (!q->properties.is_active)
continue;
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
q->properties.is_active = false;
decrement_queue_count(dqm, qpd, q);
if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
continue;
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
(dqm->dev->kfd->cwsr_enabled ?
KFD_PREEMPT_TYPE_WAVEFRONT_SAVE :
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN),
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
if (retval && !ret)
/* Return the first error, but keep going to
* maintain a consistent eviction state
*/
ret = retval;
}
out:
dqm_unlock(dqm);
return ret;
}
static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct queue *q;
struct kfd_process_device *pdd;
int retval = 0;
dqm_lock(dqm);
if (qpd->evicted++ > 0) /* already evicted, do nothing */
goto out;
pdd = qpd_to_pdd(qpd);
/* The debugger creates processes that temporarily have not acquired
* all VMs for all devices and has no VMs itself.
* Skip queue eviction on process eviction.
*/
if (!pdd->drm_priv)
goto out;
pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
pdd->process->pasid);
/* Mark all queues as evicted. Deactivate all active queues on
* the qpd.
*/
list_for_each_entry(q, &qpd->queues_list, list) {
q->properties.is_evicted = true;
if (!q->properties.is_active)
continue;
q->properties.is_active = false;
decrement_queue_count(dqm, qpd, q);
if (dqm->dev->kfd->shared_resources.enable_mes) {
retval = remove_queue_mes(dqm, q, qpd);
if (retval) {
pr_err("Failed to evict queue %d\n",
q->properties.queue_id);
goto out;
}
}
}
pdd->last_evict_timestamp = get_jiffies_64();
if (!dqm->dev->kfd->shared_resources.enable_mes)
retval = execute_queues_cpsch(dqm,
qpd->is_debug ?
KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
USE_DEFAULT_GRACE_PERIOD);
out:
dqm_unlock(dqm);
return retval;
}
static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct mm_struct *mm = NULL;
struct queue *q;
struct mqd_manager *mqd_mgr;
struct kfd_process_device *pdd;
uint64_t pd_base;
uint64_t eviction_duration;
int retval, ret = 0;
pdd = qpd_to_pdd(qpd);
/* Retrieve PD base */
pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
dqm_lock(dqm);
if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
goto out;
if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
qpd->evicted--;
goto out;
}
pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
pdd->process->pasid);
/* Update PD Base in QPD */
qpd->page_table_base = pd_base;
pr_debug("Updated PD address to 0x%llx\n", pd_base);
if (!list_empty(&qpd->queues_list)) {
dqm->dev->kfd2kgd->set_vm_context_page_table_base(
dqm->dev->adev,
qpd->vmid,
qpd->page_table_base);
kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
}
/* Take a safe reference to the mm_struct, which may otherwise
* disappear even while the kfd_process is still referenced.
*/
mm = get_task_mm(pdd->process->lead_thread);
if (!mm) {
ret = -EFAULT;
goto out;
}
/* Remove the eviction flags. Activate queues that are not
* inactive for other reasons.
*/
list_for_each_entry(q, &qpd->queues_list, list) {
q->properties.is_evicted = false;
if (!QUEUE_IS_ACTIVE(q->properties))
continue;
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
q->properties.is_active = true;
increment_queue_count(dqm, qpd, q);
if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
continue;
retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
q->queue, &q->properties, mm);
if (retval && !ret)
/* Return the first error, but keep going to
* maintain a consistent eviction state
*/
ret = retval;
}
qpd->evicted = 0;
eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
atomic64_add(eviction_duration, &pdd->evict_duration_counter);
out:
if (mm)
mmput(mm);
dqm_unlock(dqm);
return ret;
}
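/* HWS restore path: clear the eviction flags, re-add queues via MES
 * or re-send the runlist, and account the eviction duration.
 */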
static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct queue *q;
struct kfd_process_device *pdd;
uint64_t eviction_duration;
int retval = 0;
pdd = qpd_to_pdd(qpd);
dqm_lock(dqm);
if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
goto out;
if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
qpd->evicted--;
goto out;
}
/* The debugger creates processes that temporarily have not acquired
* all VMs for all devices and has no VMs itself.
* Skip queue restore on process restore.
*/
if (!pdd->drm_priv)
goto vm_not_acquired;
pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
pdd->process->pasid);
/* Update PD Base in QPD */
qpd->page_table_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
pr_debug("Updated PD address to 0x%llx\n", qpd->page_table_base);
/* activate all active queues on the qpd */
list_for_each_entry(q, &qpd->queues_list, list) {
q->properties.is_evicted = false;
if (!QUEUE_IS_ACTIVE(q->properties))
continue;
q->properties.is_active = true;
increment_queue_count(dqm, &pdd->qpd, q);
if (dqm->dev->kfd->shared_resources.enable_mes) {
retval = add_queue_mes(dqm, q, qpd);
if (retval) {
pr_err("Failed to restore queue %d\n",
q->properties.queue_id);
goto out;
}
}
}
if (!dqm->dev->kfd->shared_resources.enable_mes)
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD);
eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
atomic64_add(eviction_duration, &pdd->evict_duration_counter);
vm_not_acquired:
qpd->evicted = 0;
out:
dqm_unlock(dqm);
return retval;
}
static int register_process(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct device_process_node *n;
struct kfd_process_device *pdd;
uint64_t pd_base;
int retval;
n = kzalloc(sizeof(*n), GFP_KERNEL);
if (!n)
return -ENOMEM;
n->qpd = qpd;
pdd = qpd_to_pdd(qpd);
/* Retrieve PD base */
pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
dqm_lock(dqm);
list_add(&n->list, &dqm->queues);
/* Update PD Base in QPD */
qpd->page_table_base = pd_base;
pr_debug("Updated PD address to 0x%llx\n", pd_base);
retval = dqm->asic_ops.update_qpd(dqm, qpd);
dqm->processes_count++;
dqm_unlock(dqm);
/* Outside the DQM lock because under the DQM lock we can't do
* reclaim or take other locks that others hold while reclaiming.
*/
kfd_inc_compute_active(dqm->dev);
return retval;
}
static int unregister_process(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
int retval;
struct device_process_node *cur, *next;
pr_debug("qpd->queues_list is %s\n",
list_empty(&qpd->queues_list) ? "empty" : "not empty");
retval = 0;
dqm_lock(dqm);
list_for_each_entry_safe(cur, next, &dqm->queues, list) {
if (qpd == cur->qpd) {
list_del(&cur->list);
kfree(cur);
dqm->processes_count--;
goto out;
}
}
/* qpd not found in dqm list */
retval = 1;
out:
dqm_unlock(dqm);
/* Outside the DQM lock because under the DQM lock we can't do
* reclaim or take other locks that others hold while reclaiming.
*/
if (!retval)
kfd_dec_compute_active(dqm->dev);
return retval;
}
static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid,
unsigned int vmid)
{
uint32_t xcc_mask = dqm->dev->xcc_mask;
int xcc_id, ret = 0;
for_each_inst(xcc_id, xcc_mask) {
ret = dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
dqm->dev->adev, pasid, vmid, xcc_id);
if (ret)
break;
}
return ret;
}
static void init_interrupts(struct device_queue_manager *dqm)
{
uint32_t xcc_mask = dqm->dev->xcc_mask;
unsigned int i, xcc_id;
for_each_inst(xcc_id, xcc_mask) {
for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++) {
if (is_pipe_enabled(dqm, 0, i)) {
dqm->dev->kfd2kgd->init_interrupts(
dqm->dev->adev, i, xcc_id);
}
}
}
}
static int initialize_nocpsch(struct device_queue_manager *dqm)
{
int pipe, queue;
pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
sizeof(unsigned int), GFP_KERNEL);
if (!dqm->allocated_queues)
return -ENOMEM;
mutex_init(&dqm->lock_hidden);
INIT_LIST_HEAD(&dqm->queues);
dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
dqm->active_cp_queue_count = 0;
dqm->gws_queue_count = 0;
for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
int pipe_offset = pipe * get_queues_per_pipe(dqm);
for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
if (test_bit(pipe_offset + queue,
dqm->dev->kfd->shared_resources.cp_queue_bitmap))
dqm->allocated_queues[pipe] |= 1 << queue;
}
memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid));
init_sdma_bitmaps(dqm);
return 0;
}
static void uninitialize(struct device_queue_manager *dqm)
{
int i;
WARN_ON(dqm->active_queue_count > 0 || dqm->processes_count > 0);
kfree(dqm->allocated_queues);
for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
kfree(dqm->mqd_mgrs[i]);
mutex_destroy(&dqm->lock_hidden);
}
static int start_nocpsch(struct device_queue_manager *dqm)
{
int r = 0;
pr_info("SW scheduler is used");
init_interrupts(dqm);
if (dqm->dev->adev->asic_type == CHIP_HAWAII)
r = pm_init(&dqm->packet_mgr, dqm);
if (!r)
dqm->sched_running = true;
return r;
}
static int stop_nocpsch(struct device_queue_manager *dqm)
{
dqm_lock(dqm);
if (!dqm->sched_running) {
dqm_unlock(dqm);
return 0;
}
if (dqm->dev->adev->asic_type == CHIP_HAWAII)
pm_uninit(&dqm->packet_mgr, false);
dqm->sched_running = false;
dqm_unlock(dqm);
return 0;
}
static void pre_reset(struct device_queue_manager *dqm)
{
dqm_lock(dqm);
dqm->is_resetting = true;
dqm_unlock(dqm);
}
static int allocate_sdma_queue(struct device_queue_manager *dqm,
struct queue *q, const uint32_t *restore_sdma_id)
{
int bit;
if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
if (bitmap_empty(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES)) {
pr_err("No more SDMA queue to allocate\n");
return -ENOMEM;
}
if (restore_sdma_id) {
/* Re-use existing sdma_id */
if (!test_bit(*restore_sdma_id, dqm->sdma_bitmap)) {
pr_err("SDMA queue already in use\n");
return -EBUSY;
}
clear_bit(*restore_sdma_id, dqm->sdma_bitmap);
q->sdma_id = *restore_sdma_id;
} else {
/* Find first available sdma_id */
bit = find_first_bit(dqm->sdma_bitmap,
get_num_sdma_queues(dqm));
clear_bit(bit, dqm->sdma_bitmap);
q->sdma_id = bit;
}
q->properties.sdma_engine_id =
q->sdma_id % kfd_get_num_sdma_engines(dqm->dev);
q->properties.sdma_queue_id = q->sdma_id /
kfd_get_num_sdma_engines(dqm->dev);
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
if (bitmap_empty(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES)) {
pr_err("No more XGMI SDMA queue to allocate\n");
return -ENOMEM;
}
if (restore_sdma_id) {
/* Re-use existing sdma_id */
if (!test_bit(*restore_sdma_id, dqm->xgmi_sdma_bitmap)) {
pr_err("SDMA queue already in use\n");
return -EBUSY;
}
clear_bit(*restore_sdma_id, dqm->xgmi_sdma_bitmap);
q->sdma_id = *restore_sdma_id;
} else {
bit = find_first_bit(dqm->xgmi_sdma_bitmap,
get_num_xgmi_sdma_queues(dqm));
clear_bit(bit, dqm->xgmi_sdma_bitmap);
q->sdma_id = bit;
}
/* sdma_engine_id is sdma id including
* both PCIe-optimized SDMAs and XGMI-
* optimized SDMAs. The calculation below
* assumes the first N engines are always
* PCIe-optimized ones
*/
q->properties.sdma_engine_id =
kfd_get_num_sdma_engines(dqm->dev) +
q->sdma_id % kfd_get_num_xgmi_sdma_engines(dqm->dev);
q->properties.sdma_queue_id = q->sdma_id /
kfd_get_num_xgmi_sdma_engines(dqm->dev);
}
pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
return 0;
}
static void deallocate_sdma_queue(struct device_queue_manager *dqm,
struct queue *q)
{
if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
if (q->sdma_id >= get_num_sdma_queues(dqm))
return;
set_bit(q->sdma_id, dqm->sdma_bitmap);
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm))
return;
set_bit(q->sdma_id, dqm->xgmi_sdma_bitmap);
}
}
/*
* Device Queue Manager implementation for cp scheduler
*/
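/* Hand the HWS firmware its scheduling resources: the compute VMID
 * mask and the HQD slots it may use. Only queues on the first MEC are
 * given to HWS, and the 64-bit queue_mask bounds the slot count.
 */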
static int set_sched_resources(struct device_queue_manager *dqm)
{
int i, mec;
struct scheduling_resources res;
res.vmid_mask = dqm->dev->compute_vmid_bitmap;
res.queue_mask = 0;
for (i = 0; i < KGD_MAX_QUEUES; ++i) {
mec = (i / dqm->dev->kfd->shared_resources.num_queue_per_pipe)
/ dqm->dev->kfd->shared_resources.num_pipe_per_mec;
if (!test_bit(i, dqm->dev->kfd->shared_resources.cp_queue_bitmap))
continue;
/* only acquire queues from the first MEC */
if (mec > 0)
continue;
/* This situation may be hit in the future if a new HW
* generation exposes more than 64 queues. If so, the
* definition of res.queue_mask needs updating
*/
if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
pr_err("Invalid queue enabled by amdgpu: %d\n", i);
break;
}
res.queue_mask |= 1ull
<< amdgpu_queue_mask_bit_to_set_resource_bit(
dqm->dev->adev, i);
}
res.gws_mask = ~0ull;
res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;
pr_debug("Scheduling resources:\n"
"vmid mask: 0x%8X\n"
"queue mask: 0x%8llX\n",
res.vmid_mask, res.queue_mask);
return pm_send_set_resources(&dqm->packet_mgr, &res);
}
static int initialize_cpsch(struct device_queue_manager *dqm)
{
pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
mutex_init(&dqm->lock_hidden);
INIT_LIST_HEAD(&dqm->queues);
dqm->active_queue_count = dqm->processes_count = 0;
dqm->active_cp_queue_count = 0;
dqm->gws_queue_count = 0;
dqm->active_runlist = false;
INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
dqm->trap_debug_vmid = 0;
init_sdma_bitmaps(dqm);
if (dqm->dev->kfd2kgd->get_iq_wait_times)
dqm->dev->kfd2kgd->get_iq_wait_times(dqm->dev->adev,
&dqm->wait_times,
ffs(dqm->dev->xcc_mask) - 1);
return 0;
}
static int start_cpsch(struct device_queue_manager *dqm)
{
int retval;
retval = 0;
dqm_lock(dqm);
if (!dqm->dev->kfd->shared_resources.enable_mes) {
retval = pm_init(&dqm->packet_mgr, dqm);
if (retval)
goto fail_packet_manager_init;
retval = set_sched_resources(dqm);
if (retval)
goto fail_set_sched_resources;
}
pr_debug("Allocating fence memory\n");
/* allocate fence memory on the gart */
retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
&dqm->fence_mem);
if (retval)
goto fail_allocate_vidmem;
dqm->fence_addr = (uint64_t *)dqm->fence_mem->cpu_ptr;
dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
init_interrupts(dqm);
/* clear hang status when the driver tries to start the hw scheduler */
dqm->is_hws_hang = false;
dqm->is_resetting = false;
dqm->sched_running = true;
if (!dqm->dev->kfd->shared_resources.enable_mes)
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD);
/* Set CWSR grace period to 1x1000 cycle for GFX9.4.3 APU */
if (amdgpu_emu_mode == 0 && dqm->dev->adev->gmc.is_app_apu &&
(KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 3))) {
uint32_t reg_offset = 0;
uint32_t grace_period = 1;
retval = pm_update_grace_period(&dqm->packet_mgr,
grace_period);
if (retval)
pr_err("Setting grace timeout failed\n");
else if (dqm->dev->kfd2kgd->build_grace_period_packet_info)
/* Update dqm->wait_times maintained in software */
dqm->dev->kfd2kgd->build_grace_period_packet_info(
dqm->dev->adev, dqm->wait_times,
grace_period, &reg_offset,
&dqm->wait_times);
}
dqm_unlock(dqm);
return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
if (!dqm->dev->kfd->shared_resources.enable_mes)
pm_uninit(&dqm->packet_mgr, false);
fail_packet_manager_init:
dqm_unlock(dqm);
return retval;
}
static int stop_cpsch(struct device_queue_manager *dqm)
{
bool hanging;
dqm_lock(dqm);
if (!dqm->sched_running) {
dqm_unlock(dqm);
return 0;
}
if (!dqm->is_hws_hang) {
if (!dqm->dev->kfd->shared_resources.enable_mes)
unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false);
else
remove_all_queues_mes(dqm);
}
hanging = dqm->is_hws_hang || dqm->is_resetting;
dqm->sched_running = false;
if (!dqm->dev->kfd->shared_resources.enable_mes)
pm_release_ib(&dqm->packet_mgr);
kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
if (!dqm->dev->kfd->shared_resources.enable_mes)
pm_uninit(&dqm->packet_mgr, hanging);
dqm_unlock(dqm);
return 0;
}
static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
struct kernel_queue *kq,
struct qcm_process_device *qpd)
{
dqm_lock(dqm);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
pr_warn("Can't create new kernel queue because %d queues were already created\n",
dqm->total_queue_count);
dqm_unlock(dqm);
return -EPERM;
}
/*
* Unconditionally increment this counter, regardless of the queue's
* type or whether the queue is active.
*/
dqm->total_queue_count++;
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
list_add(&kq->list, &qpd->priv_queue_list);
increment_queue_count(dqm, qpd, kq->queue);
qpd->is_debug = true;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
USE_DEFAULT_GRACE_PERIOD);
dqm_unlock(dqm);
return 0;
}
static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
struct kernel_queue *kq,
struct qcm_process_device *qpd)
{
dqm_lock(dqm);
list_del(&kq->list);
decrement_queue_count(dqm, qpd, kq->queue);
qpd->is_debug = false;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
USE_DEFAULT_GRACE_PERIOD);
/*
* Unconditionally decrement this counter, regardless of the queue's
* type.
*/
dqm->total_queue_count--;
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
dqm_unlock(dqm);
}
static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd,
const struct kfd_criu_queue_priv_data *qd,
const void *restore_mqd, const void *restore_ctl_stack)
{
int retval;
struct mqd_manager *mqd_mgr;
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
pr_warn("Can't create new usermode queue because %d queues were already created\n",
dqm->total_queue_count);
retval = -EPERM;
goto out;
}
if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
dqm_lock(dqm);
retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL);
dqm_unlock(dqm);
if (retval)
goto out;
}
retval = allocate_doorbell(qpd, q, qd ? &qd->doorbell_id : NULL);
if (retval)
goto out_deallocate_sdma_queue;
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
q->properties.tba_addr = qpd->tba_addr;
q->properties.tma_addr = qpd->tma_addr;
q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
if (!q->mqd_mem_obj) {
retval = -ENOMEM;
goto out_deallocate_doorbell;
}
dqm_lock(dqm);
/*
* Eviction state logic: mark all queues as evicted, even ones
* not currently active. Restoring inactive queues later only
* updates the is_evicted flag but is a no-op otherwise.
*/
q->properties.is_evicted = !!qpd->evicted;
q->properties.is_dbg_wa = qpd->pqm->process->debug_trap_enabled &&
kfd_dbg_has_cwsr_workaround(q->device);
if (qd)
mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr,
&q->properties, restore_mqd, restore_ctl_stack,
qd->ctl_stack_size);
else
mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
list_add(&q->list, &qpd->queues_list);
qpd->queue_count++;
if (q->properties.is_active) {
increment_queue_count(dqm, qpd, q);
if (!dqm->dev->kfd->shared_resources.enable_mes)
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD);
else
retval = add_queue_mes(dqm, q, qpd);
if (retval)
goto cleanup_queue;
}
/*
* Unconditionally increment this counter, regardless of the queue's
* type or whether the queue is active.
*/
dqm->total_queue_count++;
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
dqm_unlock(dqm);
return retval;
cleanup_queue:
qpd->queue_count--;
list_del(&q->list);
if (q->properties.is_active)
decrement_queue_count(dqm, qpd, q);
mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
dqm_unlock(dqm);
out_deallocate_doorbell:
deallocate_doorbell(qpd, q);
out_deallocate_sdma_queue:
if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
dqm_lock(dqm);
deallocate_sdma_queue(dqm, q);
dqm_unlock(dqm);
}
out:
return retval;
}
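/* Poll the fence written by the CP until it reaches fence_value or
 * timeout_ms expires, yielding the CPU between checks. On timeout the
 * thread can be parked deliberately when halt_if_hws_hang is set.
 */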
int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
uint64_t fence_value,
unsigned int timeout_ms)
{
unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
while (*fence_addr != fence_value) {
if (time_after(jiffies, end_jiffies)) {
pr_err("qcm fence wait loop timeout expired\n");
/* In HWS case, this is used to halt the driver thread
* in order not to mess up CP states before doing
* scandumps for FW debugging.
*/
while (halt_if_hws_hang)
schedule();
return -ETIME;
}
schedule();
}
return 0;
}
/* dqm->lock mutex has to be locked before calling this function */
static int map_queues_cpsch(struct device_queue_manager *dqm)
{
int retval;
if (!dqm->sched_running)
return 0;
if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0)
return 0;
if (dqm->active_runlist)
return 0;
retval = pm_send_runlist(&dqm->packet_mgr, &dqm->queues);
pr_debug("%s sent runlist\n", __func__);
if (retval) {
pr_err("failed to execute runlist\n");
return retval;
}
dqm->active_runlist = true;
return retval;
}
/* dqm->lock mutex has to be locked before calling this function */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_unmap_queues_filter filter,
uint32_t filter_param,
uint32_t grace_period,
bool reset)
{
int retval = 0;
struct mqd_manager *mqd_mgr;
if (!dqm->sched_running)
return 0;
if (dqm->is_hws_hang || dqm->is_resetting)
return -EIO;
if (!dqm->active_runlist)
return retval;
if (grace_period != USE_DEFAULT_GRACE_PERIOD) {
retval = pm_update_grace_period(&dqm->packet_mgr, grace_period);
if (retval)
return retval;
}
retval = pm_send_unmap_queue(&dqm->packet_mgr, filter, filter_param, reset);
if (retval)
return retval;
*dqm->fence_addr = KFD_FENCE_INIT;
pm_send_query_status(&dqm->packet_mgr, dqm->fence_gpu_addr,
KFD_FENCE_COMPLETED);
/* Wait for the preemption fence; the wait is bounded by queue_preemption_timeout_ms */
retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
queue_preemption_timeout_ms);
if (retval) {
pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
kfd_hws_hang(dqm);
return retval;
}
/* In the current MEC firmware implementation, if a compute queue
 * doesn't respond to the preemption request in time, HIQ will
 * abandon the unmap request without returning any timeout error
 * to the driver. Instead, MEC firmware will log the doorbell of the
 * unresponsive compute queue in the HIQ.MQD.queue_doorbell_id fields.
 * To make sure the queue unmap was successful, the driver needs to
 * check those fields.
*/
mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
if (mqd_mgr->read_doorbell_id(dqm->packet_mgr.priv_queue->queue->mqd)) {
pr_err("HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n");
while (halt_if_hws_hang)
schedule();
return -ETIME;
}
/* We need to reset the grace period value for this device */
if (grace_period != USE_DEFAULT_GRACE_PERIOD) {
if (pm_update_grace_period(&dqm->packet_mgr,
USE_DEFAULT_GRACE_PERIOD))
pr_err("Failed to reset grace period\n");
}
pm_release_ib(&dqm->packet_mgr);
dqm->active_runlist = false;
return retval;
}
/* only for compute queue */
static int reset_queues_cpsch(struct device_queue_manager *dqm,
uint16_t pasid)
{
int retval;
dqm_lock(dqm);
retval = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_BY_PASID,
pasid, USE_DEFAULT_GRACE_PERIOD, true);
dqm_unlock(dqm);
return retval;
}
/* dqm->lock mutex has to be locked before calling this function */
static int execute_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_unmap_queues_filter filter,
uint32_t filter_param,
uint32_t grace_period)
{
int retval;
if (dqm->is_hws_hang)
return -EIO;
retval = unmap_queues_cpsch(dqm, filter, filter_param, grace_period, false);
if (retval)
return retval;
return map_queues_cpsch(dqm);
}
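/* If the debugger has suspended this queue, drop the DQM and process
 * locks and wait until it is resumed before letting destruction
 * continue.
 */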
static int wait_on_destroy_queue(struct device_queue_manager *dqm,
struct queue *q)
{
struct kfd_process_device *pdd = kfd_get_process_device_data(q->device,
q->process);
int ret = 0;
if (pdd->qpd.is_debug)
return ret;
q->properties.is_being_destroyed = true;
if (pdd->process->debug_trap_enabled && q->properties.is_suspended) {
dqm_unlock(dqm);
mutex_unlock(&q->process->mutex);
ret = wait_event_interruptible(dqm->destroy_wait,
!q->properties.is_suspended);
mutex_lock(&q->process->mutex);
dqm_lock(dqm);
}
return ret;
}
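/* HWS queue teardown: unmap the queue (runlist or MES), release its
 * doorbell and SDMA slot, then free the MQD outside the DQM lock to
 * avoid circular locking.
 */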
static int destroy_queue_cpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
{
int retval;
struct mqd_manager *mqd_mgr;
uint64_t sdma_val = 0;
struct kfd_process_device *pdd = qpd_to_pdd(qpd);
/* Get the SDMA queue stats */
if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
(q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
&sdma_val);
if (retval)
pr_err("Failed to read SDMA queue counter for queue: %d\n",
q->properties.queue_id);
}
/* remove queue from list to prevent rescheduling after preemption */
dqm_lock(dqm);
retval = wait_on_destroy_queue(dqm, q);
if (retval) {
dqm_unlock(dqm);
return retval;
}
if (qpd->is_debug) {
/*
* error: we do not currently allow destroying a queue
* of a process that is being debugged
*/
retval = -EBUSY;
goto failed_try_destroy_debugged_queue;
}
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
deallocate_doorbell(qpd, q);
if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
(q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
deallocate_sdma_queue(dqm, q);
pdd->sdma_past_activity_counter += sdma_val;
}
list_del(&q->list);
qpd->queue_count--;
if (q->properties.is_active) {
decrement_queue_count(dqm, qpd, q);
if (!dqm->dev->kfd->shared_resources.enable_mes) {
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
USE_DEFAULT_GRACE_PERIOD);
if (retval == -ETIME)
qpd->reset_wavefronts = true;
} else {
retval = remove_queue_mes(dqm, q, qpd);
}
}
/*
* Unconditionally decrement this counter, regardless of the queue's
* type
*/
dqm->total_queue_count--;
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
dqm_unlock(dqm);
/*
* Do free_mqd and raise delete event after dqm_unlock(dqm) to avoid
* circular locking
*/
kfd_dbg_ev_raise(KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE),
qpd->pqm->process, q->device,
-1, false, NULL, 0);
mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
return retval;
failed_try_destroy_debugged_queue:
dqm_unlock(dqm);
return retval;
}
/*
* Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
* stay in user mode.
*/
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF
static bool set_cache_memory_policy(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
uint64_t alternate_aperture_size)
{
bool retval = true;
if (!dqm->asic_ops.set_cache_memory_policy)
return retval;
dqm_lock(dqm);
if (alternate_aperture_size == 0) {
/* base > limit disables APE1 */
qpd->sh_mem_ape1_base = 1;
qpd->sh_mem_ape1_limit = 0;
} else {
/*
* In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
* SH_MEM_APE1_BASE[31:0], 0x0000 }
* APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
* SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
* Verify that the base and size parameters can be
* represented in this format and convert them.
* Additionally restrict APE1 to user-mode addresses.
*/
uint64_t base = (uintptr_t)alternate_aperture_base;
uint64_t limit = base + alternate_aperture_size - 1;
if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
(limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
retval = false;
goto out;
}
qpd->sh_mem_ape1_base = base >> 16;
qpd->sh_mem_ape1_limit = limit >> 16;
}
retval = dqm->asic_ops.set_cache_memory_policy(
dqm,
qpd,
default_policy,
alternate_policy,
alternate_aperture_base,
alternate_aperture_size);
if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
program_sh_mem_settings(dqm, qpd);
pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
qpd->sh_mem_config, qpd->sh_mem_ape1_base,
qpd->sh_mem_ape1_limit);
out:
dqm_unlock(dqm);
return retval;
}
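/* No-HWS process termination: destroy every remaining user-mode queue
 * and unregister the process from the DQM.
 */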
static int process_termination_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct queue *q;
struct device_process_node *cur, *next_dpn;
int retval = 0;
bool found = false;
dqm_lock(dqm);
/* Clear all user mode queues */
while (!list_empty(&qpd->queues_list)) {
struct mqd_manager *mqd_mgr;
int ret;
q = list_first_entry(&qpd->queues_list, struct queue, list);
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
if (ret)
retval = ret;
dqm_unlock(dqm);
mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
dqm_lock(dqm);
}
/* Unregister process */
list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
if (qpd == cur->qpd) {
list_del(&cur->list);
kfree(cur);
dqm->processes_count--;
found = true;
break;
}
}
dqm_unlock(dqm);
/* Outside the DQM lock because under the DQM lock we can't do
* reclaim or take other locks that others hold while reclaiming.
*/
if (found)
kfd_dec_compute_active(dqm->dev);
return retval;
}
static int get_wave_state(struct device_queue_manager *dqm,
struct queue *q,
void __user *ctl_stack,
u32 *ctl_stack_used_size,
u32 *save_area_used_size)
{
struct mqd_manager *mqd_mgr;
dqm_lock(dqm);
mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];
if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
q->properties.is_active || !q->device->kfd->cwsr_enabled ||
!mqd_mgr->get_wave_state) {
dqm_unlock(dqm);
return -EINVAL;
}
dqm_unlock(dqm);
/*
* get_wave_state is outside the dqm lock to prevent circular locking
* and the queue should be protected against destruction by the process
* lock.
*/
return mqd_mgr->get_wave_state(mqd_mgr, q->mqd, &q->properties,
ctl_stack, ctl_stack_used_size, save_area_used_size);
}
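/* Report the MQD size and, for compute queues, the control stack size
 * so callers (e.g. the checkpoint path) can size their buffers.
 */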
static void get_queue_checkpoint_info(struct device_queue_manager *dqm,
const struct queue *q,
u32 *mqd_size,
u32 *ctl_stack_size)
{
struct mqd_manager *mqd_mgr;
enum KFD_MQD_TYPE mqd_type =
get_mqd_type_from_queue_type(q->properties.type);
dqm_lock(dqm);
mqd_mgr = dqm->mqd_mgrs[mqd_type];
*mqd_size = mqd_mgr->mqd_size;
*ctl_stack_size = 0;
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE && mqd_mgr->get_checkpoint_info)
mqd_mgr->get_checkpoint_info(mqd_mgr, q->mqd, ctl_stack_size);
dqm_unlock(dqm);
}
static int checkpoint_mqd(struct device_queue_manager *dqm,
const struct queue *q,
void *mqd,
void *ctl_stack)
{
struct mqd_manager *mqd_mgr;
int r = 0;
enum KFD_MQD_TYPE mqd_type =
get_mqd_type_from_queue_type(q->properties.type);
dqm_lock(dqm);
if (q->properties.is_active || !q->device->kfd->cwsr_enabled) {
r = -EINVAL;
goto dqm_unlock;
}
mqd_mgr = dqm->mqd_mgrs[mqd_type];
if (!mqd_mgr->checkpoint_mqd) {
r = -EOPNOTSUPP;
goto dqm_unlock;
}
mqd_mgr->checkpoint_mqd(mqd_mgr, q->mqd, mqd, ctl_stack);
dqm_unlock:
dqm_unlock(dqm);
return r;
}
static int process_termination_cpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
int retval;
struct queue *q;
struct kernel_queue *kq, *kq_next;
struct mqd_manager *mqd_mgr;
struct device_process_node *cur, *next_dpn;
enum kfd_unmap_queues_filter filter =
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
bool found = false;
retval = 0;
dqm_lock(dqm);
/* Clean all kernel queues */
list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
list_del(&kq->list);
decrement_queue_count(dqm, qpd, kq->queue);
qpd->is_debug = false;
dqm->total_queue_count--;
filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
}
/* Clear all user mode queues */
list_for_each_entry(q, &qpd->queues_list, list) {
if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
deallocate_sdma_queue(dqm, q);
else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
deallocate_sdma_queue(dqm, q);
if (q->properties.is_active) {
decrement_queue_count(dqm, qpd, q);
if (dqm->dev->kfd->shared_resources.enable_mes) {
retval = remove_queue_mes(dqm, q, qpd);
if (retval)
pr_err("Failed to remove queue %d\n",
q->properties.queue_id);
}
}
dqm->total_queue_count--;
}
/* Unregister process */
list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
if (qpd == cur->qpd) {
list_del(&cur->list);
kfree(cur);
dqm->processes_count--;
found = true;
break;
}
}
if (!dqm->dev->kfd->shared_resources.enable_mes)
retval = execute_queues_cpsch(dqm, filter, 0, USE_DEFAULT_GRACE_PERIOD);
if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) {
pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
qpd->reset_wavefronts = false;
}
/* Lastly, free mqd resources.
* Do free_mqd() after dqm_unlock to avoid circular locking.
*/
while (!list_empty(&qpd->queues_list)) {
q = list_first_entry(&qpd->queues_list, struct queue, list);
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
list_del(&q->list);
qpd->queue_count--;
dqm_unlock(dqm);
mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
dqm_lock(dqm);
}
dqm_unlock(dqm);
/* Outside the DQM lock because under the DQM lock we can't do
* reclaim or take other locks that others hold while reclaiming.
*/
if (found)
kfd_dec_compute_active(dqm->dev);
return retval;
}
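/* Create one MQD manager per MQD type for this ASIC; on failure free
 * the managers created so far.
 */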
static int init_mqd_managers(struct device_queue_manager *dqm)
{
int i, j;
struct mqd_manager *mqd_mgr;
for (i = 0; i < KFD_MQD_TYPE_MAX; i++) {
mqd_mgr = dqm->asic_ops.mqd_manager_init(i, dqm->dev);
if (!mqd_mgr) {
pr_err("mqd manager [%d] initialization failed\n", i);
goto out_free;
}
dqm->mqd_mgrs[i] = mqd_mgr;
}
return 0;
out_free:
for (j = 0; j < i; j++) {
kfree(dqm->mqd_mgrs[j]);
dqm->mqd_mgrs[j] = NULL;
}
return -ENOMEM;
}
/* Allocate one HIQ MQD (for HWS) and all SDMA MQDs in one contiguous chunk */
static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
{
int retval;
struct kfd_node *dev = dqm->dev;
struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
get_num_all_sdma_engines(dqm) *
dev->kfd->device_info.num_sdma_queues_per_engine +
(dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size *
NUM_XCC(dqm->dev->xcc_mask));
retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, size,
&(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
(void *)&(mem_obj->cpu_ptr), false);
return retval;
}
struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev)
{
struct device_queue_manager *dqm;
pr_debug("Loading device queue manager\n");
dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
if (!dqm)
return NULL;
switch (dev->adev->asic_type) {
/* HWS is not available on Hawaii. */
case CHIP_HAWAII:
/* HWS depends on CWSR for timely dequeue. CWSR is not
* available on Tonga.
*
* FIXME: This argument also applies to Kaveri.
*/
case CHIP_TONGA:
dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS;
break;
default:
dqm->sched_policy = sched_policy;
break;
}
dqm->dev = dev;
switch (dqm->sched_policy) {
case KFD_SCHED_POLICY_HWS:
case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
/* initialize dqm for cp scheduling */
dqm->ops.create_queue = create_queue_cpsch;
dqm->ops.initialize = initialize_cpsch;
dqm->ops.start = start_cpsch;
dqm->ops.stop = stop_cpsch;
dqm->ops.pre_reset = pre_reset;
dqm->ops.destroy_queue = destroy_queue_cpsch;
dqm->ops.update_queue = update_queue;
dqm->ops.register_process = register_process;
dqm->ops.unregister_process = unregister_process;
dqm->ops.uninitialize = uninitialize;
dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
dqm->ops.process_termination = process_termination_cpsch;
dqm->ops.evict_process_queues = evict_process_queues_cpsch;
dqm->ops.restore_process_queues = restore_process_queues_cpsch;
dqm->ops.get_wave_state = get_wave_state;
dqm->ops.reset_queues = reset_queues_cpsch;
dqm->ops.get_queue_checkpoint_info = get_queue_checkpoint_info;
dqm->ops.checkpoint_mqd = checkpoint_mqd;
break;
case KFD_SCHED_POLICY_NO_HWS:
/* initialize dqm for no cp scheduling */
dqm->ops.start = start_nocpsch;
dqm->ops.stop = stop_nocpsch;
dqm->ops.pre_reset = pre_reset;
dqm->ops.create_queue = create_queue_nocpsch;
dqm->ops.destroy_queue = destroy_queue_nocpsch;
dqm->ops.update_queue = update_queue;
dqm->ops.register_process = register_process;
dqm->ops.unregister_process = unregister_process;
dqm->ops.initialize = initialize_nocpsch;
dqm->ops.uninitialize = uninitialize;
dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
dqm->ops.process_termination = process_termination_nocpsch;
dqm->ops.evict_process_queues = evict_process_queues_nocpsch;
dqm->ops.restore_process_queues =
restore_process_queues_nocpsch;
dqm->ops.get_wave_state = get_wave_state;
dqm->ops.get_queue_checkpoint_info = get_queue_checkpoint_info;
dqm->ops.checkpoint_mqd = checkpoint_mqd;
break;
default:
pr_err("Invalid scheduling policy %d\n", dqm->sched_policy);
goto out_free;
}
switch (dev->adev->asic_type) {
case CHIP_KAVERI:
case CHIP_HAWAII:
device_queue_manager_init_cik(&dqm->asic_ops);
break;
case CHIP_CARRIZO:
case CHIP_TONGA:
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
case CHIP_VEGAM:
device_queue_manager_init_vi(&dqm->asic_ops);
break;
default:
if (KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0))
device_queue_manager_init_v11(&dqm->asic_ops);
else if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
device_queue_manager_init_v10(&dqm->asic_ops);
else if (KFD_GC_VERSION(dev) >= IP_VERSION(9, 0, 1))
device_queue_manager_init_v9(&dqm->asic_ops);
else {
WARN(1, "Unexpected ASIC family %u",
dev->adev->asic_type);
goto out_free;
}
}
if (init_mqd_managers(dqm))
goto out_free;
if (!dev->kfd->shared_resources.enable_mes && allocate_hiq_sdma_mqd(dqm)) {
pr_err("Failed to allocate hiq sdma mqd trunk buffer\n");
goto out_free;
}
if (!dqm->ops.initialize(dqm)) {
init_waitqueue_head(&dqm->destroy_wait);
return dqm;
}
out_free:
kfree(dqm);
return NULL;
}
static void deallocate_hiq_sdma_mqd(struct kfd_node *dev,
struct kfd_mem_obj *mqd)
{
WARN(!mqd, "No hiq sdma mqd trunk to free");
amdgpu_amdkfd_free_gtt_mem(dev->adev, mqd->gtt_mem);
}
void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
dqm->ops.stop(dqm);
dqm->ops.uninitialize(dqm);
if (!dqm->dev->kfd->shared_resources.enable_mes)
deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
kfree(dqm);
}
int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid)
{
struct kfd_process_device *pdd;
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
int ret = 0;
if (!p)
return -EINVAL;
WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
pdd = kfd_get_process_device_data(dqm->dev, p);
if (pdd)
ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
kfd_unref_process(p);
return ret;
}
static void kfd_process_hw_exception(struct work_struct *work)
{
struct device_queue_manager *dqm = container_of(work,
struct device_queue_manager, hw_exception_work);
amdgpu_amdkfd_gpu_reset(dqm->dev->adev);
}
int reserve_debug_trap_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
int r;
int updated_vmid_mask;
if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy);
return -EINVAL;
}
dqm_lock(dqm);
if (dqm->trap_debug_vmid != 0) {
pr_err("Trap debug id already reserved\n");
r = -EBUSY;
goto out_unlock;
}
r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
USE_DEFAULT_GRACE_PERIOD, false);
if (r)
goto out_unlock;
updated_vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap;
updated_vmid_mask &= ~(1 << dqm->dev->vm_info.last_vmid_kfd);
dqm->dev->kfd->shared_resources.compute_vmid_bitmap = updated_vmid_mask;
dqm->trap_debug_vmid = dqm->dev->vm_info.last_vmid_kfd;
r = set_sched_resources(dqm);
if (r)
goto out_unlock;
r = map_queues_cpsch(dqm);
if (r)
goto out_unlock;
pr_debug("Reserved VMID for trap debug: %i\n", dqm->trap_debug_vmid);
out_unlock:
dqm_unlock(dqm);
return r;
}
/*
* Releases vmid for the trap debugger
*/
int release_debug_trap_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
int r;
int updated_vmid_mask;
uint32_t trap_debug_vmid;
if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy);
return -EINVAL;
}
dqm_lock(dqm);
trap_debug_vmid = dqm->trap_debug_vmid;
if (dqm->trap_debug_vmid == 0) {
pr_err("Trap debug id is not reserved\n");
r = -EINVAL;
goto out_unlock;
}
r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
USE_DEFAULT_GRACE_PERIOD, false);
if (r)
goto out_unlock;
updated_vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap;
updated_vmid_mask |= (1 << dqm->dev->vm_info.last_vmid_kfd);
dqm->dev->kfd->shared_resources.compute_vmid_bitmap = updated_vmid_mask;
dqm->trap_debug_vmid = 0;
r = set_sched_resources(dqm);
if (r)
goto out_unlock;
r = map_queues_cpsch(dqm);
if (r)
goto out_unlock;
pr_debug("Released VMID for trap debug: %i\n", trap_debug_vmid);
out_unlock:
dqm_unlock(dqm);
return r;
}
#define QUEUE_NOT_FOUND -1
/* invalidate queue operation in array */
static void q_array_invalidate(uint32_t num_queues, uint32_t *queue_ids)
{
int i;
for (i = 0; i < num_queues; i++)
queue_ids[i] |= KFD_DBG_QUEUE_INVALID_MASK;
}
/* find queue index in array */
static int q_array_get_index(unsigned int queue_id,
uint32_t num_queues,
uint32_t *queue_ids)
{
int i;
for (i = 0; i < num_queues; i++)
if (queue_id == (queue_ids[i] & ~KFD_DBG_QUEUE_INVALID_MASK))
return i;
return QUEUE_NOT_FOUND;
}
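/* Worker used after suspending queues: it attaches to the process mm
 * and calls get_wave_state() for every queue so the context save area
 * header of each queue is brought up to date.
 */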
struct copy_context_work_handler_workarea {
struct work_struct copy_context_work;
struct kfd_process *p;
};
static void copy_context_work_handler (struct work_struct *work)
{
struct copy_context_work_handler_workarea *workarea;
struct mqd_manager *mqd_mgr;
struct queue *q;
struct mm_struct *mm;
struct kfd_process *p;
uint32_t tmp_ctl_stack_used_size, tmp_save_area_used_size;
int i;
workarea = container_of(work,
struct copy_context_work_handler_workarea,
copy_context_work);
p = workarea->p;
mm = get_task_mm(p->lead_thread);
if (!mm)
return;
kthread_use_mm(mm);
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
struct device_queue_manager *dqm = pdd->dev->dqm;
struct qcm_process_device *qpd = &pdd->qpd;
list_for_each_entry(q, &qpd->queues_list, list) {
mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];
/* We ignore the return value from get_wave_state
* because
* i) right now, it always returns 0, and
* ii) if we hit an error, we would continue to the
* next queue anyway.
*/
mqd_mgr->get_wave_state(mqd_mgr,
q->mqd,
&q->properties,
(void __user *) q->properties.ctx_save_restore_area_address,
&tmp_ctl_stack_used_size,
&tmp_save_area_used_size);
}
}
kthread_unuse_mm(mm);
mmput(mm);
}
static uint32_t *get_queue_ids(uint32_t num_queues, uint32_t *usr_queue_id_array)
{
size_t array_size = num_queues * sizeof(uint32_t);
if (!usr_queue_id_array)
return NULL;
return memdup_user(usr_queue_id_array, array_size);
}
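/* Resume debugger-suspended queues (all queues if usr_queue_id_array
 * is NULL). Per-queue status is reported back through the user array
 * with the INVALID/ERROR mask bits; returns the number of queues
 * resumed.
 */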
int resume_queues(struct kfd_process *p,
uint32_t num_queues,
uint32_t *usr_queue_id_array)
{
uint32_t *queue_ids = NULL;
int total_resumed = 0;
int i;
if (usr_queue_id_array) {
queue_ids = get_queue_ids(num_queues, usr_queue_id_array);
if (IS_ERR(queue_ids))
return PTR_ERR(queue_ids);
/* mask all queues as invalid. unmask per successful request */
q_array_invalidate(num_queues, queue_ids);
}
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
struct device_queue_manager *dqm = pdd->dev->dqm;
struct qcm_process_device *qpd = &pdd->qpd;
struct queue *q;
int r, per_device_resumed = 0;
dqm_lock(dqm);
/* unmask queues that resume or already resumed as valid */
list_for_each_entry(q, &qpd->queues_list, list) {
int q_idx = QUEUE_NOT_FOUND;
if (queue_ids)
q_idx = q_array_get_index(
q->properties.queue_id,
num_queues,
queue_ids);
if (!queue_ids || q_idx != QUEUE_NOT_FOUND) {
int err = resume_single_queue(dqm, &pdd->qpd, q);
if (queue_ids) {
if (!err) {
queue_ids[q_idx] &=
~KFD_DBG_QUEUE_INVALID_MASK;
} else {
queue_ids[q_idx] |=
KFD_DBG_QUEUE_ERROR_MASK;
break;
}
}
if (dqm->dev->kfd->shared_resources.enable_mes) {
wake_up_all(&dqm->destroy_wait);
if (!err)
total_resumed++;
} else {
per_device_resumed++;
}
}
}
if (!per_device_resumed) {
dqm_unlock(dqm);
continue;
}
r = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
0,
USE_DEFAULT_GRACE_PERIOD);
if (r) {
pr_err("Failed to resume process queues\n");
if (queue_ids) {
list_for_each_entry(q, &qpd->queues_list, list) {
int q_idx = q_array_get_index(
q->properties.queue_id,
num_queues,
queue_ids);
/* mask queue as error on resume fail */
if (q_idx != QUEUE_NOT_FOUND)
queue_ids[q_idx] |=
KFD_DBG_QUEUE_ERROR_MASK;
}
}
} else {
wake_up_all(&dqm->destroy_wait);
total_resumed += per_device_resumed;
}
dqm_unlock(dqm);
}
if (queue_ids) {
if (copy_to_user((void __user *)usr_queue_id_array, queue_ids,
num_queues * sizeof(uint32_t)))
pr_err("copy_to_user failed on queue resume\n");
kfree(queue_ids);
}
return total_resumed;
}
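/* Suspend the listed queues for the debugger using the requested CWSR
 * grace period, optionally clearing exception status bits, and report
 * per-queue results through the user array; returns the number of
 * queues suspended.
 */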
int suspend_queues(struct kfd_process *p,
uint32_t num_queues,
uint32_t grace_period,
uint64_t exception_clear_mask,
uint32_t *usr_queue_id_array)
{
uint32_t *queue_ids = get_queue_ids(num_queues, usr_queue_id_array);
int total_suspended = 0;
int i;
if (IS_ERR(queue_ids))
return PTR_ERR(queue_ids);
/* mask all queues as invalid. unmask on successful request */
q_array_invalidate(num_queues, queue_ids);
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
struct device_queue_manager *dqm = pdd->dev->dqm;
struct qcm_process_device *qpd = &pdd->qpd;
struct queue *q;
int r, per_device_suspended = 0;
mutex_lock(&p->event_mutex);
dqm_lock(dqm);
/* unmask queues that suspend or already suspended */
list_for_each_entry(q, &qpd->queues_list, list) {
int q_idx = q_array_get_index(q->properties.queue_id,
num_queues,
queue_ids);
if (q_idx != QUEUE_NOT_FOUND) {
int err = suspend_single_queue(dqm, pdd, q);
bool is_mes = dqm->dev->kfd->shared_resources.enable_mes;
if (!err) {
queue_ids[q_idx] &= ~KFD_DBG_QUEUE_INVALID_MASK;
if (exception_clear_mask && is_mes)
q->properties.exception_status &=
~exception_clear_mask;
if (is_mes)
total_suspended++;
else
per_device_suspended++;
} else if (err != -EBUSY) {
r = err;
queue_ids[q_idx] |= KFD_DBG_QUEUE_ERROR_MASK;
break;
}
}
}
if (!per_device_suspended) {
dqm_unlock(dqm);
mutex_unlock(&p->event_mutex);
if (total_suspended)
amdgpu_amdkfd_debug_mem_fence(dqm->dev->adev);
continue;
}
r = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
grace_period);
if (r)
pr_err("Failed to suspend process queues.\n");
else
total_suspended += per_device_suspended;
list_for_each_entry(q, &qpd->queues_list, list) {
int q_idx = q_array_get_index(q->properties.queue_id,
num_queues, queue_ids);
if (q_idx == QUEUE_NOT_FOUND)
continue;
/* mask queue as error on suspend fail */
if (r)
queue_ids[q_idx] |= KFD_DBG_QUEUE_ERROR_MASK;
else if (exception_clear_mask)
q->properties.exception_status &=
~exception_clear_mask;
}
dqm_unlock(dqm);
mutex_unlock(&p->event_mutex);
amdgpu_device_flush_hdp(dqm->dev->adev, NULL);
}
if (total_suspended) {
struct copy_context_work_handler_workarea copy_context_worker;
INIT_WORK_ONSTACK(
&copy_context_worker.copy_context_work,
copy_context_work_handler);
copy_context_worker.p = p;
schedule_work(&copy_context_worker.copy_context_work);
flush_work(&copy_context_worker.copy_context_work);
destroy_work_on_stack(&copy_context_worker.copy_context_work);
}
if (copy_to_user((void __user *)usr_queue_id_array, queue_ids,
num_queues * sizeof(uint32_t)))
pr_err("copy_to_user failed on queue suspend\n");
kfree(queue_ids);
return total_suspended;
}
static uint32_t set_queue_type_for_user(struct queue_properties *q_props)
{
switch (q_props->type) {
case KFD_QUEUE_TYPE_COMPUTE:
return q_props->format == KFD_QUEUE_FORMAT_PM4
? KFD_IOC_QUEUE_TYPE_COMPUTE
: KFD_IOC_QUEUE_TYPE_COMPUTE_AQL;
case KFD_QUEUE_TYPE_SDMA:
return KFD_IOC_QUEUE_TYPE_SDMA;
case KFD_QUEUE_TYPE_SDMA_XGMI:
return KFD_IOC_QUEUE_TYPE_SDMA_XGMI;
default:
WARN_ONCE(true, "queue type not recognized!");
return 0xffffffff;
};
}
void set_queue_snapshot_entry(struct queue *q,
uint64_t exception_clear_mask,
struct kfd_queue_snapshot_entry *qss_entry)
{
qss_entry->ring_base_address = q->properties.queue_address;
qss_entry->write_pointer_address = (uint64_t)q->properties.write_ptr;
qss_entry->read_pointer_address = (uint64_t)q->properties.read_ptr;
qss_entry->ctx_save_restore_address =
q->properties.ctx_save_restore_area_address;
qss_entry->ctx_save_restore_area_size =
q->properties.ctx_save_restore_area_size;
qss_entry->exception_status = q->properties.exception_status;
qss_entry->queue_id = q->properties.queue_id;
qss_entry->gpu_id = q->device->id;
qss_entry->ring_size = (uint32_t)q->properties.queue_size;
qss_entry->queue_type = set_queue_type_for_user(&q->properties);
q->properties.exception_status &= ~exception_clear_mask;
}
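/* Debugger helpers: debug_lock_and_unmap() unmaps all queues while
 * keeping the DQM lock held, and debug_map_and_unlock() re-maps them
 * and drops the lock, so the runlist can be refreshed atomically.
 */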
int debug_lock_and_unmap(struct device_queue_manager *dqm)
{
int r;
if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy);
return -EINVAL;
}
if (!kfd_dbg_is_per_vmid_supported(dqm->dev))
return 0;
dqm_lock(dqm);
r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, 0, false);
if (r)
dqm_unlock(dqm);
return r;
}
int debug_map_and_unlock(struct device_queue_manager *dqm)
{
int r;
if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy);
return -EINVAL;
}
if (!kfd_dbg_is_per_vmid_supported(dqm->dev))
return 0;
r = map_queues_cpsch(dqm);
dqm_unlock(dqm);
return r;
}
int debug_refresh_runlist(struct device_queue_manager *dqm)
{
int r = debug_lock_and_unmap(dqm);
if (r)
return r;
return debug_map_and_unlock(dqm);
}
#if defined(CONFIG_DEBUG_FS)
static void seq_reg_dump(struct seq_file *m,
uint32_t (*dump)[2], uint32_t n_regs)
{
uint32_t i, count;
for (i = 0, count = 0; i < n_regs; i++) {
if (count == 0 ||
dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) {
seq_printf(m, "%s %08x: %08x",
i ? "\n" : "",
dump[i][0], dump[i][1]);
count = 7;
} else {
seq_printf(m, " %08x", dump[i][1]);
count--;
}
}
seq_puts(m, "\n");
}
int dqm_debugfs_hqds(struct seq_file *m, void *data)
{
struct device_queue_manager *dqm = data;
uint32_t xcc_mask = dqm->dev->xcc_mask;
uint32_t (*dump)[2], n_regs;
int pipe, queue;
int r = 0, xcc_id;
uint32_t sdma_engine_start;
if (!dqm->sched_running) {
seq_puts(m, " Device is stopped\n");
return 0;
}
for_each_inst(xcc_id, xcc_mask) {
r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev,
KFD_CIK_HIQ_PIPE,
KFD_CIK_HIQ_QUEUE, &dump,
&n_regs, xcc_id);
if (!r) {
seq_printf(
m,
" Inst %d, HIQ on MEC %d Pipe %d Queue %d\n",
xcc_id,
KFD_CIK_HIQ_PIPE / get_pipes_per_mec(dqm) + 1,
KFD_CIK_HIQ_PIPE % get_pipes_per_mec(dqm),
KFD_CIK_HIQ_QUEUE);
seq_reg_dump(m, dump, n_regs);
kfree(dump);
}
for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
int pipe_offset = pipe * get_queues_per_pipe(dqm);
for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
if (!test_bit(pipe_offset + queue,
dqm->dev->kfd->shared_resources.cp_queue_bitmap))
continue;
r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev,
pipe, queue,
&dump, &n_regs,
xcc_id);
if (r)
break;
seq_printf(m,
" Inst %d, CP Pipe %d, Queue %d\n",
xcc_id, pipe, queue);
seq_reg_dump(m, dump, n_regs);
kfree(dump);
}
}
}
sdma_engine_start = dqm->dev->node_id * get_num_all_sdma_engines(dqm);
for (pipe = sdma_engine_start;
pipe < (sdma_engine_start + get_num_all_sdma_engines(dqm));
pipe++) {
for (queue = 0;
queue < dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
queue++) {
r = dqm->dev->kfd2kgd->hqd_sdma_dump(
dqm->dev->adev, pipe, queue, &dump, &n_regs);
if (r)
break;
seq_printf(m, " SDMA Engine %d, RLC %d\n",
pipe, queue);
seq_reg_dump(m, dump, n_regs);
kfree(dump);
}
}
return r;
}
int dqm_debugfs_hang_hws(struct device_queue_manager *dqm)
{
int r = 0;
dqm_lock(dqm);
r = pm_debugfs_hang_hws(&dqm->packet_mgr);
if (r) {
dqm_unlock(dqm);
return r;
}
dqm->active_runlist = true;
r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
0, USE_DEFAULT_GRACE_PERIOD);
dqm_unlock(dqm);
return r;
}
#endif
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c |
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
#include "v11_structs.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "amdgpu_amdkfd.h"
static inline struct v11_compute_mqd *get_mqd(void *mqd)
{
return (struct v11_compute_mqd *)mqd;
}
static inline struct v11_sdma_mqd *get_sdma_mqd(void *mqd)
{
return (struct v11_sdma_mqd *)mqd;
}
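/* Apply either the debug-workaround SE masks or a user-supplied CU
 * mask to the per-SE static thread management fields of the MQD.
 */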
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
struct mqd_update_info *minfo)
{
struct v11_compute_mqd *m;
uint32_t se_mask[KFD_MAX_NUM_SE] = {0};
bool has_wa_flag = minfo && (minfo->update_flag & (UPDATE_FLAG_DBG_WA_ENABLE |
UPDATE_FLAG_DBG_WA_DISABLE));
if (!minfo || !(has_wa_flag || minfo->cu_mask.ptr))
return;
m = get_mqd(mqd);
if (has_wa_flag) {
uint32_t wa_mask = minfo->update_flag == UPDATE_FLAG_DBG_WA_ENABLE ?
0xffff : 0xffffffff;
m->compute_static_thread_mgmt_se0 = wa_mask;
m->compute_static_thread_mgmt_se1 = wa_mask;
m->compute_static_thread_mgmt_se2 = wa_mask;
m->compute_static_thread_mgmt_se3 = wa_mask;
m->compute_static_thread_mgmt_se4 = wa_mask;
m->compute_static_thread_mgmt_se5 = wa_mask;
m->compute_static_thread_mgmt_se6 = wa_mask;
m->compute_static_thread_mgmt_se7 = wa_mask;
return;
}
mqd_symmetrically_map_cu_mask(mm,
minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0);
m->compute_static_thread_mgmt_se0 = se_mask[0];
m->compute_static_thread_mgmt_se1 = se_mask[1];
m->compute_static_thread_mgmt_se2 = se_mask[2];
m->compute_static_thread_mgmt_se3 = se_mask[3];
m->compute_static_thread_mgmt_se4 = se_mask[4];
m->compute_static_thread_mgmt_se5 = se_mask[5];
m->compute_static_thread_mgmt_se6 = se_mask[6];
m->compute_static_thread_mgmt_se7 = se_mask[7];
pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n",
m->compute_static_thread_mgmt_se0,
m->compute_static_thread_mgmt_se1,
m->compute_static_thread_mgmt_se2,
m->compute_static_thread_mgmt_se3,
m->compute_static_thread_mgmt_se4,
m->compute_static_thread_mgmt_se5,
m->compute_static_thread_mgmt_se6,
m->compute_static_thread_mgmt_se7);
}
static void set_priority(struct v11_compute_mqd *m, struct queue_properties *q)
{
m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
m->cp_hqd_queue_priority = q->priority;
}
static struct kfd_mem_obj *allocate_mqd(struct kfd_node *node,
struct queue_properties *q)
{
struct kfd_mem_obj *mqd_mem_obj;
int size;
/*
* MES writes to areas beyond the MQD size, so allocate a full
* PAGE_SIZE of memory for the MQD if MES is enabled.
*/
if (node->kfd->shared_resources.enable_mes)
size = PAGE_SIZE;
else
size = sizeof(struct v11_compute_mqd);
if (kfd_gtt_sa_allocate(node, size, &mqd_mem_obj))
return NULL;
return mqd_mem_obj;
}
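/* Initialize a GFX11 compute MQD: program the MQD base address,
 * quantum, AQL and CWSR related fields, then call update_mqd() to
 * fill in the ring buffer and doorbell settings.
 */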
static void init_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
{
uint64_t addr;
struct v11_compute_mqd *m;
int size;
uint32_t wa_mask = q->is_dbg_wa ? 0xffff : 0xffffffff;
m = (struct v11_compute_mqd *) mqd_mem_obj->cpu_ptr;
addr = mqd_mem_obj->gpu_addr;
if (mm->dev->kfd->shared_resources.enable_mes)
size = PAGE_SIZE;
else
size = sizeof(struct v11_compute_mqd);
memset(m, 0, size);
m->header = 0xC0310800;
m->compute_pipelinestat_enable = 1;
m->compute_static_thread_mgmt_se0 = wa_mask;
m->compute_static_thread_mgmt_se1 = wa_mask;
m->compute_static_thread_mgmt_se2 = wa_mask;
m->compute_static_thread_mgmt_se3 = wa_mask;
m->compute_static_thread_mgmt_se4 = wa_mask;
m->compute_static_thread_mgmt_se5 = wa_mask;
m->compute_static_thread_mgmt_se6 = wa_mask;
m->compute_static_thread_mgmt_se7 = wa_mask;
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
m->cp_mqd_base_addr_hi = upper_32_bits(addr);
m->cp_hqd_quantum = 1 << CP_HQD_QUANTUM__QUANTUM_EN__SHIFT |
1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;
/* Set cp_hqd_hq_scheduler0 bit 14 to 1 to have the CP set up the
* DISPATCH_PTR. This is required for the kfd debugger
*/
m->cp_hqd_hq_status0 = 1 << 14;
/*
* GFX11 RS64 CPFW version >= 509 supports PCIe atomics support
* acknowledgment.
*/
if (amdgpu_amdkfd_have_atomics_support(mm->dev->adev))
m->cp_hqd_hq_status0 |= 1 << 29;
if (q->format == KFD_QUEUE_FORMAT_AQL) {
m->cp_hqd_aql_control =
1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
}
if (mm->dev->kfd->cwsr_enabled) {
m->cp_hqd_persistent_state |=
(1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
m->cp_hqd_ctx_save_base_addr_lo =
lower_32_bits(q->ctx_save_restore_area_address);
m->cp_hqd_ctx_save_base_addr_hi =
upper_32_bits(q->ctx_save_restore_area_address);
m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size;
m->cp_hqd_cntl_stack_size = q->ctl_stack_size;
m->cp_hqd_cntl_stack_offset = q->ctl_stack_size;
m->cp_hqd_wg_state_offset = q->ctl_stack_size;
}
*mqd = m;
if (gart_addr)
*gart_addr = addr;
mm->update_mqd(mm, m, q, NULL);
}
static int load_mqd(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
struct queue_properties *p, struct mm_struct *mms)
{
int r = 0;
/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
r = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
(uint32_t __user *)p->write_ptr,
wptr_shift, 0, mms, 0);
return r;
}
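/* Refresh the ring buffer, read/write pointer, doorbell, EOP and
 * priority fields of the MQD from the current queue properties.
 */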
static void update_mqd(struct mqd_manager *mm, void *mqd,
struct queue_properties *q,
struct mqd_update_info *minfo)
{
struct v11_compute_mqd *m;
m = get_mqd(mqd);
m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);
m->cp_hqd_pq_doorbell_control =
q->doorbell_off <<
CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
m->cp_hqd_pq_doorbell_control);
m->cp_hqd_ib_control = 3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT;
/*
* HW does not clamp this field correctly. Maximum EOP queue size
* is constrained by per-SE EOP done signal count, which is 8-bit.
* Limit is 0xFF EOP entries (= 0x7F8 dwords). CP will not submit
* more than (EOP entry count - 1) so a queue size of 0x800 dwords
* is safe, giving a maximum field value of 0xA.
*/
m->cp_hqd_eop_control = min(0xA,
ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1);
m->cp_hqd_eop_base_addr_lo =
lower_32_bits(q->eop_ring_buffer_address >> 8);
m->cp_hqd_eop_base_addr_hi =
upper_32_bits(q->eop_ring_buffer_address >> 8);
m->cp_hqd_iq_timer = 0;
m->cp_hqd_vmid = q->vmid;
if (q->format == KFD_QUEUE_FORMAT_AQL) {
/* GC 10 removed WPP_CLAMP from PQ Control */
m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT |
1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT ;
m->cp_hqd_pq_doorbell_control |=
1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
}
if (mm->dev->kfd->cwsr_enabled)
m->cp_hqd_ctx_save_control = 0;
update_cu_mask(mm, mqd, minfo);
set_priority(m, q);
q->is_active = QUEUE_IS_ACTIVE(*q);
}
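/* Return the doorbell ID recorded in the MQD for this queue. */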
static uint32_t read_doorbell_id(void *mqd)
{
struct v11_compute_mqd *m = (struct v11_compute_mqd *)mqd;
return m->queue_doorbell_id0;
}
static int get_wave_state(struct mqd_manager *mm, void *mqd,
struct queue_properties *q,
void __user *ctl_stack,
u32 *ctl_stack_used_size,
u32 *save_area_used_size)
{
struct v11_compute_mqd *m;
struct kfd_context_save_area_header header;
m = get_mqd(mqd);
/* Control stack is written backwards, while workgroup context data
	 * is written forwards. Both start from m->cp_hqd_cntl_stack_size.
* Current position is at m->cp_hqd_cntl_stack_offset and
* m->cp_hqd_wg_state_offset, respectively.
*/
*ctl_stack_used_size = m->cp_hqd_cntl_stack_size -
m->cp_hqd_cntl_stack_offset;
*save_area_used_size = m->cp_hqd_wg_state_offset -
m->cp_hqd_cntl_stack_size;
/* Control stack is not copied to user mode for GFXv11 because
* it's part of the context save area that is already
* accessible to user mode
*/
header.wave_state.control_stack_size = *ctl_stack_used_size;
header.wave_state.wave_state_size = *save_area_used_size;
header.wave_state.wave_state_offset = m->cp_hqd_wg_state_offset;
header.wave_state.control_stack_offset = m->cp_hqd_cntl_stack_offset;
if (copy_to_user(ctl_stack, &header, sizeof(header.wave_state)))
return -EFAULT;
return 0;
}
static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
{
struct v11_compute_mqd *m;
m = get_mqd(mqd);
memcpy(mqd_dst, m, sizeof(struct v11_compute_mqd));
}
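/* Restore an MQD from a checkpointed image and retarget its doorbell.
 * The restored queue is left inactive until it is updated and loaded.
 */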
static void restore_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *qp,
const void *mqd_src,
const void *ctl_stack_src, const u32 ctl_stack_size)
{
uint64_t addr;
struct v11_compute_mqd *m;
m = (struct v11_compute_mqd *) mqd_mem_obj->cpu_ptr;
addr = mqd_mem_obj->gpu_addr;
memcpy(m, mqd_src, sizeof(*m));
*mqd = m;
if (gart_addr)
*gart_addr = addr;
m->cp_hqd_pq_doorbell_control =
qp->doorbell_off <<
CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
m->cp_hqd_pq_doorbell_control);
qp->is_active = 0;
}
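/* The HIQ reuses the compute MQD layout but is marked as a privileged,
 * kernel-owned queue via the PRIV_STATE and KMD_QUEUE bits.
 */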
static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
{
struct v11_compute_mqd *m;
init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
m = get_mqd(*mqd);
m->cp_hqd_pq_control |= 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
}
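/* Destroy the HIQ by asking amdgpu to unmap it; the queue is identified
 * by the doorbell offset stored in its MQD.
 */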
static int destroy_hiq_mqd(struct mqd_manager *mm, void *mqd,
enum kfd_preempt_type type, unsigned int timeout,
uint32_t pipe_id, uint32_t queue_id)
{
int err;
struct v11_compute_mqd *m;
u32 doorbell_off;
m = get_mqd(mqd);
doorbell_off = m->cp_hqd_pq_doorbell_control >>
CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, 0);
if (err)
pr_debug("Destroy HIQ MQD failed: %d\n", err);
return err;
}
static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
{
struct v11_sdma_mqd *m;
int size;
m = (struct v11_sdma_mqd *) mqd_mem_obj->cpu_ptr;
if (mm->dev->kfd->shared_resources.enable_mes)
size = PAGE_SIZE;
else
size = sizeof(struct v11_sdma_mqd);
memset(m, 0, size);
*mqd = m;
if (gart_addr)
*gart_addr = mqd_mem_obj->gpu_addr;
mm->update_mqd(mm, m, q, NULL);
}
#define SDMA_RLC_DUMMY_DEFAULT 0xf
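/* Program the SDMA RLC ring buffer state (size, base, rptr/wptr addresses,
 * doorbell offset and scheduling quantum) into the SDMA MQD.
 */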
static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
struct queue_properties *q,
struct mqd_update_info *minfo)
{
struct v11_sdma_mqd *m;
m = get_sdma_mqd(mqd);
m->sdmax_rlcx_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
<< SDMA0_QUEUE0_RB_CNTL__RB_SIZE__SHIFT |
q->vmid << SDMA0_QUEUE0_RB_CNTL__RB_VMID__SHIFT |
1 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
6 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
1 << SDMA0_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT;
m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
m->sdmax_rlcx_rb_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
m->sdmax_rlcx_rb_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);
m->sdmax_rlcx_doorbell_offset =
q->doorbell_off << SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;
m->sdmax_rlcx_sched_cntl = (amdgpu_sdma_phase_quantum
<< SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT)
& SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK;
m->sdma_engine_id = q->sdma_engine_id;
m->sdma_queue_id = q->sdma_queue_id;
m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;
q->is_active = QUEUE_IS_ACTIVE(*q);
}
#if defined(CONFIG_DEBUG_FS)
static int debugfs_show_mqd(struct seq_file *m, void *data)
{
seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
data, sizeof(struct v11_compute_mqd), false);
return 0;
}
static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
{
seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
data, sizeof(struct v11_sdma_mqd), false);
return 0;
}
#endif
struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
struct kfd_node *dev)
{
struct mqd_manager *mqd;
if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
return NULL;
mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
if (!mqd)
return NULL;
mqd->dev = dev;
switch (type) {
case KFD_MQD_TYPE_CP:
pr_debug("%s@%i\n", __func__, __LINE__);
mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd;
mqd->free_mqd = kfd_free_mqd_cp;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd;
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v11_compute_mqd);
mqd->get_wave_state = get_wave_state;
mqd->mqd_stride = kfd_mqd_stride;
mqd->checkpoint_mqd = checkpoint_mqd;
mqd->restore_mqd = restore_mqd;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
pr_debug("%s@%i\n", __func__, __LINE__);
break;
case KFD_MQD_TYPE_HIQ:
pr_debug("%s@%i\n", __func__, __LINE__);
mqd->allocate_mqd = allocate_hiq_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = free_mqd_hiq_sdma;
mqd->load_mqd = kfd_hiq_load_mqd_kiq;
mqd->update_mqd = update_mqd;
mqd->destroy_mqd = destroy_hiq_mqd;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v11_compute_mqd);
mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
mqd->read_doorbell_id = read_doorbell_id;
pr_debug("%s@%i\n", __func__, __LINE__);
break;
case KFD_MQD_TYPE_DIQ:
mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = kfd_free_mqd_cp;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd;
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v11_compute_mqd);
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
break;
case KFD_MQD_TYPE_SDMA:
pr_debug("%s@%i\n", __func__, __LINE__);
mqd->allocate_mqd = allocate_sdma_mqd;
mqd->init_mqd = init_mqd_sdma;
mqd->free_mqd = free_mqd_hiq_sdma;
mqd->load_mqd = kfd_load_mqd_sdma;
mqd->update_mqd = update_mqd_sdma;
mqd->destroy_mqd = kfd_destroy_mqd_sdma;
mqd->is_occupied = kfd_is_occupied_sdma;
mqd->checkpoint_mqd = checkpoint_mqd;
mqd->restore_mqd = restore_mqd;
mqd->mqd_size = sizeof(struct v11_sdma_mqd);
mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
/*
		 * Use the generic functions to allocate and free SDMA MQDs
		 * when MES is enabled.
*/
if (dev->kfd->shared_resources.enable_mes) {
mqd->allocate_mqd = allocate_mqd;
mqd->free_mqd = kfd_free_mqd_cp;
}
pr_debug("%s@%i\n", __func__, __LINE__);
break;
default:
kfree(mqd);
return NULL;
}
return mqd;
}
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c |
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "kfd_priv.h"
#include "kfd_events.h"
#include "cik_int.h"
#include "amdgpu_amdkfd.h"
#include "kfd_smi_events.h"
static bool cik_event_interrupt_isr(struct kfd_node *dev,
const uint32_t *ih_ring_entry,
uint32_t *patched_ihre,
bool *patched_flag)
{
const struct cik_ih_ring_entry *ihre =
(const struct cik_ih_ring_entry *)ih_ring_entry;
const struct kfd2kgd_calls *f2g = dev->kfd2kgd;
unsigned int vmid;
uint16_t pasid;
bool ret;
	/* This workaround is due to a HW/FW limitation on Hawaii where
	 * the VMID and PASID are not written into the ih_ring_entry
*/
if ((ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) &&
dev->adev->asic_type == CHIP_HAWAII) {
struct cik_ih_ring_entry *tmp_ihre =
(struct cik_ih_ring_entry *)patched_ihre;
*patched_flag = true;
*tmp_ihre = *ihre;
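		/* Recover the VMID/PASID from registers and patch them into
		 * ring_id: bits 8-15 carry the VMID, bits 16-31 the PASID.
		 */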
vmid = f2g->read_vmid_from_vmfault_reg(dev->adev);
ret = f2g->get_atc_vmid_pasid_mapping_info(dev->adev, vmid, &pasid);
tmp_ihre->ring_id &= 0x000000ff;
tmp_ihre->ring_id |= vmid << 8;
tmp_ihre->ring_id |= pasid << 16;
return ret && (pasid != 0) &&
vmid >= dev->vm_info.first_vmid_kfd &&
vmid <= dev->vm_info.last_vmid_kfd;
}
/* Only handle interrupts from KFD VMIDs */
vmid = (ihre->ring_id & 0x0000ff00) >> 8;
if (vmid < dev->vm_info.first_vmid_kfd ||
vmid > dev->vm_info.last_vmid_kfd)
return false;
/* If there is no valid PASID, it's likely a firmware bug */
pasid = (ihre->ring_id & 0xffff0000) >> 16;
if (WARN_ONCE(pasid == 0, "FW bug: No PASID in KFD interrupt"))
return false;
/* Interrupt types we care about: various signals and faults.
* They will be forwarded to a work queue (see below).
*/
return ihre->source_id == CIK_INTSRC_CP_END_OF_PIPE ||
ihre->source_id == CIK_INTSRC_SDMA_TRAP ||
ihre->source_id == CIK_INTSRC_SQ_INTERRUPT_MSG ||
ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE ||
((ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) &&
!amdgpu_no_queue_eviction_on_vm_fault);
}
static void cik_event_interrupt_wq(struct kfd_node *dev,
const uint32_t *ih_ring_entry)
{
const struct cik_ih_ring_entry *ihre =
(const struct cik_ih_ring_entry *)ih_ring_entry;
uint32_t context_id = ihre->data & 0xfffffff;
unsigned int vmid = (ihre->ring_id & 0x0000ff00) >> 8;
u32 pasid = (ihre->ring_id & 0xffff0000) >> 16;
if (pasid == 0)
return;
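	/* The third argument to kfd_signal_event_interrupt() is the number
	 * of valid context-ID bits for this interrupt source.
	 */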
if (ihre->source_id == CIK_INTSRC_CP_END_OF_PIPE)
kfd_signal_event_interrupt(pasid, context_id, 28);
else if (ihre->source_id == CIK_INTSRC_SDMA_TRAP)
kfd_signal_event_interrupt(pasid, context_id, 28);
else if (ihre->source_id == CIK_INTSRC_SQ_INTERRUPT_MSG)
kfd_signal_event_interrupt(pasid, context_id & 0xff, 8);
else if (ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE)
kfd_signal_hw_exception_event(pasid);
else if (ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) {
struct kfd_vm_fault_info info;
kfd_smi_event_update_vmfault(dev, pasid);
kfd_dqm_evict_pasid(dev->dqm, pasid);
memset(&info, 0, sizeof(info));
amdgpu_amdkfd_gpuvm_get_vm_fault_info(dev->adev, &info);
if (!info.page_addr && !info.status)
return;
if (info.vmid == vmid)
kfd_signal_vm_fault_event(dev, pasid, &info, NULL);
else
kfd_signal_vm_fault_event(dev, pasid, NULL, NULL);
}
}
const struct kfd_event_interrupt_class event_interrupt_class_cik = {
.interrupt_isr = cik_event_interrupt_isr,
.interrupt_wq = cik_event_interrupt_wq,
};
| linux-master | drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2014-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu.h"
struct mm_struct;
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_svm.h"
#include "kfd_smi_events.h"
#include "kfd_debug.h"
/*
* List of struct kfd_process (field kfd_process).
* Unique/indexed by mm_struct*
*/
DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
DEFINE_MUTEX(kfd_processes_mutex);
DEFINE_SRCU(kfd_processes_srcu);
/* For process termination handling */
static struct workqueue_struct *kfd_process_wq;
/* Ordered, single-threaded workqueue for restoring evicted
* processes. Restoring multiple processes concurrently under memory
* pressure can lead to processes blocking each other from validating
* their BOs and result in a live-lock situation where processes
* remain evicted indefinitely.
*/
static struct workqueue_struct *kfd_restore_wq;
static struct kfd_process *find_process(const struct task_struct *thread,
bool ref);
static void kfd_process_ref_release(struct kref *ref);
static struct kfd_process *create_process(const struct task_struct *thread);
static void evict_process_worker(struct work_struct *work);
static void restore_process_worker(struct work_struct *work);
static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd);
struct kfd_procfs_tree {
struct kobject *kobj;
};
static struct kfd_procfs_tree procfs;
/*
* Structure for SDMA activity tracking
*/
struct kfd_sdma_activity_handler_workarea {
struct work_struct sdma_activity_work;
struct kfd_process_device *pdd;
uint64_t sdma_activity_counter;
};
struct temp_sdma_queue_list {
uint64_t __user *rptr;
uint64_t sdma_val;
unsigned int queue_id;
struct list_head list;
};
static void kfd_sdma_activity_worker(struct work_struct *work)
{
struct kfd_sdma_activity_handler_workarea *workarea;
struct kfd_process_device *pdd;
uint64_t val;
struct mm_struct *mm;
struct queue *q;
struct qcm_process_device *qpd;
struct device_queue_manager *dqm;
int ret = 0;
struct temp_sdma_queue_list sdma_q_list;
struct temp_sdma_queue_list *sdma_q, *next;
workarea = container_of(work, struct kfd_sdma_activity_handler_workarea,
sdma_activity_work);
pdd = workarea->pdd;
if (!pdd)
return;
dqm = pdd->dev->dqm;
qpd = &pdd->qpd;
if (!dqm || !qpd)
return;
/*
* Total SDMA activity is current SDMA activity + past SDMA activity
* Past SDMA count is stored in pdd.
* To get the current activity counters for all active SDMA queues,
* we loop over all SDMA queues and get their counts from user-space.
*
* We cannot call get_user() with dqm_lock held as it can cause
* a circular lock dependency situation. To read the SDMA stats,
* we need to do the following:
*
* 1. Create a temporary list of SDMA queue nodes from the qpd->queues_list,
* with dqm_lock/dqm_unlock().
* 2. Call get_user() for each node in temporary list without dqm_lock.
* Save the SDMA count for each node and also add the count to the total
* SDMA count counter.
	 * It's possible that, during this step, a few SDMA queue nodes were deleted
* from the qpd->queues_list.
* 3. Do a second pass over qpd->queues_list to check if any nodes got deleted.
* If any node got deleted, its SDMA count would be captured in the sdma
* past activity counter. So subtract the SDMA counter stored in step 2
* for this node from the total SDMA count.
*/
INIT_LIST_HEAD(&sdma_q_list.list);
/*
* Create the temp list of all SDMA queues
*/
dqm_lock(dqm);
list_for_each_entry(q, &qpd->queues_list, list) {
if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
(q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
continue;
sdma_q = kzalloc(sizeof(struct temp_sdma_queue_list), GFP_KERNEL);
if (!sdma_q) {
dqm_unlock(dqm);
goto cleanup;
}
INIT_LIST_HEAD(&sdma_q->list);
sdma_q->rptr = (uint64_t __user *)q->properties.read_ptr;
sdma_q->queue_id = q->properties.queue_id;
list_add_tail(&sdma_q->list, &sdma_q_list.list);
}
/*
	 * If the temp list is empty, then no SDMA queue nodes were found in
* qpd->queues_list. Return the past activity count as the total sdma
* count
*/
if (list_empty(&sdma_q_list.list)) {
workarea->sdma_activity_counter = pdd->sdma_past_activity_counter;
dqm_unlock(dqm);
return;
}
dqm_unlock(dqm);
/*
* Get the usage count for each SDMA queue in temp_list.
*/
mm = get_task_mm(pdd->process->lead_thread);
if (!mm)
goto cleanup;
kthread_use_mm(mm);
list_for_each_entry(sdma_q, &sdma_q_list.list, list) {
val = 0;
ret = read_sdma_queue_counter(sdma_q->rptr, &val);
if (ret) {
pr_debug("Failed to read SDMA queue active counter for queue id: %d",
sdma_q->queue_id);
} else {
sdma_q->sdma_val = val;
workarea->sdma_activity_counter += val;
}
}
kthread_unuse_mm(mm);
mmput(mm);
/*
	 * Do a second iteration over qpd->queues_list to check if any SDMA
* nodes got deleted while fetching SDMA counter.
*/
dqm_lock(dqm);
workarea->sdma_activity_counter += pdd->sdma_past_activity_counter;
list_for_each_entry(q, &qpd->queues_list, list) {
if (list_empty(&sdma_q_list.list))
break;
if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
(q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
continue;
list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
if (((uint64_t __user *)q->properties.read_ptr == sdma_q->rptr) &&
(sdma_q->queue_id == q->properties.queue_id)) {
list_del(&sdma_q->list);
kfree(sdma_q);
break;
}
}
}
dqm_unlock(dqm);
/*
* If temp list is not empty, it implies some queues got deleted
* from qpd->queues_list during SDMA usage read. Subtract the SDMA
* count for each node from the total SDMA count.
*/
list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
workarea->sdma_activity_counter -= sdma_q->sdma_val;
list_del(&sdma_q->list);
kfree(sdma_q);
}
return;
cleanup:
list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
list_del(&sdma_q->list);
kfree(sdma_q);
}
}
/**
* kfd_get_cu_occupancy - Collect number of waves in-flight on this device
* by current process. Translates acquired wave count into number of compute units
* that are occupied.
*
* @attr: Handle of attribute that allows reporting of wave count. The attribute
* handle encapsulates GPU device it is associated with, thereby allowing collection
* of waves in flight, etc
* @buffer: Handle of user provided buffer updated with wave count
*
* Return: Number of bytes written to user buffer or an error value
*/
static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
{
int cu_cnt;
int wave_cnt;
int max_waves_per_cu;
struct kfd_node *dev = NULL;
struct kfd_process *proc = NULL;
struct kfd_process_device *pdd = NULL;
pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy);
dev = pdd->dev;
if (dev->kfd2kgd->get_cu_occupancy == NULL)
return -EINVAL;
cu_cnt = 0;
proc = pdd->process;
if (pdd->qpd.queue_count == 0) {
pr_debug("Gpu-Id: %d has no active queues for process %d\n",
dev->id, proc->pasid);
return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
}
	/* Collect wave count from the device if it supports it */
wave_cnt = 0;
max_waves_per_cu = 0;
dev->kfd2kgd->get_cu_occupancy(dev->adev, proc->pasid, &wave_cnt,
&max_waves_per_cu, 0);
/* Translate wave count to number of compute units */
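	/* Round up: a partially occupied CU counts as occupied. */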
cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
}
static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
char *buffer)
{
if (strcmp(attr->name, "pasid") == 0) {
struct kfd_process *p = container_of(attr, struct kfd_process,
attr_pasid);
return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
} else if (strncmp(attr->name, "vram_", 5) == 0) {
struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
attr_vram);
return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
} else if (strncmp(attr->name, "sdma_", 5) == 0) {
struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
attr_sdma);
struct kfd_sdma_activity_handler_workarea sdma_activity_work_handler;
INIT_WORK(&sdma_activity_work_handler.sdma_activity_work,
kfd_sdma_activity_worker);
sdma_activity_work_handler.pdd = pdd;
sdma_activity_work_handler.sdma_activity_counter = 0;
schedule_work(&sdma_activity_work_handler.sdma_activity_work);
flush_work(&sdma_activity_work_handler.sdma_activity_work);
return snprintf(buffer, PAGE_SIZE, "%llu\n",
(sdma_activity_work_handler.sdma_activity_counter)/
SDMA_ACTIVITY_DIVISOR);
} else {
pr_err("Invalid attribute");
return -EINVAL;
}
return 0;
}
static void kfd_procfs_kobj_release(struct kobject *kobj)
{
kfree(kobj);
}
static const struct sysfs_ops kfd_procfs_ops = {
.show = kfd_procfs_show,
};
static const struct kobj_type procfs_type = {
.release = kfd_procfs_kobj_release,
.sysfs_ops = &kfd_procfs_ops,
};
void kfd_procfs_init(void)
{
int ret = 0;
procfs.kobj = kfd_alloc_struct(procfs.kobj);
if (!procfs.kobj)
return;
ret = kobject_init_and_add(procfs.kobj, &procfs_type,
&kfd_device->kobj, "proc");
if (ret) {
pr_warn("Could not create procfs proc folder");
/* If we fail to create the procfs, clean up */
kfd_procfs_shutdown();
}
}
void kfd_procfs_shutdown(void)
{
if (procfs.kobj) {
kobject_del(procfs.kobj);
kobject_put(procfs.kobj);
procfs.kobj = NULL;
}
}
static ssize_t kfd_procfs_queue_show(struct kobject *kobj,
struct attribute *attr, char *buffer)
{
struct queue *q = container_of(kobj, struct queue, kobj);
if (!strcmp(attr->name, "size"))
return snprintf(buffer, PAGE_SIZE, "%llu",
q->properties.queue_size);
else if (!strcmp(attr->name, "type"))
return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type);
else if (!strcmp(attr->name, "gpuid"))
return snprintf(buffer, PAGE_SIZE, "%u", q->device->id);
else
pr_err("Invalid attribute");
return 0;
}
static ssize_t kfd_procfs_stats_show(struct kobject *kobj,
struct attribute *attr, char *buffer)
{
if (strcmp(attr->name, "evicted_ms") == 0) {
struct kfd_process_device *pdd = container_of(attr,
struct kfd_process_device,
attr_evict);
uint64_t evict_jiffies;
evict_jiffies = atomic64_read(&pdd->evict_duration_counter);
return snprintf(buffer,
PAGE_SIZE,
"%llu\n",
jiffies64_to_msecs(evict_jiffies));
/* Sysfs handle that gets CU occupancy is per device */
} else if (strcmp(attr->name, "cu_occupancy") == 0) {
return kfd_get_cu_occupancy(attr, buffer);
} else {
pr_err("Invalid attribute");
}
return 0;
}
static ssize_t kfd_sysfs_counters_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
struct kfd_process_device *pdd;
if (!strcmp(attr->name, "faults")) {
pdd = container_of(attr, struct kfd_process_device,
attr_faults);
return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->faults));
}
if (!strcmp(attr->name, "page_in")) {
pdd = container_of(attr, struct kfd_process_device,
attr_page_in);
return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_in));
}
if (!strcmp(attr->name, "page_out")) {
pdd = container_of(attr, struct kfd_process_device,
attr_page_out);
return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_out));
}
return 0;
}
static struct attribute attr_queue_size = {
.name = "size",
.mode = KFD_SYSFS_FILE_MODE
};
static struct attribute attr_queue_type = {
.name = "type",
.mode = KFD_SYSFS_FILE_MODE
};
static struct attribute attr_queue_gpuid = {
.name = "gpuid",
.mode = KFD_SYSFS_FILE_MODE
};
static struct attribute *procfs_queue_attrs[] = {
&attr_queue_size,
&attr_queue_type,
&attr_queue_gpuid,
NULL
};
ATTRIBUTE_GROUPS(procfs_queue);
static const struct sysfs_ops procfs_queue_ops = {
.show = kfd_procfs_queue_show,
};
static const struct kobj_type procfs_queue_type = {
.sysfs_ops = &procfs_queue_ops,
.default_groups = procfs_queue_groups,
};
static const struct sysfs_ops procfs_stats_ops = {
.show = kfd_procfs_stats_show,
};
static const struct kobj_type procfs_stats_type = {
.sysfs_ops = &procfs_stats_ops,
.release = kfd_procfs_kobj_release,
};
static const struct sysfs_ops sysfs_counters_ops = {
.show = kfd_sysfs_counters_show,
};
static const struct kobj_type sysfs_counters_type = {
.sysfs_ops = &sysfs_counters_ops,
.release = kfd_procfs_kobj_release,
};
int kfd_procfs_add_queue(struct queue *q)
{
struct kfd_process *proc;
int ret;
if (!q || !q->process)
return -EINVAL;
proc = q->process;
/* Create proc/<pid>/queues/<queue id> folder */
if (!proc->kobj_queues)
return -EFAULT;
ret = kobject_init_and_add(&q->kobj, &procfs_queue_type,
proc->kobj_queues, "%u", q->properties.queue_id);
if (ret < 0) {
pr_warn("Creating proc/<pid>/queues/%u failed",
q->properties.queue_id);
kobject_put(&q->kobj);
return ret;
}
return 0;
}
static void kfd_sysfs_create_file(struct kobject *kobj, struct attribute *attr,
char *name)
{
int ret;
if (!kobj || !attr || !name)
return;
attr->name = name;
attr->mode = KFD_SYSFS_FILE_MODE;
sysfs_attr_init(attr);
ret = sysfs_create_file(kobj, attr);
if (ret)
pr_warn("Create sysfs %s/%s failed %d", kobj->name, name, ret);
}
static void kfd_procfs_add_sysfs_stats(struct kfd_process *p)
{
int ret;
int i;
char stats_dir_filename[MAX_SYSFS_FILENAME_LEN];
if (!p || !p->kobj)
return;
/*
* Create sysfs files for each GPU:
* - proc/<pid>/stats_<gpuid>/
* - proc/<pid>/stats_<gpuid>/evicted_ms
* - proc/<pid>/stats_<gpuid>/cu_occupancy
*/
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
snprintf(stats_dir_filename, MAX_SYSFS_FILENAME_LEN,
"stats_%u", pdd->dev->id);
pdd->kobj_stats = kfd_alloc_struct(pdd->kobj_stats);
if (!pdd->kobj_stats)
return;
ret = kobject_init_and_add(pdd->kobj_stats,
&procfs_stats_type,
p->kobj,
stats_dir_filename);
if (ret) {
pr_warn("Creating KFD proc/stats_%s folder failed",
stats_dir_filename);
kobject_put(pdd->kobj_stats);
pdd->kobj_stats = NULL;
return;
}
kfd_sysfs_create_file(pdd->kobj_stats, &pdd->attr_evict,
"evicted_ms");
/* Add sysfs file to report compute unit occupancy */
if (pdd->dev->kfd2kgd->get_cu_occupancy)
kfd_sysfs_create_file(pdd->kobj_stats,
&pdd->attr_cu_occupancy,
"cu_occupancy");
}
}
static void kfd_procfs_add_sysfs_counters(struct kfd_process *p)
{
int ret = 0;
int i;
char counters_dir_filename[MAX_SYSFS_FILENAME_LEN];
if (!p || !p->kobj)
return;
/*
* Create sysfs files for each GPU which supports SVM
* - proc/<pid>/counters_<gpuid>/
* - proc/<pid>/counters_<gpuid>/faults
* - proc/<pid>/counters_<gpuid>/page_in
* - proc/<pid>/counters_<gpuid>/page_out
*/
for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
struct kfd_process_device *pdd = p->pdds[i];
struct kobject *kobj_counters;
snprintf(counters_dir_filename, MAX_SYSFS_FILENAME_LEN,
"counters_%u", pdd->dev->id);
kobj_counters = kfd_alloc_struct(kobj_counters);
if (!kobj_counters)
return;
ret = kobject_init_and_add(kobj_counters, &sysfs_counters_type,
p->kobj, counters_dir_filename);
if (ret) {
pr_warn("Creating KFD proc/%s folder failed",
counters_dir_filename);
kobject_put(kobj_counters);
return;
}
pdd->kobj_counters = kobj_counters;
kfd_sysfs_create_file(kobj_counters, &pdd->attr_faults,
"faults");
kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_in,
"page_in");
kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_out,
"page_out");
}
}
static void kfd_procfs_add_sysfs_files(struct kfd_process *p)
{
int i;
if (!p || !p->kobj)
return;
/*
* Create sysfs files for each GPU:
* - proc/<pid>/vram_<gpuid>
* - proc/<pid>/sdma_<gpuid>
*/
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u",
pdd->dev->id);
kfd_sysfs_create_file(p->kobj, &pdd->attr_vram,
pdd->vram_filename);
snprintf(pdd->sdma_filename, MAX_SYSFS_FILENAME_LEN, "sdma_%u",
pdd->dev->id);
kfd_sysfs_create_file(p->kobj, &pdd->attr_sdma,
pdd->sdma_filename);
}
}
void kfd_procfs_del_queue(struct queue *q)
{
if (!q)
return;
kobject_del(&q->kobj);
kobject_put(&q->kobj);
}
int kfd_process_create_wq(void)
{
if (!kfd_process_wq)
kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
if (!kfd_restore_wq)
kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);
if (!kfd_process_wq || !kfd_restore_wq) {
kfd_process_destroy_wq();
return -ENOMEM;
}
return 0;
}
void kfd_process_destroy_wq(void)
{
if (kfd_process_wq) {
destroy_workqueue(kfd_process_wq);
kfd_process_wq = NULL;
}
if (kfd_restore_wq) {
destroy_workqueue(kfd_restore_wq);
kfd_restore_wq = NULL;
}
}
static void kfd_process_free_gpuvm(struct kgd_mem *mem,
struct kfd_process_device *pdd, void **kptr)
{
struct kfd_node *dev = pdd->dev;
if (kptr && *kptr) {
amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
*kptr = NULL;
}
amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->adev, mem, pdd->drm_priv);
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, mem, pdd->drm_priv,
NULL);
}
/* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
* This function should be only called right after the process
* is created and when kfd_processes_mutex is still being held
* to avoid concurrency. Because of that exclusiveness, we do
* not need to take p->mutex.
*/
static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
uint64_t gpu_va, uint32_t size,
uint32_t flags, struct kgd_mem **mem, void **kptr)
{
struct kfd_node *kdev = pdd->dev;
int err;
err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->adev, gpu_va, size,
pdd->drm_priv, mem, NULL,
flags, false);
if (err)
goto err_alloc_mem;
err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->adev, *mem,
pdd->drm_priv);
if (err)
goto err_map_mem;
err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->adev, *mem, true);
if (err) {
pr_debug("Sync memory failed, wait interrupted by user signal\n");
goto sync_memory_failed;
}
if (kptr) {
err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(
(struct kgd_mem *)*mem, kptr, NULL);
if (err) {
pr_debug("Map GTT BO to kernel failed\n");
goto sync_memory_failed;
}
}
return err;
sync_memory_failed:
amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kdev->adev, *mem, pdd->drm_priv);
err_map_mem:
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->adev, *mem, pdd->drm_priv,
NULL);
err_alloc_mem:
*mem = NULL;
*kptr = NULL;
return err;
}
/* kfd_process_device_reserve_ib_mem - Reserve memory inside the
 * process for IB usage. The memory reserved is for KFD to submit
 * IBs to AMDGPU from the kernel. If the memory is reserved
* successfully, ib_kaddr will have the CPU/kernel
* address. Check ib_kaddr before accessing the memory.
*/
static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
{
struct qcm_process_device *qpd = &pdd->qpd;
uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT |
KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
struct kgd_mem *mem;
void *kaddr;
int ret;
if (qpd->ib_kaddr || !qpd->ib_base)
return 0;
/* ib_base is only set for dGPU */
ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
&mem, &kaddr);
if (ret)
return ret;
qpd->ib_mem = mem;
qpd->ib_kaddr = kaddr;
return 0;
}
static void kfd_process_device_destroy_ib_mem(struct kfd_process_device *pdd)
{
struct qcm_process_device *qpd = &pdd->qpd;
if (!qpd->ib_kaddr || !qpd->ib_base)
return;
kfd_process_free_gpuvm(qpd->ib_mem, pdd, &qpd->ib_kaddr);
}
struct kfd_process *kfd_create_process(struct task_struct *thread)
{
struct kfd_process *process;
int ret;
if (!(thread->mm && mmget_not_zero(thread->mm)))
return ERR_PTR(-EINVAL);
/* Only the pthreads threading model is supported. */
if (thread->group_leader->mm != thread->mm) {
mmput(thread->mm);
return ERR_PTR(-EINVAL);
}
/*
* take kfd processes mutex before starting of process creation
* so there won't be a case where two threads of the same process
* create two kfd_process structures
*/
mutex_lock(&kfd_processes_mutex);
if (kfd_is_locked()) {
mutex_unlock(&kfd_processes_mutex);
pr_debug("KFD is locked! Cannot create process");
return ERR_PTR(-EINVAL);
}
/* A prior open of /dev/kfd could have already created the process. */
process = find_process(thread, false);
if (process) {
pr_debug("Process already found\n");
} else {
process = create_process(thread);
if (IS_ERR(process))
goto out;
if (!procfs.kobj)
goto out;
process->kobj = kfd_alloc_struct(process->kobj);
if (!process->kobj) {
pr_warn("Creating procfs kobject failed");
goto out;
}
ret = kobject_init_and_add(process->kobj, &procfs_type,
procfs.kobj, "%d",
(int)process->lead_thread->pid);
if (ret) {
pr_warn("Creating procfs pid directory failed");
kobject_put(process->kobj);
goto out;
}
kfd_sysfs_create_file(process->kobj, &process->attr_pasid,
"pasid");
process->kobj_queues = kobject_create_and_add("queues",
process->kobj);
if (!process->kobj_queues)
pr_warn("Creating KFD proc/queues folder failed");
kfd_procfs_add_sysfs_stats(process);
kfd_procfs_add_sysfs_files(process);
kfd_procfs_add_sysfs_counters(process);
init_waitqueue_head(&process->wait_irq_drain);
}
out:
if (!IS_ERR(process))
kref_get(&process->ref);
mutex_unlock(&kfd_processes_mutex);
mmput(thread->mm);
return process;
}
struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
struct kfd_process *process;
if (!thread->mm)
return ERR_PTR(-EINVAL);
/* Only the pthreads threading model is supported. */
if (thread->group_leader->mm != thread->mm)
return ERR_PTR(-EINVAL);
process = find_process(thread, false);
if (!process)
return ERR_PTR(-EINVAL);
return process;
}
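/* Look up a kfd_process by mm_struct. Callers take the kfd_processes_srcu
 * read lock around this lookup.
 */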
static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
struct kfd_process *process;
hash_for_each_possible_rcu(kfd_processes_table, process,
kfd_processes, (uintptr_t)mm)
if (process->mm == mm)
return process;
return NULL;
}
static struct kfd_process *find_process(const struct task_struct *thread,
bool ref)
{
struct kfd_process *p;
int idx;
idx = srcu_read_lock(&kfd_processes_srcu);
p = find_process_by_mm(thread->mm);
if (p && ref)
kref_get(&p->ref);
srcu_read_unlock(&kfd_processes_srcu, idx);
return p;
}
void kfd_unref_process(struct kfd_process *p)
{
kref_put(&p->ref, kfd_process_ref_release);
}
/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid)
{
struct task_struct *task = NULL;
struct kfd_process *p = NULL;
if (!pid) {
task = current;
get_task_struct(task);
} else {
task = get_pid_task(pid, PIDTYPE_PID);
}
if (task) {
p = find_process(task, true);
put_task_struct(task);
}
return p;
}
static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
{
struct kfd_process *p = pdd->process;
void *mem;
int id;
int i;
/*
* Remove all handles from idr and release appropriate
* local memory object
*/
idr_for_each_entry(&pdd->alloc_idr, mem, id) {
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *peer_pdd = p->pdds[i];
if (!peer_pdd->drm_priv)
continue;
amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
peer_pdd->dev->adev, mem, peer_pdd->drm_priv);
}
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, mem,
pdd->drm_priv, NULL);
kfd_process_device_remove_obj_handle(pdd, id);
}
}
/*
* Just kunmap and unpin signal BO here. It will be freed in
* kfd_process_free_outstanding_kfd_bos()
*/
static void kfd_process_kunmap_signal_bo(struct kfd_process *p)
{
struct kfd_process_device *pdd;
struct kfd_node *kdev;
void *mem;
kdev = kfd_device_by_id(GET_GPU_ID(p->signal_handle));
if (!kdev)
return;
mutex_lock(&p->mutex);
pdd = kfd_get_process_device_data(kdev, p);
if (!pdd)
goto out;
mem = kfd_process_device_translate_handle(
pdd, GET_IDR_HANDLE(p->signal_handle));
if (!mem)
goto out;
amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
out:
mutex_unlock(&p->mutex);
}
static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
{
int i;
for (i = 0; i < p->n_pdds; i++)
kfd_process_device_free_bos(p->pdds[i]);
}
static void kfd_process_destroy_pdds(struct kfd_process *p)
{
int i;
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
pdd->dev->id, p->pasid);
kfd_process_device_destroy_cwsr_dgpu(pdd);
kfd_process_device_destroy_ib_mem(pdd);
if (pdd->drm_file) {
amdgpu_amdkfd_gpuvm_release_process_vm(
pdd->dev->adev, pdd->drm_priv);
fput(pdd->drm_file);
}
if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
get_order(KFD_CWSR_TBA_TMA_SIZE));
idr_destroy(&pdd->alloc_idr);
kfd_free_process_doorbells(pdd->dev->kfd, pdd);
if (pdd->dev->kfd->shared_resources.enable_mes)
amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev,
pdd->proc_ctx_bo);
/*
* before destroying pdd, make sure to report availability
* for auto suspend
*/
if (pdd->runtime_inuse) {
pm_runtime_mark_last_busy(adev_to_drm(pdd->dev->adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(pdd->dev->adev)->dev);
pdd->runtime_inuse = false;
}
kfree(pdd);
p->pdds[i] = NULL;
}
p->n_pdds = 0;
}
static void kfd_process_remove_sysfs(struct kfd_process *p)
{
struct kfd_process_device *pdd;
int i;
if (!p->kobj)
return;
sysfs_remove_file(p->kobj, &p->attr_pasid);
kobject_del(p->kobj_queues);
kobject_put(p->kobj_queues);
p->kobj_queues = NULL;
for (i = 0; i < p->n_pdds; i++) {
pdd = p->pdds[i];
sysfs_remove_file(p->kobj, &pdd->attr_vram);
sysfs_remove_file(p->kobj, &pdd->attr_sdma);
sysfs_remove_file(pdd->kobj_stats, &pdd->attr_evict);
if (pdd->dev->kfd2kgd->get_cu_occupancy)
sysfs_remove_file(pdd->kobj_stats,
&pdd->attr_cu_occupancy);
kobject_del(pdd->kobj_stats);
kobject_put(pdd->kobj_stats);
pdd->kobj_stats = NULL;
}
for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
pdd = p->pdds[i];
sysfs_remove_file(pdd->kobj_counters, &pdd->attr_faults);
sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_in);
sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_out);
kobject_del(pdd->kobj_counters);
kobject_put(pdd->kobj_counters);
pdd->kobj_counters = NULL;
}
kobject_del(p->kobj);
kobject_put(p->kobj);
p->kobj = NULL;
}
/* No process locking is needed in this function, because the process
* is not findable any more. We must assume that no other thread is
* using it any more, otherwise we couldn't safely free the process
* structure in the end.
*/
static void kfd_process_wq_release(struct work_struct *work)
{
struct kfd_process *p = container_of(work, struct kfd_process,
release_work);
kfd_process_dequeue_from_all_devices(p);
pqm_uninit(&p->pqm);
/* Signal the eviction fence after user mode queues are
* destroyed. This allows any BOs to be freed without
* triggering pointless evictions or waiting for fences.
*/
dma_fence_signal(p->ef);
kfd_process_remove_sysfs(p);
kfd_process_kunmap_signal_bo(p);
kfd_process_free_outstanding_kfd_bos(p);
svm_range_list_fini(p);
kfd_process_destroy_pdds(p);
dma_fence_put(p->ef);
kfd_event_free_process(p);
kfd_pasid_free(p->pasid);
mutex_destroy(&p->mutex);
put_task_struct(p->lead_thread);
kfree(p);
}
static void kfd_process_ref_release(struct kref *ref)
{
struct kfd_process *p = container_of(ref, struct kfd_process, ref);
INIT_WORK(&p->release_work, kfd_process_wq_release);
queue_work(kfd_process_wq, &p->release_work);
}
static struct mmu_notifier *kfd_process_alloc_notifier(struct mm_struct *mm)
{
int idx = srcu_read_lock(&kfd_processes_srcu);
struct kfd_process *p = find_process_by_mm(mm);
srcu_read_unlock(&kfd_processes_srcu, idx);
return p ? &p->mmu_notifier : ERR_PTR(-ESRCH);
}
static void kfd_process_free_notifier(struct mmu_notifier *mn)
{
kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
}
static void kfd_process_notifier_release_internal(struct kfd_process *p)
{
int i;
cancel_delayed_work_sync(&p->eviction_work);
cancel_delayed_work_sync(&p->restore_work);
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
/* re-enable GFX OFF since runtime enable with ttmp setup disabled it. */
if (!kfd_dbg_is_rlc_restore_supported(pdd->dev) && p->runtime_info.ttmp_setup)
amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
}
/* Indicate to other users that MM is no longer valid */
p->mm = NULL;
kfd_dbg_trap_disable(p);
if (atomic_read(&p->debugged_process_count) > 0) {
struct kfd_process *target;
unsigned int temp;
int idx = srcu_read_lock(&kfd_processes_srcu);
hash_for_each_rcu(kfd_processes_table, temp, target, kfd_processes) {
if (target->debugger_process && target->debugger_process == p) {
mutex_lock_nested(&target->mutex, 1);
kfd_dbg_trap_disable(target);
mutex_unlock(&target->mutex);
if (atomic_read(&p->debugged_process_count) == 0)
break;
}
}
srcu_read_unlock(&kfd_processes_srcu, idx);
}
mmu_notifier_put(&p->mmu_notifier);
}
static void kfd_process_notifier_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{
struct kfd_process *p;
/*
	 * The kfd_process structure cannot be freed because the
	 * mmu_notifier SRCU is read-locked
*/
p = container_of(mn, struct kfd_process, mmu_notifier);
if (WARN_ON(p->mm != mm))
return;
mutex_lock(&kfd_processes_mutex);
/*
* Do early return if table is empty.
*
* This could potentially happen if this function is called concurrently
	 * by mmu_notifier and by kfd_cleanup_processes().
*
*/
if (hash_empty(kfd_processes_table)) {
mutex_unlock(&kfd_processes_mutex);
return;
}
hash_del_rcu(&p->kfd_processes);
mutex_unlock(&kfd_processes_mutex);
synchronize_srcu(&kfd_processes_srcu);
kfd_process_notifier_release_internal(p);
}
static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
.release = kfd_process_notifier_release,
.alloc_notifier = kfd_process_alloc_notifier,
.free_notifier = kfd_process_free_notifier,
};
/*
 * This code handles the case when the driver is being unloaded before all
 * mm_structs are released. We need to safely free the kfd_process and
* avoid race conditions with mmu_notifier that might try to free them.
*
*/
void kfd_cleanup_processes(void)
{
struct kfd_process *p;
struct hlist_node *p_temp;
unsigned int temp;
HLIST_HEAD(cleanup_list);
/*
* Move all remaining kfd_process from the process table to a
* temp list for processing. Once done, callback from mmu_notifier
* release will not see the kfd_process in the table and do early return,
* avoiding double free issues.
*/
mutex_lock(&kfd_processes_mutex);
hash_for_each_safe(kfd_processes_table, temp, p_temp, p, kfd_processes) {
hash_del_rcu(&p->kfd_processes);
synchronize_srcu(&kfd_processes_srcu);
hlist_add_head(&p->kfd_processes, &cleanup_list);
}
mutex_unlock(&kfd_processes_mutex);
hlist_for_each_entry_safe(p, p_temp, &cleanup_list, kfd_processes)
kfd_process_notifier_release_internal(p);
/*
* Ensures that all outstanding free_notifier get called, triggering
* the release of the kfd_process struct.
*/
mmu_notifier_synchronize();
}
int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
unsigned long offset;
int i;
if (p->has_cwsr)
return 0;
for (i = 0; i < p->n_pdds; i++) {
struct kfd_node *dev = p->pdds[i]->dev;
struct qcm_process_device *qpd = &p->pdds[i]->qpd;
if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
continue;
offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id);
qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
MAP_SHARED, offset);
if (IS_ERR_VALUE(qpd->tba_addr)) {
int err = qpd->tba_addr;
pr_err("Failure to set tba address. error %d.\n", err);
qpd->tba_addr = 0;
qpd->cwsr_kaddr = NULL;
return err;
}
memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);
kfd_process_set_trap_debug_flag(qpd, p->debug_trap_enabled);
qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
}
p->has_cwsr = true;
return 0;
}
static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
{
struct kfd_node *dev = pdd->dev;
struct qcm_process_device *qpd = &pdd->qpd;
uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
| KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
struct kgd_mem *mem;
void *kaddr;
int ret;
if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
return 0;
/* cwsr_base is only set for dGPU */
ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
KFD_CWSR_TBA_TMA_SIZE, flags, &mem, &kaddr);
if (ret)
return ret;
qpd->cwsr_mem = mem;
qpd->cwsr_kaddr = kaddr;
qpd->tba_addr = qpd->cwsr_base;
memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);
kfd_process_set_trap_debug_flag(&pdd->qpd,
pdd->process->debug_trap_enabled);
qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
return 0;
}
static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd)
{
struct kfd_node *dev = pdd->dev;
struct qcm_process_device *qpd = &pdd->qpd;
if (!dev->kfd->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base)
return;
kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, &qpd->cwsr_kaddr);
}
void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
uint64_t tba_addr,
uint64_t tma_addr)
{
if (qpd->cwsr_kaddr) {
/* KFD trap handler is bound, record as second-level TBA/TMA
* in first-level TMA. First-level trap will jump to second.
*/
uint64_t *tma =
(uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
tma[0] = tba_addr;
tma[1] = tma_addr;
} else {
/* No trap handler bound, bind as first-level TBA/TMA. */
qpd->tba_addr = tba_addr;
qpd->tma_addr = tma_addr;
}
}
bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
{
int i;
/* On most GFXv9 GPUs, the retry mode in the SQ must match the
* boot time retry setting. Mixing processes with different
* XNACK/retry settings can hang the GPU.
*
* Different GPUs can have different noretry settings depending
* on HW bugs or limitations. We need to find at least one
* XNACK mode for this process that's compatible with all GPUs.
* Fortunately GPUs with retry enabled (noretry=0) can run code
* built for XNACK-off. On GFXv9 it may perform slower.
*
* Therefore applications built for XNACK-off can always be
* supported and will be our fallback if any GPU does not
* support retry.
*/
for (i = 0; i < p->n_pdds; i++) {
struct kfd_node *dev = p->pdds[i]->dev;
/* Only consider GFXv9 and higher GPUs. Older GPUs don't
* support the SVM APIs and don't need to be considered
* for the XNACK mode selection.
*/
if (!KFD_IS_SOC15(dev))
continue;
/* Aldebaran can always support XNACK because it can support
* per-process XNACK mode selection. But let the dev->noretry
* setting still influence the default XNACK mode.
*/
if (supported && KFD_SUPPORT_XNACK_PER_PROCESS(dev))
continue;
/* GFXv10 and later GPUs do not support shader preemption
* during page faults. This can lead to poor QoS for queue
* management and memory-manager-related preemptions or
* even deadlocks.
*/
if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
return false;
if (dev->kfd->noretry)
return false;
}
return true;
}
void kfd_process_set_trap_debug_flag(struct qcm_process_device *qpd,
bool enabled)
{
if (qpd->cwsr_kaddr) {
uint64_t *tma =
(uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
tma[2] = enabled;
}
}
/*
* On return the kfd_process is fully operational and will be freed when the
* mm is released
*/
static struct kfd_process *create_process(const struct task_struct *thread)
{
struct kfd_process *process;
struct mmu_notifier *mn;
int err = -ENOMEM;
process = kzalloc(sizeof(*process), GFP_KERNEL);
if (!process)
goto err_alloc_process;
kref_init(&process->ref);
mutex_init(&process->mutex);
process->mm = thread->mm;
process->lead_thread = thread->group_leader;
process->n_pdds = 0;
process->queues_paused = false;
INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
process->last_restore_timestamp = get_jiffies_64();
err = kfd_event_init_process(process);
if (err)
goto err_event_init;
process->is_32bit_user_mode = in_compat_syscall();
process->debug_trap_enabled = false;
process->debugger_process = NULL;
process->exception_enable_mask = 0;
atomic_set(&process->debugged_process_count, 0);
sema_init(&process->runtime_enable_sema, 0);
process->pasid = kfd_pasid_alloc();
if (process->pasid == 0) {
err = -ENOSPC;
goto err_alloc_pasid;
}
err = pqm_init(&process->pqm, process);
if (err != 0)
goto err_process_pqm_init;
/* init process apertures*/
err = kfd_init_apertures(process);
if (err != 0)
goto err_init_apertures;
/* Check XNACK support after PDDs are created in kfd_init_apertures */
process->xnack_enabled = kfd_process_xnack_mode(process, false);
err = svm_range_list_init(process);
if (err)
goto err_init_svm_range_list;
/* alloc_notifier needs to find the process in the hash table */
hash_add_rcu(kfd_processes_table, &process->kfd_processes,
(uintptr_t)process->mm);
	/* Prevent free_notifier from starting kfd_process_wq_release if
	 * mmu_notifier_get fails because of a pending signal.
*/
kref_get(&process->ref);
/* MMU notifier registration must be the last call that can fail
* because after this point we cannot unwind the process creation.
* After this point, mmu_notifier_put will trigger the cleanup by
* dropping the last process reference in the free_notifier.
*/
mn = mmu_notifier_get(&kfd_process_mmu_notifier_ops, process->mm);
if (IS_ERR(mn)) {
err = PTR_ERR(mn);
goto err_register_notifier;
}
BUG_ON(mn != &process->mmu_notifier);
kfd_unref_process(process);
get_task_struct(process->lead_thread);
INIT_WORK(&process->debug_event_workarea, debug_event_write_work_handler);
return process;
err_register_notifier:
hash_del_rcu(&process->kfd_processes);
svm_range_list_fini(process);
err_init_svm_range_list:
kfd_process_free_outstanding_kfd_bos(process);
kfd_process_destroy_pdds(process);
err_init_apertures:
pqm_uninit(&process->pqm);
err_process_pqm_init:
kfd_pasid_free(process->pasid);
err_alloc_pasid:
kfd_event_free_process(process);
err_event_init:
mutex_destroy(&process->mutex);
kfree(process);
err_alloc_process:
return ERR_PTR(err);
}
struct kfd_process_device *kfd_get_process_device_data(struct kfd_node *dev,
struct kfd_process *p)
{
int i;
for (i = 0; i < p->n_pdds; i++)
if (p->pdds[i]->dev == dev)
return p->pdds[i];
return NULL;
}
struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
struct kfd_process *p)
{
struct kfd_process_device *pdd = NULL;
int retval = 0;
if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE))
return NULL;
pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
if (!pdd)
return NULL;
pdd->dev = dev;
INIT_LIST_HEAD(&pdd->qpd.queues_list);
INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
pdd->qpd.dqm = dev->dqm;
pdd->qpd.pqm = &p->pqm;
pdd->qpd.evicted = 0;
pdd->qpd.mapped_gws_queue = false;
pdd->process = p;
pdd->bound = PDD_UNBOUND;
pdd->already_dequeued = false;
pdd->runtime_inuse = false;
pdd->vram_usage = 0;
pdd->sdma_past_activity_counter = 0;
pdd->user_gpu_id = dev->id;
atomic64_set(&pdd->evict_duration_counter, 0);
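	/* The MES scheduler needs a per-process context buffer in GTT. */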
if (dev->kfd->shared_resources.enable_mes) {
retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
AMDGPU_MES_PROC_CTX_SIZE,
&pdd->proc_ctx_bo,
&pdd->proc_ctx_gpu_addr,
&pdd->proc_ctx_cpu_ptr,
false);
if (retval) {
pr_err("failed to allocate process context bo\n");
goto err_free_pdd;
}
memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
}
p->pdds[p->n_pdds++] = pdd;
if (kfd_dbg_is_per_vmid_supported(pdd->dev))
pdd->spi_dbg_override = pdd->dev->kfd2kgd->disable_debug_trap(
pdd->dev->adev,
false,
0);
/* Init idr used for memory handle translation */
idr_init(&pdd->alloc_idr);
return pdd;
err_free_pdd:
kfree(pdd);
return NULL;
}
/**
* kfd_process_device_init_vm - Initialize a VM for a process-device
*
* @pdd: The process-device
* @drm_file: Optional pointer to a DRM file descriptor
*
* If @drm_file is specified, it will be used to acquire the VM from
* that file descriptor. If successful, the @pdd takes ownership of
* the file descriptor.
*
* If @drm_file is NULL, a new VM is created.
*
* Returns 0 on success, -errno on failure.
*/
int kfd_process_device_init_vm(struct kfd_process_device *pdd,
struct file *drm_file)
{
struct amdgpu_fpriv *drv_priv;
struct amdgpu_vm *avm;
struct kfd_process *p;
struct kfd_node *dev;
int ret;
if (!drm_file)
return -EINVAL;
if (pdd->drm_priv)
return -EBUSY;
ret = amdgpu_file_to_fpriv(drm_file, &drv_priv);
if (ret)
return ret;
avm = &drv_priv->vm;
p = pdd->process;
dev = pdd->dev;
ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, avm,
&p->kgd_process_info,
&p->ef);
if (ret) {
pr_err("Failed to create process VM object\n");
return ret;
}
pdd->drm_priv = drm_file->private_data;
atomic64_set(&pdd->tlb_seq, 0);
ret = kfd_process_device_reserve_ib_mem(pdd);
if (ret)
goto err_reserve_ib_mem;
ret = kfd_process_device_init_cwsr_dgpu(pdd);
if (ret)
goto err_init_cwsr;
ret = amdgpu_amdkfd_gpuvm_set_vm_pasid(dev->adev, avm, p->pasid);
if (ret)
goto err_set_pasid;
pdd->drm_file = drm_file;
return 0;
err_set_pasid:
kfd_process_device_destroy_cwsr_dgpu(pdd);
err_init_cwsr:
kfd_process_device_destroy_ib_mem(pdd);
err_reserve_ib_mem:
pdd->drm_priv = NULL;
amdgpu_amdkfd_gpuvm_destroy_cb(dev->adev, avm);
return ret;
}
/*
* Direct the IOMMU to bind the process (specifically the pasid->mm)
* to the device.
* Unbinding occurs when the process dies or the device is removed.
*
* Assumes that the process lock is held.
*/
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev,
struct kfd_process *p)
{
struct kfd_process_device *pdd;
int err;
pdd = kfd_get_process_device_data(dev, p);
if (!pdd) {
pr_err("Process device data doesn't exist\n");
return ERR_PTR(-ENOMEM);
}
if (!pdd->drm_priv)
return ERR_PTR(-ENODEV);
/*
	 * Signal the runtime-pm system to auto resume and prevent
	 * further runtime suspend once the device pdd is created,
	 * until the pdd is destroyed.
*/
if (!pdd->runtime_inuse) {
err = pm_runtime_get_sync(adev_to_drm(dev->adev)->dev);
if (err < 0) {
pm_runtime_put_autosuspend(adev_to_drm(dev->adev)->dev);
return ERR_PTR(err);
}
}
/*
* make sure that runtime_usage counter is incremented just once
* per pdd
*/
pdd->runtime_inuse = true;
return pdd;
}
/* Create specific handle mapped to mem from process local memory idr
* Assumes that the process lock is held.
*/
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
void *mem)
{
return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
}
/* Translate specific handle from process local memory idr
* Assumes that the process lock is held.
*/
void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
int handle)
{
if (handle < 0)
return NULL;
return idr_find(&pdd->alloc_idr, handle);
}
/* Remove specific handle from process local memory idr
* Assumes that the process lock is held.
*/
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
int handle)
{
if (handle >= 0)
idr_remove(&pdd->alloc_idr, handle);
}
/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid)
{
struct kfd_process *p, *ret_p = NULL;
unsigned int temp;
int idx = srcu_read_lock(&kfd_processes_srcu);
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
if (p->pasid == pasid) {
kref_get(&p->ref);
ret_p = p;
break;
}
}
srcu_read_unlock(&kfd_processes_srcu, idx);
return ret_p;
}
/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
{
struct kfd_process *p;
int idx = srcu_read_lock(&kfd_processes_srcu);
p = find_process_by_mm(mm);
if (p)
kref_get(&p->ref);
srcu_read_unlock(&kfd_processes_srcu, idx);
return p;
}
/* kfd_process_evict_queues - Evict all user queues of a process
*
* Eviction is reference-counted per process-device. This means multiple
* evictions from different sources can be nested safely.
*/
int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger)
{
int r = 0;
int i;
unsigned int n_evicted = 0;
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
kfd_smi_event_queue_eviction(pdd->dev, p->lead_thread->pid,
trigger);
r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
&pdd->qpd);
		/* evict_process_queues() returns -EIO if HWS is hung or the
		 * ASIC is resetting. In that case keep all queues in the
		 * evicted state to prevent them from being added back, since
		 * they were not actually saved.
		 */
if (r && r != -EIO) {
pr_err("Failed to evict process queues\n");
goto fail;
}
n_evicted++;
}
return r;
fail:
/* To keep state consistent, roll back partial eviction by
* restoring queues
*/
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
if (n_evicted == 0)
break;
kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);
if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
&pdd->qpd))
pr_err("Failed to restore queues\n");
n_evicted--;
}
return r;
}
/* kfd_process_restore_queues - Restore all user queues of a process */
int kfd_process_restore_queues(struct kfd_process *p)
{
int r, ret = 0;
int i;
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);
r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
&pdd->qpd);
if (r) {
pr_err("Failed to restore process queues\n");
if (!ret)
ret = r;
}
}
return ret;
}
int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id)
{
int i;
for (i = 0; i < p->n_pdds; i++)
if (p->pdds[i] && gpu_id == p->pdds[i]->user_gpu_id)
return i;
return -EINVAL;
}
int
kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node,
uint32_t *gpuid, uint32_t *gpuidx)
{
int i;
for (i = 0; i < p->n_pdds; i++)
if (p->pdds[i] && p->pdds[i]->dev == node) {
*gpuid = p->pdds[i]->user_gpu_id;
*gpuidx = i;
return 0;
}
return -EINVAL;
}
static void evict_process_worker(struct work_struct *work)
{
int ret;
struct kfd_process *p;
struct delayed_work *dwork;
dwork = to_delayed_work(work);
/* Process termination destroys this worker thread. So during the
* lifetime of this thread, kfd_process p will be valid
*/
p = container_of(dwork, struct kfd_process, eviction_work);
WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
"Eviction fence mismatch\n");
	/* A narrow window of overlap between the restore and evict work
	 * items is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
	 * unreserves the KFD BOs, they can be evicted again while restore
	 * still has a few more steps to finish. So wait for any previous
	 * restore work to complete.
	 */
flush_delayed_work(&p->restore_work);
pr_debug("Started evicting pasid 0x%x\n", p->pasid);
ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_TTM);
if (!ret) {
dma_fence_signal(p->ef);
dma_fence_put(p->ef);
p->ef = NULL;
queue_delayed_work(kfd_restore_wq, &p->restore_work,
msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
} else
pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
}
static void restore_process_worker(struct work_struct *work)
{
struct delayed_work *dwork;
struct kfd_process *p;
int ret = 0;
dwork = to_delayed_work(work);
/* Process termination destroys this worker thread. So during the
* lifetime of this thread, kfd_process p will be valid
*/
p = container_of(dwork, struct kfd_process, restore_work);
pr_debug("Started restoring pasid 0x%x\n", p->pasid);
	/* Set last_restore_timestamp before the restoration has succeeded.
	 * Otherwise it would have to be set by KGD (restore_process_bos)
	 * before the KFD BOs are unreserved; if not, the process could be
	 * evicted again before the timestamp is set.
	 * If restore fails, the timestamp is set again on the next attempt.
	 * This means the minimum GPU quantum is
	 * PROCESS_ACTIVE_TIME_MS - (time to execute the following two
	 * functions).
	 */
p->last_restore_timestamp = get_jiffies_64();
/* VMs may not have been acquired yet during debugging. */
if (p->kgd_process_info)
ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
&p->ef);
if (ret) {
pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
p->pasid, PROCESS_BACK_OFF_TIME_MS);
ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
WARN(!ret, "reschedule restore work failed\n");
return;
}
ret = kfd_process_restore_queues(p);
if (!ret)
pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
else
pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
}
void kfd_suspend_all_processes(void)
{
struct kfd_process *p;
unsigned int temp;
int idx = srcu_read_lock(&kfd_processes_srcu);
WARN(debug_evictions, "Evicting all processes");
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
cancel_delayed_work_sync(&p->eviction_work);
flush_delayed_work(&p->restore_work);
if (kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_SUSPEND))
pr_err("Failed to suspend process 0x%x\n", p->pasid);
dma_fence_signal(p->ef);
dma_fence_put(p->ef);
p->ef = NULL;
}
srcu_read_unlock(&kfd_processes_srcu, idx);
}
int kfd_resume_all_processes(void)
{
struct kfd_process *p;
unsigned int temp;
int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
pr_err("Restore process %d failed during resume\n",
p->pasid);
ret = -EFAULT;
}
}
srcu_read_unlock(&kfd_processes_srcu, idx);
return ret;
}
int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process,
struct vm_area_struct *vma)
{
struct kfd_process_device *pdd;
struct qcm_process_device *qpd;
if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
pr_err("Incorrect CWSR mapping size.\n");
return -EINVAL;
}
pdd = kfd_get_process_device_data(dev, process);
if (!pdd)
return -EINVAL;
qpd = &pdd->qpd;
qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
get_order(KFD_CWSR_TBA_TMA_SIZE));
if (!qpd->cwsr_kaddr) {
pr_err("Error allocating per process CWSR buffer.\n");
return -ENOMEM;
}
vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND
| VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
/* Mapping pages to user process */
return remap_pfn_range(vma, vma->vm_start,
PFN_DOWN(__pa(qpd->cwsr_kaddr)),
KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}
void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
{
struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
struct kfd_node *dev = pdd->dev;
uint32_t xcc_mask = dev->xcc_mask;
int xcc = 0;
/*
* It can be that we race and lose here, but that is extremely unlikely
* and the worst thing which could happen is that we flush the changes
* into the TLB once more which is harmless.
*/
if (atomic64_xchg(&pdd->tlb_seq, tlb_seq) == tlb_seq)
return;
if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
/* Nothing to flush until a VMID is assigned, which
* only happens when the first queue is created.
*/
if (pdd->qpd.vmid)
amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->adev,
pdd->qpd.vmid);
} else {
for_each_inst(xcc, xcc_mask)
amdgpu_amdkfd_flush_gpu_tlb_pasid(
dev->adev, pdd->process->pasid, type, xcc);
}
}
/* assumes caller holds process lock. */
int kfd_process_drain_interrupts(struct kfd_process_device *pdd)
{
uint32_t irq_drain_fence[8];
uint8_t node_id = 0;
int r = 0;
if (!KFD_IS_SOC15(pdd->dev))
return 0;
pdd->process->irq_drain_is_open = true;
memset(irq_drain_fence, 0, sizeof(irq_drain_fence));
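	/* IH drain-fence cookie layout: DW0 carries the source and client id,
	 * DW3 the PASID of the process being drained.
	 */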
irq_drain_fence[0] = (KFD_IRQ_FENCE_SOURCEID << 8) |
KFD_IRQ_FENCE_CLIENTID;
irq_drain_fence[3] = pdd->process->pasid;
/*
* For GFX 9.4.3, send the NodeId also in IH cookie DW[3]
*/
if (KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 3)) {
node_id = ffs(pdd->dev->interrupt_bitmap) - 1;
irq_drain_fence[3] |= node_id << 16;
}
	/* Flush any stale IRQs already scheduled as KFD interrupts by sending
	 * a drain fence behind them.
	 */
if (amdgpu_amdkfd_send_close_event_drain_irq(pdd->dev->adev,
irq_drain_fence)) {
pdd->process->irq_drain_is_open = false;
return 0;
}
r = wait_event_interruptible(pdd->process->wait_irq_drain,
!READ_ONCE(pdd->process->irq_drain_is_open));
if (r)
pdd->process->irq_drain_is_open = false;
return r;
}
void kfd_process_close_interrupt_drain(unsigned int pasid)
{
struct kfd_process *p;
p = kfd_lookup_process_by_pasid(pasid);
if (!p)
return;
WRITE_ONCE(p->irq_drain_is_open, false);
wake_up_all(&p->wait_irq_drain);
kfd_unref_process(p);
}
struct send_exception_work_handler_workarea {
struct work_struct work;
struct kfd_process *p;
unsigned int queue_id;
uint64_t error_reason;
};
static void send_exception_work_handler(struct work_struct *work)
{
struct send_exception_work_handler_workarea *workarea;
struct kfd_process *p;
struct queue *q;
struct mm_struct *mm;
struct kfd_context_save_area_header __user *csa_header;
uint64_t __user *err_payload_ptr;
uint64_t cur_err;
uint32_t ev_id;
workarea = container_of(work,
struct send_exception_work_handler_workarea,
work);
p = workarea->p;
mm = get_task_mm(p->lead_thread);
if (!mm)
return;
kthread_use_mm(mm);
q = pqm_get_user_queue(&p->pqm, workarea->queue_id);
if (!q)
goto out;
csa_header = (void __user *)q->properties.ctx_save_restore_area_address;
get_user(err_payload_ptr, (uint64_t __user **)&csa_header->err_payload_addr);
get_user(cur_err, err_payload_ptr);
cur_err |= workarea->error_reason;
put_user(cur_err, err_payload_ptr);
get_user(ev_id, &csa_header->err_event_id);
kfd_set_event(p, ev_id);
out:
kthread_unuse_mm(mm);
mmput(mm);
}
int kfd_send_exception_to_runtime(struct kfd_process *p,
unsigned int queue_id,
uint64_t error_reason)
{
struct send_exception_work_handler_workarea worker;
INIT_WORK_ONSTACK(&worker.work, send_exception_work_handler);
worker.p = p;
worker.queue_id = queue_id;
worker.error_reason = error_reason;
schedule_work(&worker.work);
flush_work(&worker.work);
destroy_work_on_stack(&worker.work);
return 0;
}
struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *p, uint32_t gpu_id)
{
int i;
if (gpu_id) {
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
if (pdd->user_gpu_id == gpu_id)
return pdd;
}
}
return NULL;
}
int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id)
{
int i;
if (!actual_gpu_id)
return 0;
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
if (pdd->dev->id == actual_gpu_id)
return pdd->user_gpu_id;
}
return -EINVAL;
}
#if defined(CONFIG_DEBUG_FS)
int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
{
struct kfd_process *p;
unsigned int temp;
int r = 0;
int idx = srcu_read_lock(&kfd_processes_srcu);
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
seq_printf(m, "Process %d PASID 0x%x:\n",
p->lead_thread->tgid, p->pasid);
mutex_lock(&p->mutex);
r = pqm_debugfs_mqds(m, &p->pqm);
mutex_unlock(&p->mutex);
if (r)
break;
}
srcu_read_unlock(&kfd_processes_srcu, idx);
return r;
}
#endif
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_process.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2020-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/anon_inodes.h>
#include <uapi/linux/kfd_ioctl.h>
#include "amdgpu.h"
#include "amdgpu_vm.h"
#include "kfd_priv.h"
#include "kfd_smi_events.h"
struct kfd_smi_client {
struct list_head list;
struct kfifo fifo;
wait_queue_head_t wait_queue;
	/* bitmask of enabled events */
uint64_t events;
struct kfd_node *dev;
spinlock_t lock;
struct rcu_head rcu;
pid_t pid;
bool suser;
};
#define MAX_KFIFO_SIZE 1024
static __poll_t kfd_smi_ev_poll(struct file *, struct poll_table_struct *);
static ssize_t kfd_smi_ev_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t kfd_smi_ev_write(struct file *, const char __user *, size_t,
loff_t *);
static int kfd_smi_ev_release(struct inode *, struct file *);
static const char kfd_smi_name[] = "kfd_smi_ev";
static const struct file_operations kfd_smi_ev_fops = {
.owner = THIS_MODULE,
.poll = kfd_smi_ev_poll,
.read = kfd_smi_ev_read,
.write = kfd_smi_ev_write,
.release = kfd_smi_ev_release
};
static __poll_t kfd_smi_ev_poll(struct file *filep,
struct poll_table_struct *wait)
{
struct kfd_smi_client *client = filep->private_data;
__poll_t mask = 0;
poll_wait(filep, &client->wait_queue, wait);
spin_lock(&client->lock);
if (!kfifo_is_empty(&client->fifo))
mask = EPOLLIN | EPOLLRDNORM;
spin_unlock(&client->lock);
return mask;
}
static ssize_t kfd_smi_ev_read(struct file *filep, char __user *user,
size_t size, loff_t *offset)
{
int ret;
size_t to_copy;
struct kfd_smi_client *client = filep->private_data;
unsigned char *buf;
size = min_t(size_t, size, MAX_KFIFO_SIZE);
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
	/* kfifo_to_user can sleep, so we can't call it under the spinlock.
	 * Instead, drain the kfifo into a local buffer while holding the lock,
	 * then copy that buffer to user space.
	 */
spin_lock(&client->lock);
to_copy = kfifo_len(&client->fifo);
if (!to_copy) {
spin_unlock(&client->lock);
ret = -EAGAIN;
goto ret_err;
}
to_copy = min(size, to_copy);
ret = kfifo_out(&client->fifo, buf, to_copy);
spin_unlock(&client->lock);
if (ret <= 0) {
ret = -EAGAIN;
goto ret_err;
}
ret = copy_to_user(user, buf, to_copy);
if (ret) {
ret = -EFAULT;
goto ret_err;
}
kfree(buf);
return to_copy;
ret_err:
kfree(buf);
return ret;
}
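/* Writing a 64-bit mask replaces the set of SMI events this client receives. */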
static ssize_t kfd_smi_ev_write(struct file *filep, const char __user *user,
size_t size, loff_t *offset)
{
struct kfd_smi_client *client = filep->private_data;
uint64_t events;
if (!access_ok(user, size) || size < sizeof(events))
return -EFAULT;
if (copy_from_user(&events, user, sizeof(events)))
return -EFAULT;
WRITE_ONCE(client->events, events);
return sizeof(events);
}
static void kfd_smi_ev_client_free(struct rcu_head *p)
{
struct kfd_smi_client *ev = container_of(p, struct kfd_smi_client, rcu);
kfifo_free(&ev->fifo);
kfree(ev);
}
static int kfd_smi_ev_release(struct inode *inode, struct file *filep)
{
struct kfd_smi_client *client = filep->private_data;
struct kfd_node *dev = client->dev;
spin_lock(&dev->smi_lock);
list_del_rcu(&client->list);
spin_unlock(&dev->smi_lock);
call_rcu(&client->rcu, kfd_smi_ev_client_free);
return 0;
}
static bool kfd_smi_ev_enabled(pid_t pid, struct kfd_smi_client *client,
unsigned int event)
{
uint64_t all = KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_ALL_PROCESS);
uint64_t events = READ_ONCE(client->events);
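	/* Events from other processes are only delivered to privileged clients
	 * that have enabled KFD_SMI_EVENT_ALL_PROCESS.
	 */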
if (pid && client->pid != pid && !(client->suser && (events & all)))
return false;
return events & KFD_SMI_EVENT_MASK_FROM_INDEX(event);
}
static void add_event_to_kfifo(pid_t pid, struct kfd_node *dev,
unsigned int smi_event, char *event_msg, int len)
{
struct kfd_smi_client *client;
rcu_read_lock();
list_for_each_entry_rcu(client, &dev->smi_clients, list) {
if (!kfd_smi_ev_enabled(pid, client, smi_event))
continue;
spin_lock(&client->lock);
if (kfifo_avail(&client->fifo) >= len) {
kfifo_in(&client->fifo, event_msg, len);
wake_up_all(&client->wait_queue);
} else {
pr_debug("smi_event(EventID: %u): no space left\n",
smi_event);
}
spin_unlock(&client->lock);
}
rcu_read_unlock();
}
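/* Format an event as "<event id in hex> <payload>" and queue it to every
 * subscribed client's fifo.
 */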
__printf(4, 5)
static void kfd_smi_event_add(pid_t pid, struct kfd_node *dev,
unsigned int event, char *fmt, ...)
{
char fifo_in[KFD_SMI_EVENT_MSG_SIZE];
int len;
va_list args;
if (list_empty(&dev->smi_clients))
return;
len = snprintf(fifo_in, sizeof(fifo_in), "%x ", event);
va_start(args, fmt);
len += vsnprintf(fifo_in + len, sizeof(fifo_in) - len, fmt, args);
va_end(args);
add_event_to_kfifo(pid, dev, event, fifo_in, len);
}
void kfd_smi_event_update_gpu_reset(struct kfd_node *dev, bool post_reset)
{
unsigned int event;
if (post_reset) {
event = KFD_SMI_EVENT_GPU_POST_RESET;
} else {
event = KFD_SMI_EVENT_GPU_PRE_RESET;
++(dev->reset_seq_num);
}
kfd_smi_event_add(0, dev, event, "%x\n", dev->reset_seq_num);
}
void kfd_smi_event_update_thermal_throttling(struct kfd_node *dev,
uint64_t throttle_bitmask)
{
kfd_smi_event_add(0, dev, KFD_SMI_EVENT_THERMAL_THROTTLE, "%llx:%llx\n",
throttle_bitmask,
amdgpu_dpm_get_thermal_throttling_counter(dev->adev));
}
void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid)
{
struct amdgpu_task_info task_info;
memset(&task_info, 0, sizeof(struct amdgpu_task_info));
amdgpu_vm_get_task_info(dev->adev, pasid, &task_info);
/* Report VM faults from user applications, not retry from kernel */
if (!task_info.pid)
return;
kfd_smi_event_add(0, dev, KFD_SMI_EVENT_VMFAULT, "%x:%s\n",
task_info.pid, task_info.task_name);
}
void kfd_smi_event_page_fault_start(struct kfd_node *node, pid_t pid,
unsigned long address, bool write_fault,
ktime_t ts)
{
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_PAGE_FAULT_START,
"%lld -%d @%lx(%x) %c\n", ktime_to_ns(ts), pid,
address, node->id, write_fault ? 'W' : 'R');
}
void kfd_smi_event_page_fault_end(struct kfd_node *node, pid_t pid,
unsigned long address, bool migration)
{
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_PAGE_FAULT_END,
"%lld -%d @%lx(%x) %c\n", ktime_get_boottime_ns(),
pid, address, node->id, migration ? 'M' : 'U');
}
void kfd_smi_event_migration_start(struct kfd_node *node, pid_t pid,
unsigned long start, unsigned long end,
uint32_t from, uint32_t to,
uint32_t prefetch_loc, uint32_t preferred_loc,
uint32_t trigger)
{
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_MIGRATE_START,
"%lld -%d @%lx(%lx) %x->%x %x:%x %d\n",
ktime_get_boottime_ns(), pid, start, end - start,
from, to, prefetch_loc, preferred_loc, trigger);
}
void kfd_smi_event_migration_end(struct kfd_node *node, pid_t pid,
unsigned long start, unsigned long end,
uint32_t from, uint32_t to, uint32_t trigger)
{
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_MIGRATE_END,
"%lld -%d @%lx(%lx) %x->%x %d\n",
ktime_get_boottime_ns(), pid, start, end - start,
from, to, trigger);
}
void kfd_smi_event_queue_eviction(struct kfd_node *node, pid_t pid,
uint32_t trigger)
{
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_QUEUE_EVICTION,
"%lld -%d %x %d\n", ktime_get_boottime_ns(), pid,
node->id, trigger);
}
void kfd_smi_event_queue_restore(struct kfd_node *node, pid_t pid)
{
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_QUEUE_RESTORE,
"%lld -%d %x\n", ktime_get_boottime_ns(), pid,
node->id);
}
void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm)
{
struct kfd_process *p;
int i;
p = kfd_lookup_process_by_mm(mm);
if (!p)
return;
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
kfd_smi_event_add(p->lead_thread->pid, pdd->dev,
KFD_SMI_EVENT_QUEUE_RESTORE,
"%lld -%d %x %c\n", ktime_get_boottime_ns(),
p->lead_thread->pid, pdd->dev->id, 'R');
}
kfd_unref_process(p);
}
void kfd_smi_event_unmap_from_gpu(struct kfd_node *node, pid_t pid,
unsigned long address, unsigned long last,
uint32_t trigger)
{
kfd_smi_event_add(pid, node, KFD_SMI_EVENT_UNMAP_FROM_GPU,
"%lld -%d @%lx(%lx) %x %d\n", ktime_get_boottime_ns(),
pid, address, last - address + 1, node->id, trigger);
}
int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd)
{
struct kfd_smi_client *client;
int ret;
client = kzalloc(sizeof(struct kfd_smi_client), GFP_KERNEL);
if (!client)
return -ENOMEM;
INIT_LIST_HEAD(&client->list);
ret = kfifo_alloc(&client->fifo, MAX_KFIFO_SIZE, GFP_KERNEL);
if (ret) {
kfree(client);
return ret;
}
init_waitqueue_head(&client->wait_queue);
spin_lock_init(&client->lock);
client->events = 0;
client->dev = dev;
client->pid = current->tgid;
client->suser = capable(CAP_SYS_ADMIN);
spin_lock(&dev->smi_lock);
list_add_rcu(&client->list, &dev->smi_clients);
spin_unlock(&dev->smi_lock);
ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, (void *)client,
O_RDWR);
if (ret < 0) {
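		/* Creating the anon fd failed: unlink the client and wait for
		 * concurrent RCU readers to finish before freeing it.
		 */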
spin_lock(&dev->smi_lock);
list_del_rcu(&client->list);
spin_unlock(&dev->smi_lock);
synchronize_rcu();
kfifo_free(&client->fifo);
kfree(client);
return ret;
}
*fd = ret;
return 0;
}
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2016-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
#include "v9_structs.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_sh_mask.h"
#include "amdgpu_amdkfd.h"
#include "kfd_device_queue_manager.h"
static void update_mqd(struct mqd_manager *mm, void *mqd,
struct queue_properties *q,
struct mqd_update_info *minfo);
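/* With CWSR enabled, a compute queue's control stack is placed right behind
 * its page-aligned MQD, so the per-queue stride covers both.
 */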
static uint64_t mqd_stride_v9(struct mqd_manager *mm,
struct queue_properties *q)
{
if (mm->dev->kfd->cwsr_enabled &&
q->type == KFD_QUEUE_TYPE_COMPUTE)
return ALIGN(q->ctl_stack_size, PAGE_SIZE) +
ALIGN(sizeof(struct v9_mqd), PAGE_SIZE);
return mm->mqd_size;
}
static inline struct v9_mqd *get_mqd(void *mqd)
{
return (struct v9_mqd *)mqd;
}
static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
{
return (struct v9_sdma_mqd *)mqd;
}
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
struct mqd_update_info *minfo, uint32_t inst)
{
struct v9_mqd *m;
uint32_t se_mask[KFD_MAX_NUM_SE] = {0};
if (!minfo || !minfo->cu_mask.ptr)
return;
mqd_symmetrically_map_cu_mask(mm,
minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, inst);
m = get_mqd(mqd);
m->compute_static_thread_mgmt_se0 = se_mask[0];
m->compute_static_thread_mgmt_se1 = se_mask[1];
m->compute_static_thread_mgmt_se2 = se_mask[2];
m->compute_static_thread_mgmt_se3 = se_mask[3];
if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3)) {
m->compute_static_thread_mgmt_se4 = se_mask[4];
m->compute_static_thread_mgmt_se5 = se_mask[5];
m->compute_static_thread_mgmt_se6 = se_mask[6];
m->compute_static_thread_mgmt_se7 = se_mask[7];
pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n",
m->compute_static_thread_mgmt_se0,
m->compute_static_thread_mgmt_se1,
m->compute_static_thread_mgmt_se2,
m->compute_static_thread_mgmt_se3,
m->compute_static_thread_mgmt_se4,
m->compute_static_thread_mgmt_se5,
m->compute_static_thread_mgmt_se6,
m->compute_static_thread_mgmt_se7);
} else {
pr_debug("inst: %u, update cu mask to %#x %#x %#x %#x\n",
inst, m->compute_static_thread_mgmt_se0,
m->compute_static_thread_mgmt_se1,
m->compute_static_thread_mgmt_se2,
m->compute_static_thread_mgmt_se3);
}
}
static void set_priority(struct v9_mqd *m, struct queue_properties *q)
{
m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
m->cp_hqd_queue_priority = q->priority;
}
static struct kfd_mem_obj *allocate_mqd(struct kfd_node *node,
struct queue_properties *q)
{
int retval;
struct kfd_mem_obj *mqd_mem_obj = NULL;
	/* For V9 only, due to a HW bug, the control stack of a user mode
	 * compute queue needs to be allocated just behind the page boundary
	 * of its regular MQD buffer. So we allocate an enlarged MQD buffer:
	 * the first page of the buffer serves as the regular MQD buffer
	 * and the remainder is used for the control stack. Although the two
	 * parts are in the same buffer object, they need different memory
	 * types: the MQD part needs UC (uncached) as usual, while the control
	 * stack needs NC (non-coherent), which differs from the UC type used
	 * when the control stack is allocated in user space.
	 *
	 * Because of all this, we use the GTT allocation function instead
	 * of the sub-allocation function for this enlarged MQD buffer. Moreover,
	 * in order to achieve two memory types in a single buffer object, we
	 * pass the special bo flag AMDGPU_GEM_CREATE_CP_MQD_GFX9 to instruct
	 * the amdgpu memory functions to do so.
	 */
if (node->kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
if (!mqd_mem_obj)
return NULL;
retval = amdgpu_amdkfd_alloc_gtt_mem(node->adev,
(ALIGN(q->ctl_stack_size, PAGE_SIZE) +
ALIGN(sizeof(struct v9_mqd), PAGE_SIZE)) *
NUM_XCC(node->xcc_mask),
&(mqd_mem_obj->gtt_mem),
&(mqd_mem_obj->gpu_addr),
(void *)&(mqd_mem_obj->cpu_ptr), true);
if (retval) {
kfree(mqd_mem_obj);
return NULL;
}
} else {
retval = kfd_gtt_sa_allocate(node, sizeof(struct v9_mqd),
&mqd_mem_obj);
if (retval)
return NULL;
}
return mqd_mem_obj;
}
static void init_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
{
uint64_t addr;
struct v9_mqd *m;
m = (struct v9_mqd *) mqd_mem_obj->cpu_ptr;
addr = mqd_mem_obj->gpu_addr;
memset(m, 0, sizeof(struct v9_mqd));
m->header = 0xC0310800;
m->compute_pipelinestat_enable = 1;
m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se4 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se5 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se6 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se7 = 0xFFFFFFFF;
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
m->cp_mqd_base_addr_hi = upper_32_bits(addr);
m->cp_hqd_quantum = 1 << CP_HQD_QUANTUM__QUANTUM_EN__SHIFT |
1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;
/* Set cp_hqd_hq_scheduler0 bit 14 to 1 to have the CP set up the
* DISPATCH_PTR. This is required for the kfd debugger
*/
m->cp_hqd_hq_status0 = 1 << 14;
if (q->format == KFD_QUEUE_FORMAT_AQL)
m->cp_hqd_aql_control =
1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
if (q->tba_addr) {
m->compute_pgm_rsrc2 |=
(1 << COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT);
}
if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) {
m->cp_hqd_persistent_state |=
(1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
m->cp_hqd_ctx_save_base_addr_lo =
lower_32_bits(q->ctx_save_restore_area_address);
m->cp_hqd_ctx_save_base_addr_hi =
upper_32_bits(q->ctx_save_restore_area_address);
m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size;
m->cp_hqd_cntl_stack_size = q->ctl_stack_size;
m->cp_hqd_cntl_stack_offset = q->ctl_stack_size;
m->cp_hqd_wg_state_offset = q->ctl_stack_size;
}
*mqd = m;
if (gart_addr)
*gart_addr = addr;
update_mqd(mm, m, q, NULL);
}
static int load_mqd(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
struct queue_properties *p, struct mm_struct *mms)
{
/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
(uint32_t __user *)p->write_ptr,
wptr_shift, 0, mms, 0);
}
static void update_mqd(struct mqd_manager *mm, void *mqd,
struct queue_properties *q,
struct mqd_update_info *minfo)
{
struct v9_mqd *m;
m = get_mqd(mqd);
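	/* PQ control: fixed RPTR block size plus the ring size encoded as
	 * log2(size in dwords) - 1.
	 */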
m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);
m->cp_hqd_pq_doorbell_control =
q->doorbell_off <<
CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
m->cp_hqd_pq_doorbell_control);
m->cp_hqd_ib_control =
3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT |
1 << CP_HQD_IB_CONTROL__IB_EXE_DISABLE__SHIFT;
	/*
	 * HW does not clamp this field correctly. The maximum EOP queue size
	 * is constrained by the per-SE EOP done signal count, which is 8-bit.
	 * The limit is 0xFF EOP entries (= 0x7F8 dwords). CP will not submit
	 * more than (EOP entry count - 1), so a queue size of 0x800 dwords
	 * is safe, giving a maximum field value of 0xA.
	 *
	 * Also, only do the calculation if EOP is used (size > 0), otherwise
	 * the order_base_2 calculation gives an incorrect result.
	 */
m->cp_hqd_eop_control = q->eop_ring_buffer_size ?
min(0xA, order_base_2(q->eop_ring_buffer_size / 4) - 1) : 0;
m->cp_hqd_eop_base_addr_lo =
lower_32_bits(q->eop_ring_buffer_address >> 8);
m->cp_hqd_eop_base_addr_hi =
upper_32_bits(q->eop_ring_buffer_address >> 8);
m->cp_hqd_iq_timer = 0;
m->cp_hqd_vmid = q->vmid;
if (q->format == KFD_QUEUE_FORMAT_AQL) {
m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT |
1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT |
1 << CP_HQD_PQ_CONTROL__WPP_CLAMP_EN__SHIFT;
m->cp_hqd_pq_doorbell_control |= 1 <<
CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
}
if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address)
m->cp_hqd_ctx_save_control = 0;
if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3))
update_cu_mask(mm, mqd, minfo, 0);
set_priority(m, q);
q->is_active = QUEUE_IS_ACTIVE(*q);
}
static uint32_t read_doorbell_id(void *mqd)
{
struct v9_mqd *m = (struct v9_mqd *)mqd;
return m->queue_doorbell_id0;
}
static int get_wave_state(struct mqd_manager *mm, void *mqd,
struct queue_properties *q,
void __user *ctl_stack,
u32 *ctl_stack_used_size,
u32 *save_area_used_size)
{
struct v9_mqd *m;
struct kfd_context_save_area_header header;
/* Control stack is located one page after MQD. */
void *mqd_ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE);
m = get_mqd(mqd);
*ctl_stack_used_size = m->cp_hqd_cntl_stack_size -
m->cp_hqd_cntl_stack_offset;
*save_area_used_size = m->cp_hqd_wg_state_offset -
m->cp_hqd_cntl_stack_size;
header.wave_state.control_stack_size = *ctl_stack_used_size;
header.wave_state.wave_state_size = *save_area_used_size;
header.wave_state.wave_state_offset = m->cp_hqd_wg_state_offset;
header.wave_state.control_stack_offset = m->cp_hqd_cntl_stack_offset;
if (copy_to_user(ctl_stack, &header, sizeof(header.wave_state)))
return -EFAULT;
if (copy_to_user(ctl_stack + m->cp_hqd_cntl_stack_offset,
mqd_ctl_stack + m->cp_hqd_cntl_stack_offset,
*ctl_stack_used_size))
return -EFAULT;
return 0;
}
static void get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stack_size)
{
struct v9_mqd *m = get_mqd(mqd);
*ctl_stack_size = m->cp_hqd_cntl_stack_size;
}
static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
{
struct v9_mqd *m;
/* Control stack is located one page after MQD. */
void *ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE);
m = get_mqd(mqd);
memcpy(mqd_dst, m, sizeof(struct v9_mqd));
memcpy(ctl_stack_dst, ctl_stack, m->cp_hqd_cntl_stack_size);
}
static void restore_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *qp,
const void *mqd_src,
const void *ctl_stack_src, u32 ctl_stack_size)
{
uint64_t addr;
struct v9_mqd *m;
void *ctl_stack;
m = (struct v9_mqd *) mqd_mem_obj->cpu_ptr;
addr = mqd_mem_obj->gpu_addr;
memcpy(m, mqd_src, sizeof(*m));
*mqd = m;
if (gart_addr)
*gart_addr = addr;
/* Control stack is located one page after MQD. */
ctl_stack = (void *)((uintptr_t)*mqd + PAGE_SIZE);
memcpy(ctl_stack, ctl_stack_src, ctl_stack_size);
m->cp_hqd_pq_doorbell_control =
qp->doorbell_off <<
CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
m->cp_hqd_pq_doorbell_control);
qp->is_active = 0;
}
static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
{
struct v9_mqd *m;
init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
m = get_mqd(*mqd);
m->cp_hqd_pq_control |= 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
}
static int destroy_hiq_mqd(struct mqd_manager *mm, void *mqd,
enum kfd_preempt_type type, unsigned int timeout,
uint32_t pipe_id, uint32_t queue_id)
{
int err;
struct v9_mqd *m;
u32 doorbell_off;
m = get_mqd(mqd);
doorbell_off = m->cp_hqd_pq_doorbell_control >>
CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, 0);
if (err)
pr_debug("Destroy HIQ MQD failed: %d\n", err);
return err;
}
static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
{
struct v9_sdma_mqd *m;
m = (struct v9_sdma_mqd *) mqd_mem_obj->cpu_ptr;
memset(m, 0, sizeof(struct v9_sdma_mqd));
*mqd = m;
if (gart_addr)
*gart_addr = mqd_mem_obj->gpu_addr;
mm->update_mqd(mm, m, q, NULL);
}
#define SDMA_RLC_DUMMY_DEFAULT 0xf
static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
struct queue_properties *q,
struct mqd_update_info *minfo)
{
struct v9_sdma_mqd *m;
m = get_sdma_mqd(mqd);
m->sdmax_rlcx_rb_cntl = order_base_2(q->queue_size / 4)
<< SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
m->sdmax_rlcx_doorbell_offset =
q->doorbell_off << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;
m->sdma_engine_id = q->sdma_engine_id;
m->sdma_queue_id = q->sdma_queue_id;
m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;
q->is_active = QUEUE_IS_ACTIVE(*q);
}
static void checkpoint_mqd_sdma(struct mqd_manager *mm,
void *mqd,
void *mqd_dst,
void *ctl_stack_dst)
{
struct v9_sdma_mqd *m;
m = get_sdma_mqd(mqd);
memcpy(mqd_dst, m, sizeof(struct v9_sdma_mqd));
}
static void restore_mqd_sdma(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *qp,
const void *mqd_src,
const void *ctl_stack_src, const u32 ctl_stack_size)
{
uint64_t addr;
struct v9_sdma_mqd *m;
m = (struct v9_sdma_mqd *) mqd_mem_obj->cpu_ptr;
addr = mqd_mem_obj->gpu_addr;
memcpy(m, mqd_src, sizeof(*m));
m->sdmax_rlcx_doorbell_offset =
qp->doorbell_off << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;
*mqd = m;
if (gart_addr)
*gart_addr = addr;
qp->is_active = 0;
}
static void init_mqd_hiq_v9_4_3(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
{
struct v9_mqd *m;
int xcc = 0;
struct kfd_mem_obj xcc_mqd_mem_obj;
uint64_t xcc_gart_addr = 0;
memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj));
for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
kfd_get_hiq_xcc_mqd(mm->dev, &xcc_mqd_mem_obj, xcc);
init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q);
m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
m->cp_mqd_stride_size = kfd_hiq_mqd_stride(mm->dev);
if (xcc == 0) {
/* Set no_update_rptr = 0 in Master XCC */
m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK;
/* Set the MQD pointer and gart address to XCC0 MQD */
*mqd = m;
*gart_addr = xcc_gart_addr;
}
}
}
static int hiq_load_mqd_kiq_v9_4_3(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
struct queue_properties *p, struct mm_struct *mms)
{
uint32_t xcc_mask = mm->dev->xcc_mask;
int xcc_id, err, inst = 0;
void *xcc_mqd;
uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev);
for_each_inst(xcc_id, xcc_mask) {
xcc_mqd = mqd + hiq_mqd_size * inst;
err = mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, xcc_mqd,
pipe_id, queue_id,
p->doorbell_off, xcc_id);
if (err) {
pr_debug("Failed to load HIQ MQD for XCC: %d\n", inst);
break;
}
++inst;
}
return err;
}
static int destroy_hiq_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
enum kfd_preempt_type type, unsigned int timeout,
uint32_t pipe_id, uint32_t queue_id)
{
uint32_t xcc_mask = mm->dev->xcc_mask;
int xcc_id, err, inst = 0;
uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev);
struct v9_mqd *m;
u32 doorbell_off;
for_each_inst(xcc_id, xcc_mask) {
m = get_mqd(mqd + hiq_mqd_size * inst);
doorbell_off = m->cp_hqd_pq_doorbell_control >>
CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, xcc_id);
if (err) {
pr_debug("Destroy HIQ MQD failed for xcc: %d\n", inst);
break;
}
++inst;
}
return err;
}
static void get_xcc_mqd(struct kfd_mem_obj *mqd_mem_obj,
struct kfd_mem_obj *xcc_mqd_mem_obj,
uint64_t offset)
{
xcc_mqd_mem_obj->gtt_mem = (offset == 0) ?
mqd_mem_obj->gtt_mem : NULL;
xcc_mqd_mem_obj->gpu_addr = mqd_mem_obj->gpu_addr + offset;
xcc_mqd_mem_obj->cpu_ptr = (uint32_t *)((uintptr_t)mqd_mem_obj->cpu_ptr
+ offset);
}
static void init_mqd_v9_4_3(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
{
struct v9_mqd *m;
int xcc = 0;
struct kfd_mem_obj xcc_mqd_mem_obj;
uint64_t xcc_gart_addr = 0;
uint64_t xcc_ctx_save_restore_area_address;
uint64_t offset = mm->mqd_stride(mm, q);
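	/* Each new queue starts its logical XCC numbering at a rotating offset,
	 * spreading AQL queues across the XCCs.
	 */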
uint32_t local_xcc_start = mm->dev->dqm->current_logical_xcc_start++;
memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj));
for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
get_xcc_mqd(mqd_mem_obj, &xcc_mqd_mem_obj, offset*xcc);
init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q);
m->cp_mqd_stride_size = offset;
/*
* Update the CWSR address for each XCC if CWSR is enabled
* and CWSR area is allocated in thunk
*/
if (mm->dev->kfd->cwsr_enabled &&
q->ctx_save_restore_area_address) {
xcc_ctx_save_restore_area_address =
q->ctx_save_restore_area_address +
(xcc * q->ctx_save_restore_area_size);
m->cp_hqd_ctx_save_base_addr_lo =
lower_32_bits(xcc_ctx_save_restore_area_address);
m->cp_hqd_ctx_save_base_addr_hi =
upper_32_bits(xcc_ctx_save_restore_area_address);
}
if (q->format == KFD_QUEUE_FORMAT_AQL) {
m->compute_tg_chunk_size = 1;
m->compute_current_logic_xcc_id =
(local_xcc_start + xcc) %
NUM_XCC(mm->dev->xcc_mask);
switch (xcc) {
case 0:
/* Master XCC */
m->cp_hqd_pq_control &=
~CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK;
break;
default:
break;
}
} else {
/* PM4 Queue */
m->compute_current_logic_xcc_id = 0;
m->compute_tg_chunk_size = 0;
m->pm4_target_xcc_in_xcp = q->pm4_target_xcc;
}
if (xcc == 0) {
/* Set the MQD pointer and gart address to XCC0 MQD */
*mqd = m;
*gart_addr = xcc_gart_addr;
}
}
}
static void update_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
struct queue_properties *q, struct mqd_update_info *minfo)
{
struct v9_mqd *m;
int xcc = 0;
uint64_t size = mm->mqd_stride(mm, q);
for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
m = get_mqd(mqd + size * xcc);
update_mqd(mm, m, q, minfo);
update_cu_mask(mm, mqd, minfo, xcc);
if (q->format == KFD_QUEUE_FORMAT_AQL) {
switch (xcc) {
case 0:
/* Master XCC */
m->cp_hqd_pq_control &=
~CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK;
break;
default:
break;
}
m->compute_tg_chunk_size = 1;
} else {
/* PM4 Queue */
m->compute_current_logic_xcc_id = 0;
m->compute_tg_chunk_size = 0;
m->pm4_target_xcc_in_xcp = q->pm4_target_xcc;
}
}
}
static int destroy_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
enum kfd_preempt_type type, unsigned int timeout,
uint32_t pipe_id, uint32_t queue_id)
{
uint32_t xcc_mask = mm->dev->xcc_mask;
int xcc_id, err, inst = 0;
void *xcc_mqd;
struct v9_mqd *m;
uint64_t mqd_offset;
m = get_mqd(mqd);
mqd_offset = m->cp_mqd_stride_size;
for_each_inst(xcc_id, xcc_mask) {
xcc_mqd = mqd + mqd_offset * inst;
err = mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, xcc_mqd,
type, timeout, pipe_id,
queue_id, xcc_id);
if (err) {
pr_debug("Destroy MQD failed for xcc: %d\n", inst);
break;
}
++inst;
}
return err;
}
static int load_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
struct queue_properties *p, struct mm_struct *mms)
{
/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
uint32_t xcc_mask = mm->dev->xcc_mask;
int xcc_id, err, inst = 0;
void *xcc_mqd;
uint64_t mqd_stride_size = mm->mqd_stride(mm, p);
for_each_inst(xcc_id, xcc_mask) {
xcc_mqd = mqd + mqd_stride_size * inst;
err = mm->dev->kfd2kgd->hqd_load(
mm->dev->adev, xcc_mqd, pipe_id, queue_id,
(uint32_t __user *)p->write_ptr, wptr_shift, 0, mms,
xcc_id);
if (err) {
pr_debug("Load MQD failed for xcc: %d\n", inst);
break;
}
++inst;
}
return err;
}
static int get_wave_state_v9_4_3(struct mqd_manager *mm, void *mqd,
struct queue_properties *q,
void __user *ctl_stack,
u32 *ctl_stack_used_size,
u32 *save_area_used_size)
{
int xcc, err = 0;
void *xcc_mqd;
void __user *xcc_ctl_stack;
uint64_t mqd_stride_size = mm->mqd_stride(mm, q);
u32 tmp_ctl_stack_used_size = 0, tmp_save_area_used_size = 0;
for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
xcc_mqd = mqd + mqd_stride_size * xcc;
xcc_ctl_stack = (void __user *)((uintptr_t)ctl_stack +
q->ctx_save_restore_area_size * xcc);
err = get_wave_state(mm, xcc_mqd, q, xcc_ctl_stack,
&tmp_ctl_stack_used_size,
&tmp_save_area_used_size);
if (err)
break;
		/*
		 * Report the ctl_stack_used_size and save_area_used_size of
		 * XCC 0 when passing the info to user space.
		 * For multi-XCC, user space has to look at the header info of
		 * each control stack area to determine the control stack size
		 * and save area used.
		 */
if (xcc == 0) {
*ctl_stack_used_size = tmp_ctl_stack_used_size;
*save_area_used_size = tmp_save_area_used_size;
}
}
return err;
}
#if defined(CONFIG_DEBUG_FS)
static int debugfs_show_mqd(struct seq_file *m, void *data)
{
seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
data, sizeof(struct v9_mqd), false);
return 0;
}
static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
{
seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
data, sizeof(struct v9_sdma_mqd), false);
return 0;
}
#endif
struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
struct kfd_node *dev)
{
struct mqd_manager *mqd;
if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
return NULL;
mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
if (!mqd)
return NULL;
mqd->dev = dev;
switch (type) {
case KFD_MQD_TYPE_CP:
mqd->allocate_mqd = allocate_mqd;
mqd->free_mqd = kfd_free_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->get_checkpoint_info = get_checkpoint_info;
mqd->checkpoint_mqd = checkpoint_mqd;
mqd->restore_mqd = restore_mqd;
mqd->mqd_size = sizeof(struct v9_mqd);
mqd->mqd_stride = mqd_stride_v9;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)) {
mqd->init_mqd = init_mqd_v9_4_3;
mqd->load_mqd = load_mqd_v9_4_3;
mqd->update_mqd = update_mqd_v9_4_3;
mqd->destroy_mqd = destroy_mqd_v9_4_3;
mqd->get_wave_state = get_wave_state_v9_4_3;
} else {
mqd->init_mqd = init_mqd;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd;
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->get_wave_state = get_wave_state;
}
break;
case KFD_MQD_TYPE_HIQ:
mqd->allocate_mqd = allocate_hiq_mqd;
mqd->free_mqd = free_mqd_hiq_sdma;
mqd->update_mqd = update_mqd;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v9_mqd);
mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
mqd->read_doorbell_id = read_doorbell_id;
if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)) {
mqd->init_mqd = init_mqd_hiq_v9_4_3;
mqd->load_mqd = hiq_load_mqd_kiq_v9_4_3;
mqd->destroy_mqd = destroy_hiq_mqd_v9_4_3;
} else {
mqd->init_mqd = init_mqd_hiq;
mqd->load_mqd = kfd_hiq_load_mqd_kiq;
mqd->destroy_mqd = destroy_hiq_mqd;
}
break;
case KFD_MQD_TYPE_DIQ:
mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = kfd_free_mqd_cp;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd;
mqd->destroy_mqd = kfd_destroy_mqd_cp;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v9_mqd);
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
break;
case KFD_MQD_TYPE_SDMA:
mqd->allocate_mqd = allocate_sdma_mqd;
mqd->init_mqd = init_mqd_sdma;
mqd->free_mqd = free_mqd_hiq_sdma;
mqd->load_mqd = kfd_load_mqd_sdma;
mqd->update_mqd = update_mqd_sdma;
mqd->destroy_mqd = kfd_destroy_mqd_sdma;
mqd->is_occupied = kfd_is_occupied_sdma;
mqd->checkpoint_mqd = checkpoint_mqd_sdma;
mqd->restore_mqd = restore_mqd_sdma;
mqd->mqd_size = sizeof(struct v9_sdma_mqd);
mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
break;
default:
kfree(mqd);
return NULL;
}
return mqd;
}
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2014-2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/slab.h>
#include "kfd_priv.h"
void print_queue_properties(struct queue_properties *q)
{
if (!q)
return;
pr_debug("Printing queue properties:\n");
pr_debug("Queue Type: %u\n", q->type);
pr_debug("Queue Size: %llu\n", q->queue_size);
pr_debug("Queue percent: %u\n", q->queue_percent);
pr_debug("Queue Address: 0x%llX\n", q->queue_address);
pr_debug("Queue Id: %u\n", q->queue_id);
pr_debug("Queue Process Vmid: %u\n", q->vmid);
pr_debug("Queue Read Pointer: 0x%px\n", q->read_ptr);
pr_debug("Queue Write Pointer: 0x%px\n", q->write_ptr);
pr_debug("Queue Doorbell Pointer: 0x%p\n", q->doorbell_ptr);
pr_debug("Queue Doorbell Offset: %u\n", q->doorbell_off);
}
void print_queue(struct queue *q)
{
if (!q)
return;
pr_debug("Printing queue:\n");
pr_debug("Queue Type: %u\n", q->properties.type);
pr_debug("Queue Size: %llu\n", q->properties.queue_size);
pr_debug("Queue percent: %u\n", q->properties.queue_percent);
pr_debug("Queue Address: 0x%llX\n", q->properties.queue_address);
pr_debug("Queue Id: %u\n", q->properties.queue_id);
pr_debug("Queue Process Vmid: %u\n", q->properties.vmid);
pr_debug("Queue Read Pointer: 0x%px\n", q->properties.read_ptr);
pr_debug("Queue Write Pointer: 0x%px\n", q->properties.write_ptr);
pr_debug("Queue Doorbell Pointer: 0x%p\n", q->properties.doorbell_ptr);
pr_debug("Queue Doorbell Offset: %u\n", q->properties.doorbell_off);
pr_debug("Queue MQD Address: 0x%p\n", q->mqd);
pr_debug("Queue MQD Gart: 0x%llX\n", q->gart_mqd_addr);
pr_debug("Queue Process Address: 0x%p\n", q->process);
pr_debug("Queue Device Address: 0x%p\n", q->device);
}
int init_queue(struct queue **q, const struct queue_properties *properties)
{
struct queue *tmp_q;
tmp_q = kzalloc(sizeof(*tmp_q), GFP_KERNEL);
if (!tmp_q)
return -ENOMEM;
memcpy(&tmp_q->properties, properties, sizeof(*properties));
*q = tmp_q;
return 0;
}
void uninit_queue(struct queue *q)
{
kfree(q);
}
| linux-master | drivers/gpu/drm/amd/amdkfd/kfd_queue.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 BayLibre, SAS
* Author: Neil Armstrong <[email protected]>
* Copyright (C) 2015 Amlogic, Inc. All rights reserved.
* Copyright (C) 2014 Endless Mobile
*
* Written by:
* Jasper St. Pierre <[email protected]>
*/
#include <linux/bitfield.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include "meson_plane.h"
#include "meson_registers.h"
#include "meson_viu.h"
#include "meson_osd_afbcd.h"
/* OSD_SCI_WH_M1 */
#define SCI_WH_M1_W(w) FIELD_PREP(GENMASK(28, 16), w)
#define SCI_WH_M1_H(h) FIELD_PREP(GENMASK(12, 0), h)
/* OSD_SCO_H_START_END */
/* OSD_SCO_V_START_END */
#define SCO_HV_START(start) FIELD_PREP(GENMASK(27, 16), start)
#define SCO_HV_END(end) FIELD_PREP(GENMASK(11, 0), end)
/* OSD_SC_CTRL0 */
#define SC_CTRL0_PATH_EN BIT(3)
#define SC_CTRL0_SEL_OSD1 BIT(2)
/* OSD_VSC_CTRL0 */
#define VSC_BANK_LEN(value) FIELD_PREP(GENMASK(2, 0), value)
#define VSC_TOP_INI_RCV_NUM(value) FIELD_PREP(GENMASK(6, 3), value)
#define VSC_TOP_RPT_L0_NUM(value) FIELD_PREP(GENMASK(9, 8), value)
#define VSC_BOT_INI_RCV_NUM(value) FIELD_PREP(GENMASK(14, 11), value)
#define VSC_BOT_RPT_L0_NUM(value) FIELD_PREP(GENMASK(17, 16), value)
#define VSC_PROG_INTERLACE BIT(23)
#define VSC_VERTICAL_SCALER_EN BIT(24)
/* OSD_VSC_INI_PHASE */
#define VSC_INI_PHASE_BOT(bottom) FIELD_PREP(GENMASK(31, 16), bottom)
#define VSC_INI_PHASE_TOP(top) FIELD_PREP(GENMASK(15, 0), top)
/* OSD_HSC_CTRL0 */
#define HSC_BANK_LENGTH(value) FIELD_PREP(GENMASK(2, 0), value)
#define HSC_INI_RCV_NUM0(value) FIELD_PREP(GENMASK(6, 3), value)
#define HSC_RPT_P0_NUM0(value) FIELD_PREP(GENMASK(9, 8), value)
#define HSC_HORIZ_SCALER_EN BIT(22)
/* VPP_OSD_VSC_PHASE_STEP */
/* VPP_OSD_HSC_PHASE_STEP */
#define SC_PHASE_STEP(value) FIELD_PREP(GENMASK(27, 0), value)
struct meson_plane {
struct drm_plane base;
struct meson_drm *priv;
bool enabled;
};
#define to_meson_plane(x) container_of(x, struct meson_plane, base)
#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
static int meson_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_crtc_state *crtc_state;
if (!new_plane_state->crtc)
return 0;
crtc_state = drm_atomic_get_crtc_state(state,
new_plane_state->crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
/*
	 * Only allow:
* - Upscaling up to 5x, vertical and horizontal
* - Final coordinates must match crtc size
*/
return drm_atomic_helper_check_plane_state(new_plane_state,
crtc_state,
FRAC_16_16(1, 5),
DRM_PLANE_NO_SCALING,
false, true);
}
#define MESON_MOD_AFBC_VALID_BITS (AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 | \
AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | \
AFBC_FORMAT_MOD_YTR | \
AFBC_FORMAT_MOD_SPARSE | \
AFBC_FORMAT_MOD_SPLIT)
/* Takes a fixed 16.16 number and converts it to integer. */
static inline int64_t fixed16_to_int(int64_t value)
{
return value >> 16;
}
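/* Line stride is computed in 128-bit units ((bpp * width + 127) / 128) and
 * then rounded up to an even value.
 */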
static u32 meson_g12a_afbcd_line_stride(struct meson_drm *priv)
{
u32 line_stride = 0;
switch (priv->afbcd.format) {
case DRM_FORMAT_RGB565:
line_stride = ((priv->viu.osd1_width << 4) + 127) >> 7;
break;
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
line_stride = ((priv->viu.osd1_width << 5) + 127) >> 7;
break;
}
return ((line_stride + 1) >> 1) << 1;
}
static void meson_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct meson_plane *meson_plane = to_meson_plane(plane);
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_rect dest = drm_plane_state_dest(new_state);
struct meson_drm *priv = meson_plane->priv;
struct drm_framebuffer *fb = new_state->fb;
struct drm_gem_dma_object *gem;
unsigned long flags;
int vsc_ini_rcv_num, vsc_ini_rpt_p0_num;
int vsc_bot_rcv_num, vsc_bot_rpt_p0_num;
int hsc_ini_rcv_num, hsc_ini_rpt_p0_num;
int hf_phase_step, vf_phase_step;
int src_w, src_h, dst_w, dst_h;
int bot_ini_phase;
int hf_bank_len;
int vf_bank_len;
u8 canvas_id_osd1;
/*
* Update Coordinates
* Update Formats
* Update Buffer
* Enable Plane
*/
spin_lock_irqsave(&priv->drm->event_lock, flags);
/* Check if AFBC decoder is required for this buffer */
if ((meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) &&
fb->modifier & DRM_FORMAT_MOD_ARM_AFBC(MESON_MOD_AFBC_VALID_BITS))
priv->viu.osd1_afbcd = true;
else
priv->viu.osd1_afbcd = false;
/* Enable OSD and BLK0, set max global alpha */
priv->viu.osd1_ctrl_stat = OSD_ENABLE |
(0x100 << OSD_GLOBAL_ALPHA_SHIFT) |
OSD_BLK0_ENABLE;
priv->viu.osd1_ctrl_stat2 = readl(priv->io_base +
_REG(VIU_OSD1_CTRL_STAT2));
canvas_id_osd1 = priv->canvas_id_osd1;
/* Set up BLK0 to point to the right canvas */
priv->viu.osd1_blk0_cfg[0] = canvas_id_osd1 << OSD_CANVAS_SEL;
if (priv->viu.osd1_afbcd) {
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
/* This is the internal decoding memory address */
priv->viu.osd1_blk1_cfg4 = MESON_G12A_AFBCD_OUT_ADDR;
priv->viu.osd1_blk0_cfg[0] |= OSD_ENDIANNESS_BE;
priv->viu.osd1_ctrl_stat2 |= OSD_PENDING_STAT_CLEAN;
priv->viu.osd1_ctrl_stat |= VIU_OSD1_CFG_SYN_EN;
}
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM)) {
priv->viu.osd1_blk0_cfg[0] |= OSD_ENDIANNESS_LE;
priv->viu.osd1_ctrl_stat2 |= OSD_DPATH_MALI_AFBCD;
}
} else {
priv->viu.osd1_blk0_cfg[0] |= OSD_ENDIANNESS_LE;
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM))
priv->viu.osd1_ctrl_stat2 &= ~OSD_DPATH_MALI_AFBCD;
}
/* On GXBB, Use the old non-HDR RGB2YUV converter */
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB))
priv->viu.osd1_blk0_cfg[0] |= OSD_OUTPUT_COLOR_RGB;
if (priv->viu.osd1_afbcd &&
meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
priv->viu.osd1_blk0_cfg[0] |= OSD_MALI_SRC_EN |
priv->afbcd.ops->fmt_to_blk_mode(fb->modifier,
fb->format->format);
} else {
switch (fb->format->format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
OSD_COLOR_MATRIX_32_ARGB;
break;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
OSD_COLOR_MATRIX_32_ABGR;
break;
case DRM_FORMAT_RGB888:
priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_24 |
OSD_COLOR_MATRIX_24_RGB;
break;
case DRM_FORMAT_RGB565:
priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_16 |
OSD_COLOR_MATRIX_16_RGB565;
break;
}
}
switch (fb->format->format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
/* For XRGB, replace the pixel's alpha by 0xFF */
priv->viu.osd1_ctrl_stat2 |= OSD_REPLACE_EN;
break;
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_ABGR8888:
/* For ARGB, use the pixel's alpha */
priv->viu.osd1_ctrl_stat2 &= ~OSD_REPLACE_EN;
break;
}
/* Default scaler parameters */
vsc_bot_rcv_num = 0;
vsc_bot_rpt_p0_num = 0;
hf_bank_len = 4;
vf_bank_len = 4;
if (new_state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
vsc_bot_rcv_num = 6;
vsc_bot_rpt_p0_num = 2;
}
hsc_ini_rcv_num = hf_bank_len;
vsc_ini_rcv_num = vf_bank_len;
hsc_ini_rpt_p0_num = (hf_bank_len / 2) - 1;
vsc_ini_rpt_p0_num = (vf_bank_len / 2) - 1;
src_w = fixed16_to_int(new_state->src_w);
src_h = fixed16_to_int(new_state->src_h);
dst_w = new_state->crtc_w;
dst_h = new_state->crtc_h;
/*
* When the output is interlaced, the OSD must switch between
* each field using the INTERLACE_SEL_ODD (0) of VIU_OSD1_BLK0_CFG_W0
* at each vsync.
	 * But the vertical scaler can provide such functionality if
	 * it is configured for 2:1 scaling with interlace options enabled.
*/
if (new_state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
dest.y1 /= 2;
dest.y2 /= 2;
dst_h /= 2;
}
hf_phase_step = ((src_w << 18) / dst_w) << 6;
vf_phase_step = (src_h << 20) / dst_h;
if (new_state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
bot_ini_phase = ((vf_phase_step / 2) >> 4);
else
bot_ini_phase = 0;
vf_phase_step = (vf_phase_step << 4);
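	/*
	 * Note (added): both phase steps encode the source/destination
	 * size ratio as a fixed-point value scaled by 2^24, e.g. a 1:1
	 * ratio yields 0x1000000.
	 */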
	/*
	 * In interlaced mode dst_h was halved above, so the scaler
	 * is always active
	 */
if (src_h != dst_h || src_w != dst_w) {
priv->viu.osd_sc_i_wh_m1 = SCI_WH_M1_W(src_w - 1) |
SCI_WH_M1_H(src_h - 1);
priv->viu.osd_sc_o_h_start_end = SCO_HV_START(dest.x1) |
SCO_HV_END(dest.x2 - 1);
priv->viu.osd_sc_o_v_start_end = SCO_HV_START(dest.y1) |
SCO_HV_END(dest.y2 - 1);
/* Enable OSD Scaler */
priv->viu.osd_sc_ctrl0 = SC_CTRL0_PATH_EN | SC_CTRL0_SEL_OSD1;
} else {
priv->viu.osd_sc_i_wh_m1 = 0;
priv->viu.osd_sc_o_h_start_end = 0;
priv->viu.osd_sc_o_v_start_end = 0;
priv->viu.osd_sc_ctrl0 = 0;
}
/* In interlaced mode, vertical scaler is always active */
if (src_h != dst_h) {
priv->viu.osd_sc_v_ctrl0 =
VSC_BANK_LEN(vf_bank_len) |
VSC_TOP_INI_RCV_NUM(vsc_ini_rcv_num) |
VSC_TOP_RPT_L0_NUM(vsc_ini_rpt_p0_num) |
VSC_VERTICAL_SCALER_EN;
if (new_state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
priv->viu.osd_sc_v_ctrl0 |=
VSC_BOT_INI_RCV_NUM(vsc_bot_rcv_num) |
VSC_BOT_RPT_L0_NUM(vsc_bot_rpt_p0_num) |
VSC_PROG_INTERLACE;
priv->viu.osd_sc_v_phase_step = SC_PHASE_STEP(vf_phase_step);
priv->viu.osd_sc_v_ini_phase = VSC_INI_PHASE_BOT(bot_ini_phase);
} else {
priv->viu.osd_sc_v_ctrl0 = 0;
priv->viu.osd_sc_v_phase_step = 0;
priv->viu.osd_sc_v_ini_phase = 0;
}
/* Horizontal scaler is only used if width does not match */
if (src_w != dst_w) {
priv->viu.osd_sc_h_ctrl0 =
HSC_BANK_LENGTH(hf_bank_len) |
HSC_INI_RCV_NUM0(hsc_ini_rcv_num) |
HSC_RPT_P0_NUM0(hsc_ini_rpt_p0_num) |
HSC_HORIZ_SCALER_EN;
priv->viu.osd_sc_h_phase_step = SC_PHASE_STEP(hf_phase_step);
priv->viu.osd_sc_h_ini_phase = 0;
} else {
priv->viu.osd_sc_h_ctrl0 = 0;
priv->viu.osd_sc_h_phase_step = 0;
priv->viu.osd_sc_h_ini_phase = 0;
}
	/*
	 * The format of these registers is ((end - 1) << 16 | start):
	 * the x2/y2 coordinates coming from DRM are exclusive, while
	 * the hardware expects the inclusive end position, hence the "- 1".
	 * e.g. a span from x1 = 30 up to x2 = 1920 is written as (1919 << 16) | 30
	 */
priv->viu.osd1_blk0_cfg[1] =
((fixed16_to_int(new_state->src.x2) - 1) << 16) |
fixed16_to_int(new_state->src.x1);
priv->viu.osd1_blk0_cfg[2] =
((fixed16_to_int(new_state->src.y2) - 1) << 16) |
fixed16_to_int(new_state->src.y1);
priv->viu.osd1_blk0_cfg[3] = ((dest.x2 - 1) << 16) | dest.x1;
priv->viu.osd1_blk0_cfg[4] = ((dest.y2 - 1) << 16) | dest.y1;
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
priv->viu.osd_blend_din0_scope_h = ((dest.x2 - 1) << 16) | dest.x1;
priv->viu.osd_blend_din0_scope_v = ((dest.y2 - 1) << 16) | dest.y1;
priv->viu.osb_blend0_size = dst_h << 16 | dst_w;
priv->viu.osb_blend1_size = dst_h << 16 | dst_w;
}
/* Update Canvas with buffer address */
gem = drm_fb_dma_get_gem_obj(fb, 0);
priv->viu.osd1_addr = gem->dma_addr;
priv->viu.osd1_stride = fb->pitches[0];
priv->viu.osd1_height = fb->height;
priv->viu.osd1_width = fb->width;
if (priv->viu.osd1_afbcd) {
priv->afbcd.modifier = fb->modifier;
priv->afbcd.format = fb->format->format;
/* Calculate decoder write stride */
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
priv->viu.osd1_blk2_cfg4 =
meson_g12a_afbcd_line_stride(priv);
}
if (!meson_plane->enabled) {
/* Reset OSD1 before enabling it on GXL+ SoCs */
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL))
meson_viu_osd1_reset(priv);
meson_plane->enabled = true;
}
priv->viu.osd1_enabled = true;
spin_unlock_irqrestore(&priv->drm->event_lock, flags);
}
static void meson_plane_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct meson_plane *meson_plane = to_meson_plane(plane);
struct meson_drm *priv = meson_plane->priv;
if (priv->afbcd.ops) {
priv->afbcd.ops->reset(priv);
priv->afbcd.ops->disable(priv);
}
/* Disable OSD1 */
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
writel_bits_relaxed(VIU_OSD1_POSTBLD_SRC_OSD1, 0,
priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
else
writel_bits_relaxed(VPP_OSD1_POSTBLEND, 0,
priv->io_base + _REG(VPP_MISC));
meson_plane->enabled = false;
priv->viu.osd1_enabled = false;
}
static const struct drm_plane_helper_funcs meson_plane_helper_funcs = {
.atomic_check = meson_plane_atomic_check,
.atomic_disable = meson_plane_atomic_disable,
.atomic_update = meson_plane_atomic_update,
};
static bool meson_plane_format_mod_supported(struct drm_plane *plane,
u32 format, u64 modifier)
{
struct meson_plane *meson_plane = to_meson_plane(plane);
struct meson_drm *priv = meson_plane->priv;
int i;
if (modifier == DRM_FORMAT_MOD_INVALID)
return false;
if (modifier == DRM_FORMAT_MOD_LINEAR)
return true;
if (!meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) &&
!meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
return false;
if (modifier & ~DRM_FORMAT_MOD_ARM_AFBC(MESON_MOD_AFBC_VALID_BITS))
return false;
for (i = 0 ; i < plane->modifier_count ; ++i)
if (plane->modifiers[i] == modifier)
break;
if (i == plane->modifier_count) {
DRM_DEBUG_KMS("Unsupported modifier\n");
return false;
}
if (priv->afbcd.ops && priv->afbcd.ops->supported_fmt)
return priv->afbcd.ops->supported_fmt(modifier, format);
DRM_DEBUG_KMS("AFBC Unsupported\n");
return false;
}
static const struct drm_plane_funcs meson_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
.format_mod_supported = meson_plane_format_mod_supported,
};
static const uint32_t supported_drm_formats[] = {
DRM_FORMAT_ARGB8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_RGB888,
DRM_FORMAT_RGB565,
};
static const uint64_t format_modifiers_afbc_gxm[] = {
DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
AFBC_FORMAT_MOD_SPARSE |
AFBC_FORMAT_MOD_YTR),
	/* SPLIT mandates SPARSE, RGB modes mandate YTR */
DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
AFBC_FORMAT_MOD_YTR |
AFBC_FORMAT_MOD_SPARSE |
AFBC_FORMAT_MOD_SPLIT),
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID,
};
static const uint64_t format_modifiers_afbc_g12a[] = {
/*
* - TOFIX Support AFBC modifiers for YUV formats (16x16 + TILED)
	 * - SPLIT is mandatory for performance reasons when in 16x16
	 *   block size
	 * - 32x8 block size + SPLIT is mandatory with 4K frame size
	 *   for performance reasons
*/
DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
AFBC_FORMAT_MOD_SPARSE |
AFBC_FORMAT_MOD_SPLIT),
DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
AFBC_FORMAT_MOD_YTR |
AFBC_FORMAT_MOD_SPARSE |
AFBC_FORMAT_MOD_SPLIT),
DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
AFBC_FORMAT_MOD_SPARSE),
DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
AFBC_FORMAT_MOD_YTR |
AFBC_FORMAT_MOD_SPARSE),
DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
AFBC_FORMAT_MOD_SPARSE |
AFBC_FORMAT_MOD_SPLIT),
DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
AFBC_FORMAT_MOD_YTR |
AFBC_FORMAT_MOD_SPARSE |
AFBC_FORMAT_MOD_SPLIT),
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID,
};
static const uint64_t format_modifiers_default[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID,
};
int meson_plane_create(struct meson_drm *priv)
{
struct meson_plane *meson_plane;
struct drm_plane *plane;
const uint64_t *format_modifiers = format_modifiers_default;
meson_plane = devm_kzalloc(priv->drm->dev, sizeof(*meson_plane),
GFP_KERNEL);
if (!meson_plane)
return -ENOMEM;
meson_plane->priv = priv;
plane = &meson_plane->base;
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM))
format_modifiers = format_modifiers_afbc_gxm;
else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
format_modifiers = format_modifiers_afbc_g12a;
drm_universal_plane_init(priv->drm, plane, 0xFF,
&meson_plane_funcs,
supported_drm_formats,
ARRAY_SIZE(supported_drm_formats),
format_modifiers,
DRM_PLANE_TYPE_PRIMARY, "meson_primary_plane");
drm_plane_helper_add(plane, &meson_plane_helper_funcs);
	/* For now, OSD Primary plane is always in front */
drm_plane_create_zpos_immutable_property(plane, 1);
priv->primary_plane = plane;
return 0;
}
| linux-master | drivers/gpu/drm/meson/meson_plane.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 BayLibre, SAS
* Author: Neil Armstrong <[email protected]>
* Copyright (C) 2015 Amlogic, Inc. All rights reserved.
* Copyright (C) 2014 Endless Mobile
*/
#include <linux/export.h>
#include <linux/bitfield.h>
#include <drm/drm_fourcc.h>
#include "meson_drv.h"
#include "meson_viu.h"
#include "meson_registers.h"
/**
* DOC: Video Input Unit
*
* VIU Handles the Pixel scanout and the basic Colorspace conversions
* We handle the following features :
*
* - OSD1 RGB565/RGB888/xRGB8888 scanout
* - RGB conversion to x/cb/cr
* - Progressive or Interlace buffer scanout
* - OSD1 Commit on Vsync
* - HDR OSD matrix for GXL/GXM
*
* What is missing :
*
 * - BGR888/xBGR8888/BGRx8888 modes
* - YUV4:2:2 Y0CbY1Cr scanout
* - Conversion to YUV 4:4:4 from 4:2:2 input
* - Colorkey Alpha matching
* - Big endian scanout
* - X/Y reverse scanout
* - Global alpha setup
* - OSD2 support, would need interlace switching on vsync
* - OSD1 full scaling to support TV overscan
*/
/* OSD csc defines */
enum viu_matrix_sel_e {
VIU_MATRIX_OSD_EOTF = 0,
VIU_MATRIX_OSD,
};
enum viu_lut_sel_e {
VIU_LUT_OSD_EOTF = 0,
VIU_LUT_OSD_OETF,
};
#define COEFF_NORM(a) ((int)((((a) * 2048.0) + 1) / 2))
#define MATRIX_5X3_COEF_SIZE 24
#define EOTF_COEFF_NORM(a) ((int)((((a) * 4096.0) + 1) / 2))
#define EOTF_COEFF_SIZE 10
#define EOTF_COEFF_RIGHTSHIFT 1
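/*
 * Note (added): COEFF_NORM() turns a floating point coefficient into the
 * fixed-point value expected by the matrix registers, with 1.0 mapping to
 * 1024 and the "+ 1) / 2" providing rounding, e.g. COEFF_NORM(0.5)
 * evaluates to 512. EOTF_COEFF_NORM() does the same with 1.0 mapping
 * to 2048.
 */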
static int RGB709_to_YUV709l_coeff[MATRIX_5X3_COEF_SIZE] = {
0, 0, 0, /* pre offset */
COEFF_NORM(0.181873), COEFF_NORM(0.611831), COEFF_NORM(0.061765),
COEFF_NORM(-0.100251), COEFF_NORM(-0.337249), COEFF_NORM(0.437500),
COEFF_NORM(0.437500), COEFF_NORM(-0.397384), COEFF_NORM(-0.040116),
0, 0, 0, /* 10'/11'/12' */
0, 0, 0, /* 20'/21'/22' */
64, 512, 512, /* offset */
0, 0, 0 /* mode, right_shift, clip_en */
};
/* eotf matrix: bypass */
static int eotf_bypass_coeff[EOTF_COEFF_SIZE] = {
EOTF_COEFF_NORM(1.0), EOTF_COEFF_NORM(0.0), EOTF_COEFF_NORM(0.0),
EOTF_COEFF_NORM(0.0), EOTF_COEFF_NORM(1.0), EOTF_COEFF_NORM(0.0),
EOTF_COEFF_NORM(0.0), EOTF_COEFF_NORM(0.0), EOTF_COEFF_NORM(1.0),
EOTF_COEFF_RIGHTSHIFT /* right shift */
};
static void meson_viu_set_g12a_osd1_matrix(struct meson_drm *priv,
int *m, bool csc_on)
{
/* VPP WRAP OSD1 matrix */
writel(((m[0] & 0xfff) << 16) | (m[1] & 0xfff),
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_PRE_OFFSET0_1));
writel(m[2] & 0xfff,
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_PRE_OFFSET2));
writel(((m[3] & 0x1fff) << 16) | (m[4] & 0x1fff),
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF00_01));
writel(((m[5] & 0x1fff) << 16) | (m[6] & 0x1fff),
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF02_10));
writel(((m[7] & 0x1fff) << 16) | (m[8] & 0x1fff),
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF11_12));
writel(((m[9] & 0x1fff) << 16) | (m[10] & 0x1fff),
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF20_21));
writel((m[11] & 0x1fff),
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF22));
writel(((m[18] & 0xfff) << 16) | (m[19] & 0xfff),
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_OFFSET0_1));
writel(m[20] & 0xfff,
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_OFFSET2));
writel_bits_relaxed(BIT(0), csc_on ? BIT(0) : 0,
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_EN_CTRL));
}
static void meson_viu_set_osd_matrix(struct meson_drm *priv,
enum viu_matrix_sel_e m_select,
int *m, bool csc_on)
{
if (m_select == VIU_MATRIX_OSD) {
/* osd matrix, VIU_MATRIX_0 */
writel(((m[0] & 0xfff) << 16) | (m[1] & 0xfff),
priv->io_base + _REG(VIU_OSD1_MATRIX_PRE_OFFSET0_1));
writel(m[2] & 0xfff,
priv->io_base + _REG(VIU_OSD1_MATRIX_PRE_OFFSET2));
writel(((m[3] & 0x1fff) << 16) | (m[4] & 0x1fff),
priv->io_base + _REG(VIU_OSD1_MATRIX_COEF00_01));
writel(((m[5] & 0x1fff) << 16) | (m[6] & 0x1fff),
priv->io_base + _REG(VIU_OSD1_MATRIX_COEF02_10));
writel(((m[7] & 0x1fff) << 16) | (m[8] & 0x1fff),
priv->io_base + _REG(VIU_OSD1_MATRIX_COEF11_12));
writel(((m[9] & 0x1fff) << 16) | (m[10] & 0x1fff),
priv->io_base + _REG(VIU_OSD1_MATRIX_COEF20_21));
if (m[21]) {
writel(((m[11] & 0x1fff) << 16) | (m[12] & 0x1fff),
priv->io_base +
_REG(VIU_OSD1_MATRIX_COEF22_30));
writel(((m[13] & 0x1fff) << 16) | (m[14] & 0x1fff),
priv->io_base +
_REG(VIU_OSD1_MATRIX_COEF31_32));
writel(((m[15] & 0x1fff) << 16) | (m[16] & 0x1fff),
priv->io_base +
_REG(VIU_OSD1_MATRIX_COEF40_41));
writel(m[17] & 0x1fff, priv->io_base +
_REG(VIU_OSD1_MATRIX_COLMOD_COEF42));
} else
writel((m[11] & 0x1fff) << 16, priv->io_base +
_REG(VIU_OSD1_MATRIX_COEF22_30));
writel(((m[18] & 0xfff) << 16) | (m[19] & 0xfff),
priv->io_base + _REG(VIU_OSD1_MATRIX_OFFSET0_1));
writel(m[20] & 0xfff,
priv->io_base + _REG(VIU_OSD1_MATRIX_OFFSET2));
writel_bits_relaxed(3 << 30, m[21] << 30,
priv->io_base + _REG(VIU_OSD1_MATRIX_COLMOD_COEF42));
writel_bits_relaxed(7 << 16, m[22] << 16,
priv->io_base + _REG(VIU_OSD1_MATRIX_COLMOD_COEF42));
/* 23 reserved for clipping control */
writel_bits_relaxed(BIT(0), csc_on ? BIT(0) : 0,
priv->io_base + _REG(VIU_OSD1_MATRIX_CTRL));
writel_bits_relaxed(BIT(1), 0,
priv->io_base + _REG(VIU_OSD1_MATRIX_CTRL));
} else if (m_select == VIU_MATRIX_OSD_EOTF) {
int i;
/* osd eotf matrix, VIU_MATRIX_OSD_EOTF */
for (i = 0; i < 5; i++)
writel(((m[i * 2] & 0x1fff) << 16) |
(m[i * 2 + 1] & 0x1fff), priv->io_base +
_REG(VIU_OSD1_EOTF_CTL + i + 1));
writel_bits_relaxed(BIT(30), csc_on ? BIT(30) : 0,
priv->io_base + _REG(VIU_OSD1_EOTF_CTL));
writel_bits_relaxed(BIT(31), csc_on ? BIT(31) : 0,
priv->io_base + _REG(VIU_OSD1_EOTF_CTL));
}
}
#define OSD_EOTF_LUT_SIZE 33
#define OSD_OETF_LUT_SIZE 41
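/*
 * Note (added): the LUTs are loaded through the indirect address/data
 * ports, packing two table entries per 32-bit data write, which is why
 * the loops below consume the r_map/g_map/b_map values in pairs.
 */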
static void
meson_viu_set_osd_lut(struct meson_drm *priv, enum viu_lut_sel_e lut_sel,
unsigned int *r_map, unsigned int *g_map,
unsigned int *b_map, bool csc_on)
{
unsigned int addr_port;
unsigned int data_port;
unsigned int ctrl_port;
int i;
if (lut_sel == VIU_LUT_OSD_EOTF) {
addr_port = VIU_OSD1_EOTF_LUT_ADDR_PORT;
data_port = VIU_OSD1_EOTF_LUT_DATA_PORT;
ctrl_port = VIU_OSD1_EOTF_CTL;
} else if (lut_sel == VIU_LUT_OSD_OETF) {
addr_port = VIU_OSD1_OETF_LUT_ADDR_PORT;
data_port = VIU_OSD1_OETF_LUT_DATA_PORT;
ctrl_port = VIU_OSD1_OETF_CTL;
} else
return;
if (lut_sel == VIU_LUT_OSD_OETF) {
writel(0, priv->io_base + _REG(addr_port));
for (i = 0; i < (OSD_OETF_LUT_SIZE / 2); i++)
writel(r_map[i * 2] | (r_map[i * 2 + 1] << 16),
priv->io_base + _REG(data_port));
writel(r_map[OSD_OETF_LUT_SIZE - 1] | (g_map[0] << 16),
priv->io_base + _REG(data_port));
for (i = 0; i < (OSD_OETF_LUT_SIZE / 2); i++)
writel(g_map[i * 2 + 1] | (g_map[i * 2 + 2] << 16),
priv->io_base + _REG(data_port));
for (i = 0; i < (OSD_OETF_LUT_SIZE / 2); i++)
writel(b_map[i * 2] | (b_map[i * 2 + 1] << 16),
priv->io_base + _REG(data_port));
writel(b_map[OSD_OETF_LUT_SIZE - 1],
priv->io_base + _REG(data_port));
if (csc_on)
writel_bits_relaxed(0x7 << 29, 7 << 29,
priv->io_base + _REG(ctrl_port));
else
writel_bits_relaxed(0x7 << 29, 0,
priv->io_base + _REG(ctrl_port));
} else if (lut_sel == VIU_LUT_OSD_EOTF) {
writel(0, priv->io_base + _REG(addr_port));
for (i = 0; i < (OSD_EOTF_LUT_SIZE / 2); i++)
writel(r_map[i * 2] | (r_map[i * 2 + 1] << 16),
priv->io_base + _REG(data_port));
writel(r_map[OSD_EOTF_LUT_SIZE - 1] | (g_map[0] << 16),
priv->io_base + _REG(data_port));
for (i = 0; i < (OSD_EOTF_LUT_SIZE / 2); i++)
writel(g_map[i * 2 + 1] | (g_map[i * 2 + 2] << 16),
priv->io_base + _REG(data_port));
for (i = 0; i < (OSD_EOTF_LUT_SIZE / 2); i++)
writel(b_map[i * 2] | (b_map[i * 2 + 1] << 16),
priv->io_base + _REG(data_port));
writel(b_map[OSD_EOTF_LUT_SIZE - 1],
priv->io_base + _REG(data_port));
if (csc_on)
writel_bits_relaxed(7 << 27, 7 << 27,
priv->io_base + _REG(ctrl_port));
else
writel_bits_relaxed(7 << 27, 0,
priv->io_base + _REG(ctrl_port));
writel_bits_relaxed(BIT(31), BIT(31),
priv->io_base + _REG(ctrl_port));
}
}
/* eotf lut: linear */
static unsigned int eotf_33_linear_mapping[OSD_EOTF_LUT_SIZE] = {
0x0000, 0x0200, 0x0400, 0x0600,
0x0800, 0x0a00, 0x0c00, 0x0e00,
0x1000, 0x1200, 0x1400, 0x1600,
0x1800, 0x1a00, 0x1c00, 0x1e00,
0x2000, 0x2200, 0x2400, 0x2600,
0x2800, 0x2a00, 0x2c00, 0x2e00,
0x3000, 0x3200, 0x3400, 0x3600,
0x3800, 0x3a00, 0x3c00, 0x3e00,
0x4000
};
/* osd oetf lut: linear */
static unsigned int oetf_41_linear_mapping[OSD_OETF_LUT_SIZE] = {
0, 0, 0, 0,
0, 32, 64, 96,
128, 160, 196, 224,
256, 288, 320, 352,
384, 416, 448, 480,
512, 544, 576, 608,
640, 672, 704, 736,
768, 800, 832, 864,
896, 928, 960, 992,
1023, 1023, 1023, 1023,
1023
};
static void meson_viu_load_matrix(struct meson_drm *priv)
{
/* eotf lut bypass */
meson_viu_set_osd_lut(priv, VIU_LUT_OSD_EOTF,
eotf_33_linear_mapping, /* R */
eotf_33_linear_mapping, /* G */
eotf_33_linear_mapping, /* B */
false);
/* eotf matrix bypass */
meson_viu_set_osd_matrix(priv, VIU_MATRIX_OSD_EOTF,
eotf_bypass_coeff,
false);
/* oetf lut bypass */
meson_viu_set_osd_lut(priv, VIU_LUT_OSD_OETF,
oetf_41_linear_mapping, /* R */
oetf_41_linear_mapping, /* G */
oetf_41_linear_mapping, /* B */
false);
/* osd matrix RGB709 to YUV709 limit */
meson_viu_set_osd_matrix(priv, VIU_MATRIX_OSD,
RGB709_to_YUV709l_coeff,
true);
}
/* VIU OSD1 Reset as workaround for GXL+ Alpha OSD Bug */
void meson_viu_osd1_reset(struct meson_drm *priv)
{
uint32_t osd1_fifo_ctrl_stat, osd1_ctrl_stat2;
/* Save these 2 registers state */
osd1_fifo_ctrl_stat = readl_relaxed(
priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
osd1_ctrl_stat2 = readl_relaxed(
priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
/* Reset OSD1 */
writel_bits_relaxed(VIU_SW_RESET_OSD1, VIU_SW_RESET_OSD1,
priv->io_base + _REG(VIU_SW_RESET));
writel_bits_relaxed(VIU_SW_RESET_OSD1, 0,
priv->io_base + _REG(VIU_SW_RESET));
/* Rewrite these registers state lost in the reset */
writel_relaxed(osd1_fifo_ctrl_stat,
priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
writel_relaxed(osd1_ctrl_stat2,
priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
/* Reload the conversion matrix */
meson_viu_load_matrix(priv);
}
#define OSD1_MALI_ORDER_ABGR \
(FIELD_PREP(VIU_OSD1_MALI_AFBCD_A_REORDER, \
VIU_OSD1_MALI_REORDER_A) | \
FIELD_PREP(VIU_OSD1_MALI_AFBCD_B_REORDER, \
VIU_OSD1_MALI_REORDER_B) | \
FIELD_PREP(VIU_OSD1_MALI_AFBCD_G_REORDER, \
VIU_OSD1_MALI_REORDER_G) | \
FIELD_PREP(VIU_OSD1_MALI_AFBCD_R_REORDER, \
VIU_OSD1_MALI_REORDER_R))
#define OSD1_MALI_ORDER_ARGB \
(FIELD_PREP(VIU_OSD1_MALI_AFBCD_A_REORDER, \
VIU_OSD1_MALI_REORDER_A) | \
FIELD_PREP(VIU_OSD1_MALI_AFBCD_B_REORDER, \
VIU_OSD1_MALI_REORDER_R) | \
FIELD_PREP(VIU_OSD1_MALI_AFBCD_G_REORDER, \
VIU_OSD1_MALI_REORDER_G) | \
FIELD_PREP(VIU_OSD1_MALI_AFBCD_R_REORDER, \
VIU_OSD1_MALI_REORDER_B))
void meson_viu_g12a_enable_osd1_afbc(struct meson_drm *priv)
{
u32 afbc_order = OSD1_MALI_ORDER_ARGB;
/* Enable Mali AFBC Unpack */
writel_bits_relaxed(VIU_OSD1_MALI_UNPACK_EN,
VIU_OSD1_MALI_UNPACK_EN,
priv->io_base + _REG(VIU_OSD1_MALI_UNPACK_CTRL));
switch (priv->afbcd.format) {
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
afbc_order = OSD1_MALI_ORDER_ABGR;
break;
}
/* Setup RGBA Reordering */
writel_bits_relaxed(VIU_OSD1_MALI_AFBCD_A_REORDER |
VIU_OSD1_MALI_AFBCD_B_REORDER |
VIU_OSD1_MALI_AFBCD_G_REORDER |
VIU_OSD1_MALI_AFBCD_R_REORDER,
afbc_order,
priv->io_base + _REG(VIU_OSD1_MALI_UNPACK_CTRL));
/* Select AFBCD path for OSD1 */
writel_bits_relaxed(OSD_PATH_OSD_AXI_SEL_OSD1_AFBCD,
OSD_PATH_OSD_AXI_SEL_OSD1_AFBCD,
priv->io_base + _REG(OSD_PATH_MISC_CTRL));
}
void meson_viu_g12a_disable_osd1_afbc(struct meson_drm *priv)
{
/* Disable AFBCD path for OSD1 */
writel_bits_relaxed(OSD_PATH_OSD_AXI_SEL_OSD1_AFBCD, 0,
priv->io_base + _REG(OSD_PATH_MISC_CTRL));
/* Disable AFBCD unpack */
writel_bits_relaxed(VIU_OSD1_MALI_UNPACK_EN, 0,
priv->io_base + _REG(VIU_OSD1_MALI_UNPACK_CTRL));
}
void meson_viu_gxm_enable_osd1_afbc(struct meson_drm *priv)
{
writel_bits_relaxed(MALI_AFBC_MISC, FIELD_PREP(MALI_AFBC_MISC, 0x90),
priv->io_base + _REG(VIU_MISC_CTRL1));
}
void meson_viu_gxm_disable_osd1_afbc(struct meson_drm *priv)
{
writel_bits_relaxed(MALI_AFBC_MISC, FIELD_PREP(MALI_AFBC_MISC, 0x00),
priv->io_base + _REG(VIU_MISC_CTRL1));
}
void meson_viu_init(struct meson_drm *priv)
{
uint32_t reg;
/* Disable OSDs */
writel_bits_relaxed(VIU_OSD1_OSD_BLK_ENABLE | VIU_OSD1_OSD_ENABLE, 0,
priv->io_base + _REG(VIU_OSD1_CTRL_STAT));
writel_bits_relaxed(VIU_OSD1_OSD_BLK_ENABLE | VIU_OSD1_OSD_ENABLE, 0,
priv->io_base + _REG(VIU_OSD2_CTRL_STAT));
/* On GXL/GXM, Use the 10bit HDR conversion matrix */
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL))
meson_viu_load_matrix(priv);
else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
meson_viu_set_g12a_osd1_matrix(priv, RGB709_to_YUV709l_coeff,
true);
/* fix green/pink color distortion from vendor u-boot */
writel_bits_relaxed(OSD1_HDR2_CTRL_REG_ONLY_MAT |
OSD1_HDR2_CTRL_VDIN0_HDR2_TOP_EN, 0,
priv->io_base + _REG(OSD1_HDR2_CTRL));
}
/* Initialize OSD1 fifo control register */
reg = VIU_OSD_DDR_PRIORITY_URGENT |
VIU_OSD_FIFO_DEPTH_VAL(32) | /* fifo_depth_val: 32*8=256 */
VIU_OSD_WORDS_PER_BURST(4) | /* 4 words in 1 burst */
VIU_OSD_FIFO_LIMITS(2); /* fifo_lim: 2*16=32 */
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
reg |= (VIU_OSD_BURST_LENGTH_32 | VIU_OSD_HOLD_FIFO_LINES(31));
else
reg |= (VIU_OSD_BURST_LENGTH_64 | VIU_OSD_HOLD_FIFO_LINES(4));
writel_relaxed(reg, priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
writel_relaxed(reg, priv->io_base + _REG(VIU_OSD2_FIFO_CTRL_STAT));
/* Set OSD alpha replace value */
writel_bits_relaxed(0xff << OSD_REPLACE_SHIFT,
0xff << OSD_REPLACE_SHIFT,
priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
writel_bits_relaxed(0xff << OSD_REPLACE_SHIFT,
0xff << OSD_REPLACE_SHIFT,
priv->io_base + _REG(VIU_OSD2_CTRL_STAT2));
/* Disable VD1 AFBC */
	/* di_mif0_en=0 mif0_to_vpp_en=0 di_mad_en=0 and afbc vd1 set=0 */
writel_bits_relaxed(VIU_CTRL0_VD1_AFBC_MASK, 0,
priv->io_base + _REG(VIU_MISC_CTRL0));
writel_relaxed(0, priv->io_base + _REG(AFBC_ENABLE));
writel_relaxed(0x00FF00C0,
priv->io_base + _REG(VD1_IF0_LUMA_FIFO_SIZE));
writel_relaxed(0x00FF00C0,
priv->io_base + _REG(VD2_IF0_LUMA_FIFO_SIZE));
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
u32 val = (u32)VIU_OSD_BLEND_REORDER(0, 1) |
(u32)VIU_OSD_BLEND_REORDER(1, 0) |
(u32)VIU_OSD_BLEND_REORDER(2, 0) |
(u32)VIU_OSD_BLEND_REORDER(3, 0) |
(u32)VIU_OSD_BLEND_DIN_EN(1) |
(u32)VIU_OSD_BLEND1_DIN3_BYPASS_TO_DOUT1 |
(u32)VIU_OSD_BLEND1_DOUT_BYPASS_TO_BLEND2 |
(u32)VIU_OSD_BLEND_DIN0_BYPASS_TO_DOUT0 |
(u32)VIU_OSD_BLEND_BLEN2_PREMULT_EN(1) |
(u32)VIU_OSD_BLEND_HOLD_LINES(4);
writel_relaxed(val, priv->io_base + _REG(VIU_OSD_BLEND_CTRL));
writel_relaxed(OSD_BLEND_PATH_SEL_ENABLE,
priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
writel_relaxed(OSD_BLEND_PATH_SEL_ENABLE,
priv->io_base + _REG(OSD2_BLEND_SRC_CTRL));
writel_relaxed(0, priv->io_base + _REG(VD1_BLEND_SRC_CTRL));
writel_relaxed(0, priv->io_base + _REG(VD2_BLEND_SRC_CTRL));
writel_relaxed(0,
priv->io_base + _REG(VIU_OSD_BLEND_DUMMY_DATA0));
writel_relaxed(0,
priv->io_base + _REG(VIU_OSD_BLEND_DUMMY_ALPHA));
writel_bits_relaxed(DOLBY_BYPASS_EN(0xc), DOLBY_BYPASS_EN(0xc),
priv->io_base + _REG(DOLBY_PATH_CTRL));
meson_viu_g12a_disable_osd1_afbc(priv);
}
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM))
meson_viu_gxm_disable_osd1_afbc(priv);
priv->viu.osd1_enabled = false;
priv->viu.osd1_commit = false;
priv->viu.osd1_interlace = false;
}
| linux-master | drivers/gpu/drm/meson/meson_viu.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 BayLibre, SAS
* Author: Neil Armstrong <[email protected]>
* Copyright (C) 2015 Amlogic, Inc. All rights reserved.
* Copyright (C) 2014 Endless Mobile
*
* Written by:
* Jasper St. Pierre <[email protected]>
*/
#include <linux/export.h>
#include <linux/of_graph.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_device.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include "meson_registers.h"
#include "meson_vclk.h"
#include "meson_encoder_cvbs.h"
/* HHI VDAC Registers */
#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
#define HHI_VDAC_CNTL0_G12A 0x2EC /* 0xbd offset in data sheet */
#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
#define HHI_VDAC_CNTL1_G12A 0x2F0 /* 0xbe offset in data sheet */
struct meson_encoder_cvbs {
struct drm_encoder encoder;
struct drm_bridge bridge;
struct drm_bridge *next_bridge;
struct meson_drm *priv;
};
#define bridge_to_meson_encoder_cvbs(x) \
container_of(x, struct meson_encoder_cvbs, bridge)
/* Supported Modes */
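/*
 * Note (added): both modes below use the standard 13.5MHz interlaced
 * pixel clock; the ENCI/VDAC path itself runs from the 27MHz vclk2 set
 * up in meson_encoder_cvbs_atomic_enable().
 */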
struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT] = {
{ /* PAL */
.enci = &meson_cvbs_enci_pal,
.mode = {
DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500,
720, 732, 795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_INTERLACE),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3,
},
},
{ /* NTSC */
.enci = &meson_cvbs_enci_ntsc,
.mode = {
DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500,
720, 739, 801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_INTERLACE),
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3,
},
},
};
static const struct meson_cvbs_mode *
meson_cvbs_get_mode(const struct drm_display_mode *req_mode)
{
int i;
for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
if (drm_mode_match(req_mode, &meson_mode->mode,
DRM_MODE_MATCH_TIMINGS |
DRM_MODE_MATCH_CLOCK |
DRM_MODE_MATCH_FLAGS |
DRM_MODE_MATCH_3D_FLAGS))
return meson_mode;
}
return NULL;
}
static int meson_encoder_cvbs_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct meson_encoder_cvbs *meson_encoder_cvbs =
bridge_to_meson_encoder_cvbs(bridge);
return drm_bridge_attach(bridge->encoder, meson_encoder_cvbs->next_bridge,
&meson_encoder_cvbs->bridge, flags);
}
static int meson_encoder_cvbs_get_modes(struct drm_bridge *bridge,
struct drm_connector *connector)
{
struct meson_encoder_cvbs *meson_encoder_cvbs =
bridge_to_meson_encoder_cvbs(bridge);
struct meson_drm *priv = meson_encoder_cvbs->priv;
struct drm_display_mode *mode;
int i;
for (i = 0; i < MESON_CVBS_MODES_COUNT; ++i) {
struct meson_cvbs_mode *meson_mode = &meson_cvbs_modes[i];
mode = drm_mode_duplicate(priv->drm, &meson_mode->mode);
if (!mode) {
dev_err(priv->dev, "Failed to create a new display mode\n");
return 0;
}
drm_mode_probed_add(connector, mode);
}
return i;
}
static enum drm_mode_status
meson_encoder_cvbs_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *display_info,
const struct drm_display_mode *mode)
{
if (meson_cvbs_get_mode(mode))
return MODE_OK;
return MODE_BAD;
}
static int meson_encoder_cvbs_atomic_check(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
if (meson_cvbs_get_mode(&crtc_state->mode))
return 0;
return -EINVAL;
}
static void meson_encoder_cvbs_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state)
{
struct meson_encoder_cvbs *encoder_cvbs = bridge_to_meson_encoder_cvbs(bridge);
struct drm_atomic_state *state = bridge_state->base.state;
struct meson_drm *priv = encoder_cvbs->priv;
const struct meson_cvbs_mode *meson_mode;
struct drm_connector_state *conn_state;
struct drm_crtc_state *crtc_state;
struct drm_connector *connector;
connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
if (WARN_ON(!connector))
return;
conn_state = drm_atomic_get_new_connector_state(state, connector);
if (WARN_ON(!conn_state))
return;
crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
if (WARN_ON(!crtc_state))
return;
meson_mode = meson_cvbs_get_mode(&crtc_state->adjusted_mode);
if (WARN_ON(!meson_mode))
return;
meson_venci_cvbs_mode_set(priv, meson_mode->enci);
/* Setup 27MHz vclk2 for ENCI and VDAC */
meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS,
MESON_VCLK_CVBS, MESON_VCLK_CVBS,
MESON_VCLK_CVBS, MESON_VCLK_CVBS,
true);
/* VDAC0 source is not from ATV */
writel_bits_relaxed(VENC_VDAC_SEL_ATV_DMD, 0,
priv->io_base + _REG(VENC_VDAC_DACSEL0));
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
regmap_write(priv->hhi, HHI_VDAC_CNTL0, 1);
regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) {
regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0xf0001);
regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0x906001);
regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 0);
}
}
static void meson_encoder_cvbs_atomic_disable(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state)
{
struct meson_encoder_cvbs *meson_encoder_cvbs =
bridge_to_meson_encoder_cvbs(bridge);
struct meson_drm *priv = meson_encoder_cvbs->priv;
/* Disable CVBS VDAC */
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0);
regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 0);
} else {
regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);
}
}
static const struct drm_bridge_funcs meson_encoder_cvbs_bridge_funcs = {
.attach = meson_encoder_cvbs_attach,
.mode_valid = meson_encoder_cvbs_mode_valid,
.get_modes = meson_encoder_cvbs_get_modes,
.atomic_enable = meson_encoder_cvbs_atomic_enable,
.atomic_disable = meson_encoder_cvbs_atomic_disable,
.atomic_check = meson_encoder_cvbs_atomic_check,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
};
int meson_encoder_cvbs_init(struct meson_drm *priv)
{
struct drm_device *drm = priv->drm;
struct meson_encoder_cvbs *meson_encoder_cvbs;
struct drm_connector *connector;
struct device_node *remote;
int ret;
meson_encoder_cvbs = devm_kzalloc(priv->dev, sizeof(*meson_encoder_cvbs), GFP_KERNEL);
if (!meson_encoder_cvbs)
return -ENOMEM;
/* CVBS Connector Bridge */
remote = of_graph_get_remote_node(priv->dev->of_node, 0, 0);
if (!remote) {
dev_info(drm->dev, "CVBS Output connector not available\n");
return 0;
}
meson_encoder_cvbs->next_bridge = of_drm_find_bridge(remote);
of_node_put(remote);
if (!meson_encoder_cvbs->next_bridge) {
dev_err(priv->dev, "Failed to find CVBS Connector bridge\n");
return -EPROBE_DEFER;
}
/* CVBS Encoder Bridge */
meson_encoder_cvbs->bridge.funcs = &meson_encoder_cvbs_bridge_funcs;
meson_encoder_cvbs->bridge.of_node = priv->dev->of_node;
meson_encoder_cvbs->bridge.type = DRM_MODE_CONNECTOR_Composite;
meson_encoder_cvbs->bridge.ops = DRM_BRIDGE_OP_MODES;
meson_encoder_cvbs->bridge.interlace_allowed = true;
drm_bridge_add(&meson_encoder_cvbs->bridge);
meson_encoder_cvbs->priv = priv;
/* Encoder */
ret = drm_simple_encoder_init(priv->drm, &meson_encoder_cvbs->encoder,
DRM_MODE_ENCODER_TVDAC);
if (ret) {
dev_err(priv->dev, "Failed to init CVBS encoder: %d\n", ret);
return ret;
}
meson_encoder_cvbs->encoder.possible_crtcs = BIT(0);
/* Attach CVBS Encoder Bridge to Encoder */
ret = drm_bridge_attach(&meson_encoder_cvbs->encoder, &meson_encoder_cvbs->bridge, NULL,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret) {
dev_err(priv->dev, "Failed to attach bridge: %d\n", ret);
return ret;
}
/* Initialize & attach Bridge Connector */
connector = drm_bridge_connector_init(priv->drm, &meson_encoder_cvbs->encoder);
if (IS_ERR(connector)) {
dev_err(priv->dev, "Unable to create CVBS bridge connector\n");
return PTR_ERR(connector);
}
drm_connector_attach_encoder(connector, &meson_encoder_cvbs->encoder);
priv->encoders[MESON_ENC_CVBS] = meson_encoder_cvbs;
return 0;
}
void meson_encoder_cvbs_remove(struct meson_drm *priv)
{
struct meson_encoder_cvbs *meson_encoder_cvbs;
if (priv->encoders[MESON_ENC_CVBS]) {
meson_encoder_cvbs = priv->encoders[MESON_ENC_CVBS];
drm_bridge_remove(&meson_encoder_cvbs->bridge);
drm_bridge_remove(meson_encoder_cvbs->next_bridge);
}
}
| linux-master | drivers/gpu/drm/meson/meson_encoder_cvbs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 BayLibre, SAS
* Author: Neil Armstrong <[email protected]>
* Copyright (C) 2015 Amlogic, Inc. All rights reserved.
*/
#include <linux/export.h>
#include <drm/drm_print.h>
#include "meson_drv.h"
#include "meson_vclk.h"
/**
* DOC: Video Clocks
*
* VCLK is the "Pixel Clock" frequency generator from a dedicated PLL.
* We handle the following encodings :
*
* - CVBS 27MHz generator via the VCLK2 to the VENCI and VDAC blocks
* - HDMI Pixel Clocks generation
*
* What is missing :
*
 * - Generate Pixel clocks for 2K/4K 10bit formats
*
* Clock generator scheme :
*
* .. code::
*
* __________ _________ _____
* | | | | | |--ENCI
* | HDMI PLL |-| PLL_DIV |--- VCLK--| |--ENCL
* |__________| |_________| \ | MUX |--ENCP
* --VCLK2-| |--VDAC
* |_____|--HDMI-TX
*
 * Final clocks can take input from either VCLK or VCLK2, but
* VCLK is the preferred path for HDMI clocking and VCLK2 is the
* preferred path for CVBS VDAC clocking.
*
* VCLK and VCLK2 have fixed divided clocks paths for /1, /2, /4, /6 or /12.
*
* The PLL_DIV can achieve an additional fractional dividing like
* 1.5, 3.5, 3.75... to generate special 2K and 4K 10bit clocks.
*/
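/*
 * Worked example (added, derived from the params[] table below): the
 * 148.5MHz HDMI pixel clock uses a 2.97GHz HDMI PLL divided by
 * OD1/OD2/OD3 = 1/2/2 down to 742.5MHz, then by the vid_pll /5 divider
 * to 148.5MHz, with a final vclk divider of /1.
 */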
/* HHI Registers */
#define HHI_VID_PLL_CLK_DIV 0x1a0 /* 0x68 offset in data sheet */
#define VID_PLL_EN BIT(19)
#define VID_PLL_BYPASS BIT(18)
#define VID_PLL_PRESET BIT(15)
#define HHI_VIID_CLK_DIV 0x128 /* 0x4a offset in data sheet */
#define VCLK2_DIV_MASK 0xff
#define VCLK2_DIV_EN BIT(16)
#define VCLK2_DIV_RESET BIT(17)
#define CTS_VDAC_SEL_MASK (0xf << 28)
#define CTS_VDAC_SEL_SHIFT 28
#define HHI_VIID_CLK_CNTL 0x12c /* 0x4b offset in data sheet */
#define VCLK2_EN BIT(19)
#define VCLK2_SEL_MASK (0x7 << 16)
#define VCLK2_SEL_SHIFT 16
#define VCLK2_SOFT_RESET BIT(15)
#define VCLK2_DIV1_EN BIT(0)
#define HHI_VID_CLK_DIV 0x164 /* 0x59 offset in data sheet */
#define VCLK_DIV_MASK 0xff
#define VCLK_DIV_EN BIT(16)
#define VCLK_DIV_RESET BIT(17)
#define CTS_ENCP_SEL_MASK (0xf << 24)
#define CTS_ENCP_SEL_SHIFT 24
#define CTS_ENCI_SEL_MASK (0xf << 28)
#define CTS_ENCI_SEL_SHIFT 28
#define HHI_VID_CLK_CNTL 0x17c /* 0x5f offset in data sheet */
#define VCLK_EN BIT(19)
#define VCLK_SEL_MASK (0x7 << 16)
#define VCLK_SEL_SHIFT 16
#define VCLK_SOFT_RESET BIT(15)
#define VCLK_DIV1_EN BIT(0)
#define VCLK_DIV2_EN BIT(1)
#define VCLK_DIV4_EN BIT(2)
#define VCLK_DIV6_EN BIT(3)
#define VCLK_DIV12_EN BIT(4)
#define HHI_VID_CLK_CNTL2 0x194 /* 0x65 offset in data sheet */
#define CTS_ENCI_EN BIT(0)
#define CTS_ENCP_EN BIT(2)
#define CTS_VDAC_EN BIT(4)
#define HDMI_TX_PIXEL_EN BIT(5)
#define HHI_HDMI_CLK_CNTL 0x1cc /* 0x73 offset in data sheet */
#define HDMI_TX_PIXEL_SEL_MASK (0xf << 16)
#define HDMI_TX_PIXEL_SEL_SHIFT 16
#define CTS_HDMI_SYS_SEL_MASK (0x7 << 9)
#define CTS_HDMI_SYS_DIV_MASK (0x7f)
#define CTS_HDMI_SYS_EN BIT(8)
#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
#define HHI_HDMI_PLL_CNTL 0x320 /* 0xc8 offset in data sheet */
#define HHI_HDMI_PLL_CNTL_EN BIT(30)
#define HHI_HDMI_PLL_CNTL2 0x324 /* 0xc9 offset in data sheet */
#define HHI_HDMI_PLL_CNTL3 0x328 /* 0xca offset in data sheet */
#define HHI_HDMI_PLL_CNTL4 0x32C /* 0xcb offset in data sheet */
#define HHI_HDMI_PLL_CNTL5 0x330 /* 0xcc offset in data sheet */
#define HHI_HDMI_PLL_CNTL6 0x334 /* 0xcd offset in data sheet */
#define HHI_HDMI_PLL_CNTL7 0x338 /* 0xce offset in data sheet */
#define HDMI_PLL_RESET BIT(28)
#define HDMI_PLL_RESET_G12A BIT(29)
#define HDMI_PLL_LOCK BIT(31)
#define HDMI_PLL_LOCK_G12A (3 << 30)
#define FREQ_1000_1001(_freq) DIV_ROUND_CLOSEST(_freq * 1000, 1001)
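/*
 * Note (added): FREQ_1000_1001() computes the NTSC-style 1000/1001 clock
 * variant used by the 59.94Hz modes, e.g. FREQ_1000_1001(148500) rounds
 * to 148352 (kHz).
 */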
/* VID PLL Dividers */
enum {
VID_PLL_DIV_1 = 0,
VID_PLL_DIV_2,
VID_PLL_DIV_2p5,
VID_PLL_DIV_3,
VID_PLL_DIV_3p5,
VID_PLL_DIV_3p75,
VID_PLL_DIV_4,
VID_PLL_DIV_5,
VID_PLL_DIV_6,
VID_PLL_DIV_6p25,
VID_PLL_DIV_7,
VID_PLL_DIV_7p5,
VID_PLL_DIV_12,
VID_PLL_DIV_14,
VID_PLL_DIV_15,
};
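/*
 * Note (added, assumption): the shift_val/shift_sel pairs below feed the
 * vid_pll fractional divider with a repeating bit pattern (e.g. 0x0aaa,
 * an alternating bit pattern, for the /2 setting), which is how the
 * non-integer ratios such as /2.5 or /3.75 are obtained.
 */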
static void meson_vid_pll_set(struct meson_drm *priv, unsigned int div)
{
unsigned int shift_val = 0;
unsigned int shift_sel = 0;
/* Disable vid_pll output clock */
regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV, VID_PLL_EN, 0);
regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV, VID_PLL_PRESET, 0);
switch (div) {
case VID_PLL_DIV_2:
shift_val = 0x0aaa;
shift_sel = 0;
break;
case VID_PLL_DIV_2p5:
shift_val = 0x5294;
shift_sel = 2;
break;
case VID_PLL_DIV_3:
shift_val = 0x0db6;
shift_sel = 0;
break;
case VID_PLL_DIV_3p5:
shift_val = 0x36cc;
shift_sel = 1;
break;
case VID_PLL_DIV_3p75:
shift_val = 0x6666;
shift_sel = 2;
break;
case VID_PLL_DIV_4:
shift_val = 0x0ccc;
shift_sel = 0;
break;
case VID_PLL_DIV_5:
shift_val = 0x739c;
shift_sel = 2;
break;
case VID_PLL_DIV_6:
shift_val = 0x0e38;
shift_sel = 0;
break;
case VID_PLL_DIV_6p25:
shift_val = 0x0000;
shift_sel = 3;
break;
case VID_PLL_DIV_7:
shift_val = 0x3c78;
shift_sel = 1;
break;
case VID_PLL_DIV_7p5:
shift_val = 0x78f0;
shift_sel = 2;
break;
case VID_PLL_DIV_12:
shift_val = 0x0fc0;
shift_sel = 0;
break;
case VID_PLL_DIV_14:
shift_val = 0x3f80;
shift_sel = 1;
break;
case VID_PLL_DIV_15:
shift_val = 0x7f80;
shift_sel = 2;
break;
}
if (div == VID_PLL_DIV_1)
/* Enable vid_pll bypass to HDMI pll */
regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
VID_PLL_BYPASS, VID_PLL_BYPASS);
else {
/* Disable Bypass */
regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
VID_PLL_BYPASS, 0);
/* Clear sel */
regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
3 << 16, 0);
regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
VID_PLL_PRESET, 0);
regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
0x7fff, 0);
/* Setup sel and val */
regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
3 << 16, shift_sel << 16);
regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
VID_PLL_PRESET, VID_PLL_PRESET);
regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
0x7fff, shift_val);
regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
VID_PLL_PRESET, 0);
}
/* Enable the vid_pll output clock */
regmap_update_bits(priv->hhi, HHI_VID_PLL_CLK_DIV,
VID_PLL_EN, VID_PLL_EN);
}
/*
* Setup VCLK2 for 27MHz, and enable clocks for ENCI and VDAC
*
* TOFIX: Refactor into table to also handle HDMI frequency and paths
*/
static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
{
unsigned int val;
/* Setup PLL to output 1.485GHz */
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800023d);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00404e00);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x4800023d);
/* Poll for lock bit */
regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
(val & HDMI_PLL_LOCK), 10, 0);
} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) {
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x4000027b);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb300);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0xa6212844);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c4d000c);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
/* Reset PLL */
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
HDMI_PLL_RESET, HDMI_PLL_RESET);
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
HDMI_PLL_RESET, 0);
/* Poll for lock bit */
regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
(val & HDMI_PLL_LOCK), 10, 0);
} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x1a0504f7);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00010000);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x00000000);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x6a28dc00);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x65771290);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x39272000);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL7, 0x56540000);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x3a0504f7);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x1a0504f7);
/* Poll for lock bit */
regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
((val & HDMI_PLL_LOCK_G12A) == HDMI_PLL_LOCK_G12A),
10, 0);
}
/* Disable VCLK2 */
regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL, VCLK2_EN, 0);
/* Setup vid_pll to /1 */
meson_vid_pll_set(priv, VID_PLL_DIV_1);
	/* Setup the VCLK2 divider value to achieve 27MHz: 1485MHz / 55 = 27MHz */
regmap_update_bits(priv->hhi, HHI_VIID_CLK_DIV,
VCLK2_DIV_MASK, (55 - 1));
/* select vid_pll for vclk2 */
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL,
VCLK2_SEL_MASK, (0 << VCLK2_SEL_SHIFT));
else
regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL,
VCLK2_SEL_MASK, (4 << VCLK2_SEL_SHIFT));
/* enable vclk2 gate */
regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL, VCLK2_EN, VCLK2_EN);
/* select vclk_div1 for enci */
regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
CTS_ENCI_SEL_MASK, (8 << CTS_ENCI_SEL_SHIFT));
/* select vclk_div1 for vdac */
regmap_update_bits(priv->hhi, HHI_VIID_CLK_DIV,
CTS_VDAC_SEL_MASK, (8 << CTS_VDAC_SEL_SHIFT));
/* release vclk2_div_reset and enable vclk2_div */
regmap_update_bits(priv->hhi, HHI_VIID_CLK_DIV,
VCLK2_DIV_EN | VCLK2_DIV_RESET, VCLK2_DIV_EN);
/* enable vclk2_div1 gate */
regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL,
VCLK2_DIV1_EN, VCLK2_DIV1_EN);
/* reset vclk2 */
regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL,
VCLK2_SOFT_RESET, VCLK2_SOFT_RESET);
regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL,
VCLK2_SOFT_RESET, 0);
/* enable enci_clk */
regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL2,
CTS_ENCI_EN, CTS_ENCI_EN);
/* enable vdac_clk */
regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL2,
CTS_VDAC_EN, CTS_VDAC_EN);
}
enum {
/* PLL O1 O2 O3 VP DV EN TX */
/* 4320 /4 /4 /1 /5 /1 => /2 /2 */
MESON_VCLK_HDMI_ENCI_54000 = 0,
/* 4320 /4 /4 /1 /5 /1 => /1 /2 */
MESON_VCLK_HDMI_DDR_54000,
/* 2970 /4 /1 /1 /5 /1 => /1 /2 */
MESON_VCLK_HDMI_DDR_148500,
/* 2970 /2 /2 /2 /5 /1 => /1 /1 */
MESON_VCLK_HDMI_74250,
/* 2970 /1 /2 /2 /5 /1 => /1 /1 */
MESON_VCLK_HDMI_148500,
/* 2970 /1 /1 /1 /5 /2 => /1 /1 */
MESON_VCLK_HDMI_297000,
/* 5940 /1 /1 /2 /5 /1 => /1 /1 */
MESON_VCLK_HDMI_594000,
/* 2970 /1 /1 /1 /5 /1 => /1 /2 */
MESON_VCLK_HDMI_594000_YUV420,
};
struct meson_vclk_params {
unsigned int pll_freq;
unsigned int phy_freq;
unsigned int vclk_freq;
unsigned int venc_freq;
unsigned int pixel_freq;
unsigned int pll_od1;
unsigned int pll_od2;
unsigned int pll_od3;
unsigned int vid_pll_div;
unsigned int vclk_div;
} params[] = {
[MESON_VCLK_HDMI_ENCI_54000] = {
.pll_freq = 4320000,
.phy_freq = 270000,
.vclk_freq = 54000,
.venc_freq = 54000,
.pixel_freq = 54000,
.pll_od1 = 4,
.pll_od2 = 4,
.pll_od3 = 1,
.vid_pll_div = VID_PLL_DIV_5,
.vclk_div = 1,
},
[MESON_VCLK_HDMI_DDR_54000] = {
.pll_freq = 4320000,
.phy_freq = 270000,
.vclk_freq = 54000,
.venc_freq = 54000,
.pixel_freq = 27000,
.pll_od1 = 4,
.pll_od2 = 4,
.pll_od3 = 1,
.vid_pll_div = VID_PLL_DIV_5,
.vclk_div = 1,
},
[MESON_VCLK_HDMI_DDR_148500] = {
.pll_freq = 2970000,
.phy_freq = 742500,
.vclk_freq = 148500,
.venc_freq = 148500,
.pixel_freq = 74250,
.pll_od1 = 4,
.pll_od2 = 1,
.pll_od3 = 1,
.vid_pll_div = VID_PLL_DIV_5,
.vclk_div = 1,
},
[MESON_VCLK_HDMI_74250] = {
.pll_freq = 2970000,
.phy_freq = 742500,
.vclk_freq = 74250,
.venc_freq = 74250,
.pixel_freq = 74250,
.pll_od1 = 2,
.pll_od2 = 2,
.pll_od3 = 2,
.vid_pll_div = VID_PLL_DIV_5,
.vclk_div = 1,
},
[MESON_VCLK_HDMI_148500] = {
.pll_freq = 2970000,
.phy_freq = 1485000,
.vclk_freq = 148500,
.venc_freq = 148500,
.pixel_freq = 148500,
.pll_od1 = 1,
.pll_od2 = 2,
.pll_od3 = 2,
.vid_pll_div = VID_PLL_DIV_5,
.vclk_div = 1,
},
[MESON_VCLK_HDMI_297000] = {
.pll_freq = 5940000,
.phy_freq = 2970000,
.venc_freq = 297000,
.vclk_freq = 297000,
.pixel_freq = 297000,
.pll_od1 = 2,
.pll_od2 = 1,
.pll_od3 = 1,
.vid_pll_div = VID_PLL_DIV_5,
.vclk_div = 2,
},
[MESON_VCLK_HDMI_594000] = {
.pll_freq = 5940000,
.phy_freq = 5940000,
.venc_freq = 594000,
.vclk_freq = 594000,
.pixel_freq = 594000,
.pll_od1 = 1,
.pll_od2 = 1,
.pll_od3 = 2,
.vid_pll_div = VID_PLL_DIV_5,
.vclk_div = 1,
},
[MESON_VCLK_HDMI_594000_YUV420] = {
.pll_freq = 5940000,
.phy_freq = 2970000,
.venc_freq = 594000,
.vclk_freq = 594000,
.pixel_freq = 297000,
.pll_od1 = 2,
.pll_od2 = 1,
.pll_od3 = 1,
.vid_pll_div = VID_PLL_DIV_5,
.vclk_div = 1,
},
{ /* sentinel */ },
};
static inline unsigned int pll_od_to_reg(unsigned int od)
{
switch (od) {
case 1:
return 0;
case 2:
return 1;
case 4:
return 2;
case 8:
return 3;
}
/* Invalid */
return 0;
}
static void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m,
unsigned int frac, unsigned int od1,
unsigned int od2, unsigned int od3)
{
unsigned int val;
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000200 | m);
if (frac)
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2,
0x00004000 | frac);
else
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2,
0x00000000);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
/* Enable and unreset */
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
0x7 << 28, HHI_HDMI_PLL_CNTL_EN);
/* Poll for lock bit */
regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
val, (val & HDMI_PLL_LOCK), 10, 0);
} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) {
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x40000200 | m);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb000 | frac);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
/* Reset PLL */
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
HDMI_PLL_RESET, HDMI_PLL_RESET);
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
HDMI_PLL_RESET, 0);
/* Poll for lock bit */
regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
(val & HDMI_PLL_LOCK), 10, 0);
} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x0b3a0400 | m);
/* Enable and reset */
/* TODO: add specific macro for g12a here */
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
0x3 << 28, 0x3 << 28);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, frac);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x00000000);
/* G12A HDMI PLL Needs specific parameters for 5.4GHz */
if (m >= 0xf7) {
if (frac < 0x10000) {
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4,
0x6a685c00);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5,
0x11551293);
} else {
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4,
0xea68dc00);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5,
0x65771290);
}
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x39272000);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL7, 0x55540000);
} else {
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0a691c00);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x33771290);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x39270000);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL7, 0x50540000);
}
do {
/* Reset PLL */
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
HDMI_PLL_RESET_G12A, HDMI_PLL_RESET_G12A);
/* UN-Reset PLL */
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
HDMI_PLL_RESET_G12A, 0);
/* Poll for lock bits */
if (!regmap_read_poll_timeout(priv->hhi,
HHI_HDMI_PLL_CNTL, val,
((val & HDMI_PLL_LOCK_G12A)
== HDMI_PLL_LOCK_G12A),
10, 100))
break;
		} while (1);
}
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
3 << 16, pll_od_to_reg(od1) << 16);
else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
3 << 21, pll_od_to_reg(od1) << 21);
else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
3 << 16, pll_od_to_reg(od1) << 16);
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
3 << 22, pll_od_to_reg(od2) << 22);
else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
3 << 23, pll_od_to_reg(od2) << 23);
else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
3 << 18, pll_od_to_reg(od2) << 18);
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
3 << 18, pll_od_to_reg(od3) << 18);
else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
3 << 19, pll_od_to_reg(od3) << 19);
else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
3 << 20, pll_od_to_reg(od3) << 20);
}
#define XTAL_FREQ 24000
static unsigned int meson_hdmi_pll_get_m(struct meson_drm *priv,
unsigned int pll_freq)
{
/* The GXBB PLL has a /2 pre-multiplier */
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB))
pll_freq /= 2;
return pll_freq / XTAL_FREQ;
}
#define HDMI_FRAC_MAX_GXBB 4096
#define HDMI_FRAC_MAX_GXL 1024
#define HDMI_FRAC_MAX_G12A 131072
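/*
 * Worked example (added, not authoritative): for a 2970000kHz PLL target
 * on GXL, meson_hdmi_pll_get_m() gives m = 2970000 / 24000 = 123 (0x7b)
 * and the function below gives frac = 2970000 * 1024 / 24000 - 123 * 1024
 * = 768 (0x300), matching the hardcoded GXL values in meson_vclk_set().
 */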
static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv,
unsigned int m,
unsigned int pll_freq)
{
unsigned int parent_freq = XTAL_FREQ;
unsigned int frac_max = HDMI_FRAC_MAX_GXL;
unsigned int frac_m;
unsigned int frac;
/* The GXBB PLL has a /2 pre-multiplier and a larger FRAC width */
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
frac_max = HDMI_FRAC_MAX_GXBB;
parent_freq *= 2;
}
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
frac_max = HDMI_FRAC_MAX_G12A;
	/* We can have a perfect match! */
if (pll_freq / m == parent_freq &&
pll_freq % m == 0)
return 0;
frac = div_u64((u64)pll_freq * (u64)frac_max, parent_freq);
frac_m = m * frac_max;
if (frac_m > frac)
return frac_max;
frac -= frac_m;
return min((u16)frac, (u16)(frac_max - 1));
}
static bool meson_hdmi_pll_validate_params(struct meson_drm *priv,
unsigned int m,
unsigned int frac)
{
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
/* Empiric supported min/max dividers */
if (m < 53 || m > 123)
return false;
if (frac >= HDMI_FRAC_MAX_GXBB)
return false;
} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) {
/* Empiric supported min/max dividers */
if (m < 106 || m > 247)
return false;
if (frac >= HDMI_FRAC_MAX_GXL)
return false;
} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
/* Empiric supported min/max dividers */
if (m < 106 || m > 247)
return false;
if (frac >= HDMI_FRAC_MAX_G12A)
return false;
}
return true;
}
static bool meson_hdmi_pll_find_params(struct meson_drm *priv,
unsigned int freq,
unsigned int *m,
unsigned int *frac,
unsigned int *od)
{
/* Cycle from /16 to /2 */
for (*od = 16 ; *od > 1 ; *od >>= 1) {
*m = meson_hdmi_pll_get_m(priv, freq * *od);
if (!*m)
continue;
*frac = meson_hdmi_pll_get_frac(priv, *m, freq * *od);
DRM_DEBUG_DRIVER("PLL params for %dkHz: m=%x frac=%x od=%d\n",
freq, *m, *frac, *od);
if (meson_hdmi_pll_validate_params(priv, *m, *frac))
return true;
}
return false;
}
/* pll_freq is the frequency after the OD dividers */
enum drm_mode_status
meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq)
{
unsigned int od, m, frac;
/* In DMT mode, path after PLL is always /10 */
freq *= 10;
/* Check against soc revision/package limits */
if (priv->limits) {
if (priv->limits->max_hdmi_phy_freq &&
freq > priv->limits->max_hdmi_phy_freq)
return MODE_CLOCK_HIGH;
}
if (meson_hdmi_pll_find_params(priv, freq, &m, &frac, &od))
return MODE_OK;
return MODE_CLOCK_RANGE;
}
EXPORT_SYMBOL_GPL(meson_vclk_dmt_supported_freq);
/* pll_freq is the frequency after the OD dividers */
static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
unsigned int pll_freq)
{
unsigned int od, m, frac, od1, od2, od3;
if (meson_hdmi_pll_find_params(priv, pll_freq, &m, &frac, &od)) {
/* OD2 goes to the PHY, and needs to be *10, so keep OD3=1 */
od3 = 1;
if (od < 4) {
od1 = 2;
od2 = 1;
} else {
od2 = od / 4;
od1 = od / od2;
}
DRM_DEBUG_DRIVER("PLL params for %dkHz: m=%x frac=%x od=%d/%d/%d\n",
pll_freq, m, frac, od1, od2, od3);
meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
return;
}
DRM_ERROR("Fatal, unable to find parameters for PLL freq %d\n",
pll_freq);
}
enum drm_mode_status
meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
unsigned int vclk_freq)
{
int i;
DRM_DEBUG_DRIVER("phy_freq = %d vclk_freq = %d\n",
phy_freq, vclk_freq);
/* Check against soc revision/package limits */
if (priv->limits) {
if (priv->limits->max_hdmi_phy_freq &&
phy_freq > priv->limits->max_hdmi_phy_freq)
return MODE_CLOCK_HIGH;
}
for (i = 0 ; params[i].pixel_freq ; ++i) {
DRM_DEBUG_DRIVER("i = %d pixel_freq = %d alt = %d\n",
i, params[i].pixel_freq,
FREQ_1000_1001(params[i].pixel_freq));
DRM_DEBUG_DRIVER("i = %d phy_freq = %d alt = %d\n",
i, params[i].phy_freq,
FREQ_1000_1001(params[i].phy_freq/10)*10);
/* Match strict frequency */
if (phy_freq == params[i].phy_freq &&
vclk_freq == params[i].vclk_freq)
return MODE_OK;
/* Match 1000/1001 variant */
if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/10)*10) &&
vclk_freq == FREQ_1000_1001(params[i].vclk_freq))
return MODE_OK;
}
return MODE_CLOCK_RANGE;
}
EXPORT_SYMBOL_GPL(meson_vclk_vic_supported_freq);
static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
unsigned int od1, unsigned int od2, unsigned int od3,
unsigned int vid_pll_div, unsigned int vclk_div,
unsigned int hdmi_tx_div, unsigned int venc_div,
bool hdmi_use_enci, bool vic_alternate_clock)
{
unsigned int m = 0, frac = 0;
/* Set HDMI-TX sys clock */
regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
CTS_HDMI_SYS_SEL_MASK, 0);
regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
CTS_HDMI_SYS_DIV_MASK, 0);
regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
CTS_HDMI_SYS_EN, CTS_HDMI_SYS_EN);
/* Set HDMI PLL rate */
if (!od1 && !od2 && !od3) {
meson_hdmi_pll_generic_set(priv, pll_base_freq);
} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXBB)) {
switch (pll_base_freq) {
case 2970000:
m = 0x3d;
frac = vic_alternate_clock ? 0xd02 : 0xe00;
break;
case 4320000:
m = vic_alternate_clock ? 0x59 : 0x5a;
frac = vic_alternate_clock ? 0xe8f : 0;
break;
case 5940000:
m = 0x7b;
frac = vic_alternate_clock ? 0xa05 : 0xc00;
break;
}
meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) {
switch (pll_base_freq) {
case 2970000:
m = 0x7b;
frac = vic_alternate_clock ? 0x281 : 0x300;
break;
case 4320000:
m = vic_alternate_clock ? 0xb3 : 0xb4;
frac = vic_alternate_clock ? 0x347 : 0;
break;
case 5940000:
m = 0xf7;
frac = vic_alternate_clock ? 0x102 : 0x200;
break;
}
meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
switch (pll_base_freq) {
case 2970000:
m = 0x7b;
frac = vic_alternate_clock ? 0x140b4 : 0x18000;
break;
case 4320000:
m = vic_alternate_clock ? 0xb3 : 0xb4;
frac = vic_alternate_clock ? 0x1a3ee : 0;
break;
case 5940000:
m = 0xf7;
frac = vic_alternate_clock ? 0x8148 : 0x10000;
break;
}
meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
}
/* Setup vid_pll divider */
meson_vid_pll_set(priv, vid_pll_div);
/* Set VCLK div */
regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
VCLK_SEL_MASK, 0);
regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
VCLK_DIV_MASK, vclk_div - 1);
/* Set HDMI-TX source */
switch (hdmi_tx_div) {
case 1:
/* enable vclk_div1 gate */
regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
VCLK_DIV1_EN, VCLK_DIV1_EN);
/* select vclk_div1 for HDMI-TX */
regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
HDMI_TX_PIXEL_SEL_MASK, 0);
break;
case 2:
/* enable vclk_div2 gate */
regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
VCLK_DIV2_EN, VCLK_DIV2_EN);
/* select vclk_div2 for HDMI-TX */
regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
HDMI_TX_PIXEL_SEL_MASK, 1 << HDMI_TX_PIXEL_SEL_SHIFT);
break;
case 4:
/* enable vclk_div4 gate */
regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
VCLK_DIV4_EN, VCLK_DIV4_EN);
/* select vclk_div4 for HDMI-TX */
regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
HDMI_TX_PIXEL_SEL_MASK, 2 << HDMI_TX_PIXEL_SEL_SHIFT);
break;
case 6:
/* enable vclk_div6 gate */
regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
VCLK_DIV6_EN, VCLK_DIV6_EN);
/* select vclk_div6 for HDMI-TX */
regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
HDMI_TX_PIXEL_SEL_MASK, 3 << HDMI_TX_PIXEL_SEL_SHIFT);
break;
case 12:
/* enable vclk_div12 gate */
regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
VCLK_DIV12_EN, VCLK_DIV12_EN);
/* select vclk_div12 for HDMI-TX */
regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
HDMI_TX_PIXEL_SEL_MASK, 4 << HDMI_TX_PIXEL_SEL_SHIFT);
break;
}
regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL2,
HDMI_TX_PIXEL_EN, HDMI_TX_PIXEL_EN);
/* Set ENCI/ENCP Source */
switch (venc_div) {
case 1:
/* enable vclk_div1 gate */
regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
VCLK_DIV1_EN, VCLK_DIV1_EN);
if (hdmi_use_enci)
/* select vclk_div1 for enci */
regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
CTS_ENCI_SEL_MASK, 0);
else
/* select vclk_div1 for encp */
regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
CTS_ENCP_SEL_MASK, 0);
break;
case 2:
/* enable vclk_div2 gate */
regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
VCLK_DIV2_EN, VCLK_DIV2_EN);
if (hdmi_use_enci)
/* select vclk_div2 for enci */
regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
CTS_ENCI_SEL_MASK, 1 << CTS_ENCI_SEL_SHIFT);
else
/* select vclk_div2 for encp */
regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
CTS_ENCP_SEL_MASK, 1 << CTS_ENCP_SEL_SHIFT);
break;
case 4:
/* enable vclk_div4 gate */
regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
VCLK_DIV4_EN, VCLK_DIV4_EN);
if (hdmi_use_enci)
/* select vclk_div4 for enci */
regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
CTS_ENCI_SEL_MASK, 2 << CTS_ENCI_SEL_SHIFT);
else
/* select vclk_div4 for encp */
regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
CTS_ENCP_SEL_MASK, 2 << CTS_ENCP_SEL_SHIFT);
break;
case 6:
/* enable vclk_div6 gate */
regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
VCLK_DIV6_EN, VCLK_DIV6_EN);
if (hdmi_use_enci)
/* select vclk_div6 for enci */
regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
CTS_ENCI_SEL_MASK, 3 << CTS_ENCI_SEL_SHIFT);
else
/* select vclk_div6 for encp */
regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
CTS_ENCP_SEL_MASK, 3 << CTS_ENCP_SEL_SHIFT);
break;
case 12:
/* enable vclk_div12 gate */
regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL,
VCLK_DIV12_EN, VCLK_DIV12_EN);
if (hdmi_use_enci)
/* select vclk_div12 for enci */
regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
CTS_ENCI_SEL_MASK, 4 << CTS_ENCI_SEL_SHIFT);
else
/* select vclk_div12 for encp */
regmap_update_bits(priv->hhi, HHI_VID_CLK_DIV,
CTS_ENCP_SEL_MASK, 4 << CTS_ENCP_SEL_SHIFT);
break;
}
if (hdmi_use_enci)
/* Enable ENCI clock gate */
regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL2,
CTS_ENCI_EN, CTS_ENCI_EN);
else
/* Enable ENCP clock gate */
regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL2,
CTS_ENCP_EN, CTS_ENCP_EN);
regmap_update_bits(priv->hhi, HHI_VID_CLK_CNTL, VCLK_EN, VCLK_EN);
}
void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
unsigned int phy_freq, unsigned int vclk_freq,
unsigned int venc_freq, unsigned int dac_freq,
bool hdmi_use_enci)
{
bool vic_alternate_clock = false;
unsigned int freq;
unsigned int hdmi_tx_div;
unsigned int venc_div;
if (target == MESON_VCLK_TARGET_CVBS) {
meson_venci_cvbs_clock_config(priv);
return;
} else if (target == MESON_VCLK_TARGET_DMT) {
/*
* The DMT clock path is fixed after the PLL:
* - automatic PLL freq + OD management
* - vid_pll_div = VID_PLL_DIV_5
* - vclk_div = 2
* - hdmi_tx_div = 1
* - venc_div = 1
* - encp encoder
*/
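		/*
		 * Worked example (illustrative): with vid_pll_div = VID_PLL_DIV_5
		 * and vclk_div = 2, the DMT pixel clock ends up as one tenth of
		 * the PLL output, so a 148500 kHz pixel clock comes from a
		 * 1485000 kHz phy_freq handed to meson_hdmi_pll_generic_set().
		 */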
meson_vclk_set(priv, phy_freq, 0, 0, 0,
VID_PLL_DIV_5, 2, 1, 1, false, false);
return;
}
hdmi_tx_div = vclk_freq / dac_freq;
if (hdmi_tx_div == 0) {
pr_err("Fatal Error, invalid HDMI-TX freq %d\n",
dac_freq);
return;
}
venc_div = vclk_freq / venc_freq;
if (venc_div == 0) {
pr_err("Fatal Error, invalid HDMI venc freq %d\n",
venc_freq);
return;
}
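	/*
	 * Example: with dac_freq == vclk_freq (e.g. plain 1080p60 at
	 * 148500 kHz) hdmi_tx_div is 1 and the non-DDR params entry is
	 * kept by the loop below; a dac_freq of vclk_freq / 2 gives
	 * hdmi_tx_div = 2 and selects the matching *_DDR_* entry instead.
	 */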
for (freq = 0 ; params[freq].pixel_freq ; ++freq) {
if ((phy_freq == params[freq].phy_freq ||
phy_freq == FREQ_1000_1001(params[freq].phy_freq/10)*10) &&
(vclk_freq == params[freq].vclk_freq ||
vclk_freq == FREQ_1000_1001(params[freq].vclk_freq))) {
if (vclk_freq != params[freq].vclk_freq)
vic_alternate_clock = true;
else
vic_alternate_clock = false;
if (freq == MESON_VCLK_HDMI_ENCI_54000 &&
!hdmi_use_enci)
continue;
if (freq == MESON_VCLK_HDMI_DDR_54000 &&
hdmi_use_enci)
continue;
if (freq == MESON_VCLK_HDMI_DDR_148500 &&
dac_freq == vclk_freq)
continue;
if (freq == MESON_VCLK_HDMI_148500 &&
dac_freq != vclk_freq)
continue;
break;
}
}
if (!params[freq].pixel_freq) {
pr_err("Fatal Error, invalid HDMI vclk freq %d\n", vclk_freq);
return;
}
meson_vclk_set(priv, params[freq].pll_freq,
params[freq].pll_od1, params[freq].pll_od2,
params[freq].pll_od3, params[freq].vid_pll_div,
params[freq].vclk_div, hdmi_tx_div, venc_div,
hdmi_use_enci, vic_alternate_clock);
}
EXPORT_SYMBOL_GPL(meson_vclk_setup);
| linux-master | drivers/gpu/drm/meson/meson_vclk.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 BayLibre, SAS
* Author: Neil Armstrong <[email protected]>
* Copyright (C) 2015 Amlogic, Inc. All rights reserved.
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_device.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_print.h>
#include <linux/videodev2.h>
#include "meson_drv.h"
#include "meson_dw_hdmi.h"
#include "meson_registers.h"
#define DRIVER_NAME "meson-dw-hdmi"
#define DRIVER_DESC "Amlogic Meson HDMI-TX DRM driver"
/**
* DOC: HDMI Output
*
* HDMI Output is composed of :
*
* - A Synopsys DesignWare HDMI Controller IP
* - A TOP control block controlling the Clocks and PHY
 * - A custom HDMI PHY in order to convert video to a TMDS signal
*
* .. code::
*
* ___________________________________
* | HDMI TOP |<= HPD
* |___________________________________|
* | | |
* | Synopsys HDMI | HDMI PHY |=> TMDS
* | Controller |________________|
* |___________________________________|<=> DDC
*
*
* The HDMI TOP block only supports HPD sensing.
* The Synopsys HDMI Controller interrupt is routed
* through the TOP Block interrupt.
 * Communication with the TOP Block and the Synopsys
 * HDMI Controller is done through a pair of address +
 * read/write data registers.
* The HDMI PHY is configured by registers in the
* HHI register block.
*
* Pixel data arrives in 4:4:4 format from the VENC
* block and the VPU HDMI mux selects either the ENCI
* encoder for the 576i or 480i formats or the ENCP
* encoder for all the other formats including
* interlaced HD formats.
* The VENC uses a DVI encoder on top of the ENCI
* or ENCP encoders to generate DVI timings for the
* HDMI controller.
*
* GXBB, GXL and GXM embeds the Synopsys DesignWare
* HDMI TX IP version 2.01a with HDCP and I2C & S/PDIF
* audio source interfaces.
*
* We handle the following features :
*
* - HPD Rise & Fall interrupt
* - HDMI Controller Interrupt
* - HDMI PHY Init for 480i to 1080p60
* - VENC & HDMI Clock setup for 480i to 1080p60
* - VENC Mode setup for 480i to 1080p60
*
* What is missing :
*
* - PHY, Clock and Mode setup for 2k && 4k modes
 * - SCDC Scrambling mode for HDMI 2.0a
* - HDCP Setup
* - CEC Management
*/
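/*
 * For reference, an indirect TOP register write on the GX SoCs (see
 * dw_hdmi_top_write() below) boils down to:
 *
 *   writel(addr, hdmitx + HDMITX_TOP_ADDR_REG);  (address, written twice)
 *   writel(addr, hdmitx + HDMITX_TOP_ADDR_REG);
 *   writel(data, hdmitx + HDMITX_TOP_DATA_REG);
 *
 * and a read additionally needs a second HDMITX_TOP_DATA_REG load to
 * latch the value. G12A instead maps the TOP and DWC registers as
 * plain MMIO.
 */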
/* TOP Block Communication Channel */
#define HDMITX_TOP_ADDR_REG 0x0
#define HDMITX_TOP_DATA_REG 0x4
#define HDMITX_TOP_CTRL_REG 0x8
#define HDMITX_TOP_G12A_OFFSET 0x8000
/* Controller Communication Channel */
#define HDMITX_DWC_ADDR_REG 0x10
#define HDMITX_DWC_DATA_REG 0x14
#define HDMITX_DWC_CTRL_REG 0x18
/* HHI Registers */
#define HHI_MEM_PD_REG0 0x100 /* 0x40 */
#define HHI_HDMI_CLK_CNTL 0x1cc /* 0x73 */
#define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 */
#define HHI_HDMI_PHY_CNTL1 0x3a4 /* 0xe9 */
#define HHI_HDMI_PHY_CNTL2 0x3a8 /* 0xea */
#define HHI_HDMI_PHY_CNTL3 0x3ac /* 0xeb */
#define HHI_HDMI_PHY_CNTL4 0x3b0 /* 0xec */
#define HHI_HDMI_PHY_CNTL5 0x3b4 /* 0xed */
static DEFINE_SPINLOCK(reg_lock);
enum meson_venc_source {
MESON_VENC_SOURCE_NONE = 0,
MESON_VENC_SOURCE_ENCI = 1,
MESON_VENC_SOURCE_ENCP = 2,
};
struct meson_dw_hdmi;
struct meson_dw_hdmi_data {
unsigned int (*top_read)(struct meson_dw_hdmi *dw_hdmi,
unsigned int addr);
void (*top_write)(struct meson_dw_hdmi *dw_hdmi,
unsigned int addr, unsigned int data);
unsigned int (*dwc_read)(struct meson_dw_hdmi *dw_hdmi,
unsigned int addr);
void (*dwc_write)(struct meson_dw_hdmi *dw_hdmi,
unsigned int addr, unsigned int data);
};
struct meson_dw_hdmi {
struct dw_hdmi_plat_data dw_plat_data;
struct meson_drm *priv;
struct device *dev;
void __iomem *hdmitx;
const struct meson_dw_hdmi_data *data;
struct reset_control *hdmitx_apb;
struct reset_control *hdmitx_ctrl;
struct reset_control *hdmitx_phy;
u32 irq_stat;
struct dw_hdmi *hdmi;
struct drm_bridge *bridge;
};
static inline int dw_hdmi_is_compatible(struct meson_dw_hdmi *dw_hdmi,
const char *compat)
{
return of_device_is_compatible(dw_hdmi->dev->of_node, compat);
}
/* PHY (via TOP bridge) and Controller dedicated register interface */
static unsigned int dw_hdmi_top_read(struct meson_dw_hdmi *dw_hdmi,
unsigned int addr)
{
unsigned long flags;
unsigned int data;
spin_lock_irqsave(®_lock, flags);
/* ADDR must be written twice */
writel(addr & 0xffff, dw_hdmi->hdmitx + HDMITX_TOP_ADDR_REG);
writel(addr & 0xffff, dw_hdmi->hdmitx + HDMITX_TOP_ADDR_REG);
/* Read needs a second DATA read */
data = readl(dw_hdmi->hdmitx + HDMITX_TOP_DATA_REG);
data = readl(dw_hdmi->hdmitx + HDMITX_TOP_DATA_REG);
spin_unlock_irqrestore(®_lock, flags);
return data;
}
static unsigned int dw_hdmi_g12a_top_read(struct meson_dw_hdmi *dw_hdmi,
unsigned int addr)
{
return readl(dw_hdmi->hdmitx + HDMITX_TOP_G12A_OFFSET + (addr << 2));
}
static inline void dw_hdmi_top_write(struct meson_dw_hdmi *dw_hdmi,
unsigned int addr, unsigned int data)
{
unsigned long flags;
spin_lock_irqsave(®_lock, flags);
/* ADDR must be written twice */
writel(addr & 0xffff, dw_hdmi->hdmitx + HDMITX_TOP_ADDR_REG);
writel(addr & 0xffff, dw_hdmi->hdmitx + HDMITX_TOP_ADDR_REG);
/* Write needs single DATA write */
writel(data, dw_hdmi->hdmitx + HDMITX_TOP_DATA_REG);
spin_unlock_irqrestore(®_lock, flags);
}
static inline void dw_hdmi_g12a_top_write(struct meson_dw_hdmi *dw_hdmi,
unsigned int addr, unsigned int data)
{
writel(data, dw_hdmi->hdmitx + HDMITX_TOP_G12A_OFFSET + (addr << 2));
}
/* Helper to change specific bits in PHY registers */
static inline void dw_hdmi_top_write_bits(struct meson_dw_hdmi *dw_hdmi,
unsigned int addr,
unsigned int mask,
unsigned int val)
{
unsigned int data = dw_hdmi->data->top_read(dw_hdmi, addr);
data &= ~mask;
data |= val;
dw_hdmi->data->top_write(dw_hdmi, addr, data);
}
static unsigned int dw_hdmi_dwc_read(struct meson_dw_hdmi *dw_hdmi,
unsigned int addr)
{
unsigned long flags;
unsigned int data;
spin_lock_irqsave(®_lock, flags);
/* ADDR must be written twice */
writel(addr & 0xffff, dw_hdmi->hdmitx + HDMITX_DWC_ADDR_REG);
writel(addr & 0xffff, dw_hdmi->hdmitx + HDMITX_DWC_ADDR_REG);
/* Read needs a second DATA read */
data = readl(dw_hdmi->hdmitx + HDMITX_DWC_DATA_REG);
data = readl(dw_hdmi->hdmitx + HDMITX_DWC_DATA_REG);
spin_unlock_irqrestore(®_lock, flags);
return data;
}
static unsigned int dw_hdmi_g12a_dwc_read(struct meson_dw_hdmi *dw_hdmi,
unsigned int addr)
{
return readb(dw_hdmi->hdmitx + addr);
}
static inline void dw_hdmi_dwc_write(struct meson_dw_hdmi *dw_hdmi,
unsigned int addr, unsigned int data)
{
unsigned long flags;
spin_lock_irqsave(®_lock, flags);
/* ADDR must be written twice */
writel(addr & 0xffff, dw_hdmi->hdmitx + HDMITX_DWC_ADDR_REG);
writel(addr & 0xffff, dw_hdmi->hdmitx + HDMITX_DWC_ADDR_REG);
/* Write needs single DATA write */
writel(data, dw_hdmi->hdmitx + HDMITX_DWC_DATA_REG);
spin_unlock_irqrestore(®_lock, flags);
}
static inline void dw_hdmi_g12a_dwc_write(struct meson_dw_hdmi *dw_hdmi,
unsigned int addr, unsigned int data)
{
writeb(data, dw_hdmi->hdmitx + addr);
}
/* Helper to change specific bits in controller registers */
static inline void dw_hdmi_dwc_write_bits(struct meson_dw_hdmi *dw_hdmi,
unsigned int addr,
unsigned int mask,
unsigned int val)
{
unsigned int data = dw_hdmi->data->dwc_read(dw_hdmi, addr);
data &= ~mask;
data |= val;
dw_hdmi->data->dwc_write(dw_hdmi, addr, data);
}
/* Bridge */
/* Setup PHY bandwidth modes */
static void meson_hdmi_phy_setup_mode(struct meson_dw_hdmi *dw_hdmi,
const struct drm_display_mode *mode,
bool mode_is_420)
{
struct meson_drm *priv = dw_hdmi->priv;
unsigned int pixel_clock = mode->clock;
	/* For YUV420 the pixel clock is halved, unlike the VENC clock */
	if (mode_is_420)
		pixel_clock /= 2;
if (dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxm-dw-hdmi")) {
if (pixel_clock >= 371250) {
/* 5.94Gbps, 3.7125Gbps */
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x333d3282);
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x2136315b);
} else if (pixel_clock >= 297000) {
/* 2.97Gbps */
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x33303382);
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x2036315b);
} else if (pixel_clock >= 148500) {
/* 1.485Gbps */
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x33303362);
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x2016315b);
} else {
/* 742.5Mbps, and below */
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x33604142);
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x0016315b);
}
} else if (dw_hdmi_is_compatible(dw_hdmi,
"amlogic,meson-gxbb-dw-hdmi")) {
if (pixel_clock >= 371250) {
/* 5.94Gbps, 3.7125Gbps */
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x33353245);
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x2100115b);
} else if (pixel_clock >= 297000) {
/* 2.97Gbps */
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x33634283);
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0xb000115b);
} else {
/* 1.485Gbps, and below */
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x33632122);
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x2000115b);
}
} else if (dw_hdmi_is_compatible(dw_hdmi,
"amlogic,meson-g12a-dw-hdmi")) {
if (pixel_clock >= 371250) {
/* 5.94Gbps, 3.7125Gbps */
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x37eb65c4);
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x2ab0ff3b);
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL5, 0x0000080b);
} else if (pixel_clock >= 297000) {
/* 2.97Gbps */
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x33eb6262);
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x2ab0ff3b);
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL5, 0x00000003);
} else {
/* 1.485Gbps, and below */
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x33eb4242);
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x2ab0ff3b);
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL5, 0x00000003);
}
}
}
static inline void meson_dw_hdmi_phy_reset(struct meson_dw_hdmi *dw_hdmi)
{
struct meson_drm *priv = dw_hdmi->priv;
/* Enable and software reset */
regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1, 0xf, 0xf);
mdelay(2);
/* Enable and unreset */
regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1, 0xf, 0xe);
mdelay(2);
}
static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
const struct drm_display_info *display,
const struct drm_display_mode *mode)
{
struct meson_dw_hdmi *dw_hdmi = (struct meson_dw_hdmi *)data;
bool is_hdmi2_sink = display->hdmi.scdc.supported;
struct meson_drm *priv = dw_hdmi->priv;
unsigned int wr_clk =
readl_relaxed(priv->io_base + _REG(VPU_HDMI_SETTING));
bool mode_is_420 = false;
DRM_DEBUG_DRIVER("\"%s\" div%d\n", mode->name,
mode->clock > 340000 ? 40 : 10);
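	/*
	 * Modes above a 340 MHz TMDS clock use the HDMI 2.0 1/40 clock
	 * ratio (the "div40" hint above); they also get the alternate
	 * TMDS clock pattern programmed further down.
	 */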
if (drm_mode_is_420_only(display, mode) ||
(!is_hdmi2_sink && drm_mode_is_420_also(display, mode)) ||
dw_hdmi_bus_fmt_is_420(hdmi))
mode_is_420 = true;
/* Enable clocks */
regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100);
	/* Bring HDMITX MEM output out of power down */
regmap_update_bits(priv->hhi, HHI_MEM_PD_REG0, 0xff << 8, 0);
/* Bring out of reset */
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_SW_RESET, 0);
/* Enable internal pixclk, tmds_clk, spdif_clk, i2s_clk, cecclk */
dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_CLK_CNTL,
0x3, 0x3);
/* Enable cec_clk and hdcp22_tmdsclk_en */
dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_CLK_CNTL,
0x3 << 4, 0x3 << 4);
/* Enable normal output to PHY */
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12));
/* TMDS pattern setup */
if (mode->clock > 340000 && !mode_is_420) {
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01,
0);
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_23,
0x03ff03ff);
} else {
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01,
0x001f001f);
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_23,
0x001f001f);
}
/* Load TMDS pattern */
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_CNTL, 0x1);
msleep(20);
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_CNTL, 0x2);
/* Setup PHY parameters */
meson_hdmi_phy_setup_mode(dw_hdmi, mode, mode_is_420);
/* Setup PHY */
regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1,
0xffff << 16, 0x0390 << 16);
/* BIT_INVERT */
if (dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxm-dw-hdmi") ||
dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-g12a-dw-hdmi"))
regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1,
BIT(17), 0);
else
regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1,
BIT(17), BIT(17));
/* Disable clock, fifo, fifo_wr */
regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1, 0xf, 0);
dw_hdmi_set_high_tmds_clock_ratio(hdmi, display);
msleep(100);
/* Reset PHY 3 times in a row */
meson_dw_hdmi_phy_reset(dw_hdmi);
meson_dw_hdmi_phy_reset(dw_hdmi);
meson_dw_hdmi_phy_reset(dw_hdmi);
	/* Temporarily disable VENC video stream */
if (priv->venc.hdmi_use_enci)
writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN));
else
writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN));
	/* Temporarily disable HDMI video stream to HDMI-TX */
writel_bits_relaxed(0x3, 0,
priv->io_base + _REG(VPU_HDMI_SETTING));
writel_bits_relaxed(0xf << 8, 0,
priv->io_base + _REG(VPU_HDMI_SETTING));
/* Re-Enable VENC video stream */
if (priv->venc.hdmi_use_enci)
writel_relaxed(1, priv->io_base + _REG(ENCI_VIDEO_EN));
else
writel_relaxed(1, priv->io_base + _REG(ENCP_VIDEO_EN));
/* Push back HDMI clock settings */
writel_bits_relaxed(0xf << 8, wr_clk & (0xf << 8),
priv->io_base + _REG(VPU_HDMI_SETTING));
/* Enable and Select HDMI video source for HDMI-TX */
if (priv->venc.hdmi_use_enci)
writel_bits_relaxed(0x3, MESON_VENC_SOURCE_ENCI,
priv->io_base + _REG(VPU_HDMI_SETTING));
else
writel_bits_relaxed(0x3, MESON_VENC_SOURCE_ENCP,
priv->io_base + _REG(VPU_HDMI_SETTING));
return 0;
}
static void dw_hdmi_phy_disable(struct dw_hdmi *hdmi,
void *data)
{
struct meson_dw_hdmi *dw_hdmi = (struct meson_dw_hdmi *)data;
struct meson_drm *priv = dw_hdmi->priv;
DRM_DEBUG_DRIVER("\n");
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0);
}
static enum drm_connector_status dw_hdmi_read_hpd(struct dw_hdmi *hdmi,
void *data)
{
struct meson_dw_hdmi *dw_hdmi = (struct meson_dw_hdmi *)data;
return !!dw_hdmi->data->top_read(dw_hdmi, HDMITX_TOP_STAT0) ?
connector_status_connected : connector_status_disconnected;
}
static void dw_hdmi_setup_hpd(struct dw_hdmi *hdmi,
void *data)
{
struct meson_dw_hdmi *dw_hdmi = (struct meson_dw_hdmi *)data;
/* Setup HPD Filter */
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_HPD_FILTER,
(0xa << 12) | 0xa0);
/* Clear interrupts */
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_INTR_STAT_CLR,
HDMITX_TOP_INTR_HPD_RISE | HDMITX_TOP_INTR_HPD_FALL);
/* Unmask interrupts */
dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_INTR_MASKN,
HDMITX_TOP_INTR_HPD_RISE | HDMITX_TOP_INTR_HPD_FALL,
HDMITX_TOP_INTR_HPD_RISE | HDMITX_TOP_INTR_HPD_FALL);
}
static const struct dw_hdmi_phy_ops meson_dw_hdmi_phy_ops = {
.init = dw_hdmi_phy_init,
.disable = dw_hdmi_phy_disable,
.read_hpd = dw_hdmi_read_hpd,
.setup_hpd = dw_hdmi_setup_hpd,
};
static irqreturn_t dw_hdmi_top_irq(int irq, void *dev_id)
{
struct meson_dw_hdmi *dw_hdmi = dev_id;
u32 stat;
stat = dw_hdmi->data->top_read(dw_hdmi, HDMITX_TOP_INTR_STAT);
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_INTR_STAT_CLR, stat);
/* HPD Events, handle in the threaded interrupt handler */
if (stat & (HDMITX_TOP_INTR_HPD_RISE | HDMITX_TOP_INTR_HPD_FALL)) {
dw_hdmi->irq_stat = stat;
return IRQ_WAKE_THREAD;
}
/* HDMI Controller Interrupt */
if (stat & 1)
return IRQ_NONE;
/* TOFIX Handle HDCP Interrupts */
return IRQ_HANDLED;
}
/* Threaded interrupt handler to manage HPD events */
static irqreturn_t dw_hdmi_top_thread_irq(int irq, void *dev_id)
{
struct meson_dw_hdmi *dw_hdmi = dev_id;
u32 stat = dw_hdmi->irq_stat;
/* HPD Events */
if (stat & (HDMITX_TOP_INTR_HPD_RISE | HDMITX_TOP_INTR_HPD_FALL)) {
bool hpd_connected = false;
if (stat & HDMITX_TOP_INTR_HPD_RISE)
hpd_connected = true;
dw_hdmi_setup_rx_sense(dw_hdmi->hdmi, hpd_connected,
hpd_connected);
drm_helper_hpd_irq_event(dw_hdmi->bridge->dev);
drm_bridge_hpd_notify(dw_hdmi->bridge,
hpd_connected ? connector_status_connected
: connector_status_disconnected);
}
return IRQ_HANDLED;
}
/* DW HDMI Regmap */
static int meson_dw_hdmi_reg_read(void *context, unsigned int reg,
unsigned int *result)
{
struct meson_dw_hdmi *dw_hdmi = context;
*result = dw_hdmi->data->dwc_read(dw_hdmi, reg);
return 0;
}
static int meson_dw_hdmi_reg_write(void *context, unsigned int reg,
unsigned int val)
{
struct meson_dw_hdmi *dw_hdmi = context;
dw_hdmi->data->dwc_write(dw_hdmi, reg, val);
return 0;
}
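/*
 * The Synopsys controller registers are only 8 bits wide but are
 * reached through the 32 bit wide accessors above, which is what the
 * reg_bits/val_bits split of the regmap configuration below reflects.
 */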
static const struct regmap_config meson_dw_hdmi_regmap_config = {
.reg_bits = 32,
.val_bits = 8,
.reg_read = meson_dw_hdmi_reg_read,
.reg_write = meson_dw_hdmi_reg_write,
.max_register = 0x10000,
.fast_io = true,
};
static const struct meson_dw_hdmi_data meson_dw_hdmi_gx_data = {
.top_read = dw_hdmi_top_read,
.top_write = dw_hdmi_top_write,
.dwc_read = dw_hdmi_dwc_read,
.dwc_write = dw_hdmi_dwc_write,
};
static const struct meson_dw_hdmi_data meson_dw_hdmi_g12a_data = {
.top_read = dw_hdmi_g12a_top_read,
.top_write = dw_hdmi_g12a_top_write,
.dwc_read = dw_hdmi_g12a_dwc_read,
.dwc_write = dw_hdmi_g12a_dwc_write,
};
static void meson_dw_hdmi_init(struct meson_dw_hdmi *meson_dw_hdmi)
{
struct meson_drm *priv = meson_dw_hdmi->priv;
/* Enable clocks */
regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100);
	/* Bring HDMITX MEM output out of power down */
regmap_update_bits(priv->hhi, HHI_MEM_PD_REG0, 0xff << 8, 0);
/* Reset HDMITX APB & TX & PHY */
reset_control_reset(meson_dw_hdmi->hdmitx_apb);
reset_control_reset(meson_dw_hdmi->hdmitx_ctrl);
reset_control_reset(meson_dw_hdmi->hdmitx_phy);
/* Enable APB3 fail on error */
if (!meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
writel_bits_relaxed(BIT(15), BIT(15),
meson_dw_hdmi->hdmitx + HDMITX_TOP_CTRL_REG);
writel_bits_relaxed(BIT(15), BIT(15),
meson_dw_hdmi->hdmitx + HDMITX_DWC_CTRL_REG);
}
/* Bring out of reset */
meson_dw_hdmi->data->top_write(meson_dw_hdmi,
HDMITX_TOP_SW_RESET, 0);
msleep(20);
meson_dw_hdmi->data->top_write(meson_dw_hdmi,
HDMITX_TOP_CLK_CNTL, 0xff);
/* Enable HDMI-TX Interrupt */
meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_STAT_CLR,
HDMITX_TOP_INTR_CORE);
meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_MASKN,
HDMITX_TOP_INTR_CORE);
}
static void meson_disable_clk(void *data)
{
clk_disable_unprepare(data);
}
static int meson_enable_clk(struct device *dev, char *name)
{
struct clk *clk;
int ret;
clk = devm_clk_get(dev, name);
if (IS_ERR(clk)) {
dev_err(dev, "Unable to get %s pclk\n", name);
return PTR_ERR(clk);
}
ret = clk_prepare_enable(clk);
if (!ret)
ret = devm_add_action_or_reset(dev, meson_disable_clk, clk);
return ret;
}
static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
void *data)
{
struct platform_device *pdev = to_platform_device(dev);
const struct meson_dw_hdmi_data *match;
struct meson_dw_hdmi *meson_dw_hdmi;
struct drm_device *drm = data;
struct meson_drm *priv = drm->dev_private;
struct dw_hdmi_plat_data *dw_plat_data;
int irq;
int ret;
DRM_DEBUG_DRIVER("\n");
match = of_device_get_match_data(&pdev->dev);
if (!match) {
dev_err(&pdev->dev, "failed to get match data\n");
return -ENODEV;
}
meson_dw_hdmi = devm_kzalloc(dev, sizeof(*meson_dw_hdmi),
GFP_KERNEL);
if (!meson_dw_hdmi)
return -ENOMEM;
meson_dw_hdmi->priv = priv;
meson_dw_hdmi->dev = dev;
meson_dw_hdmi->data = match;
dw_plat_data = &meson_dw_hdmi->dw_plat_data;
ret = devm_regulator_get_enable_optional(dev, "hdmi");
if (ret < 0 && ret != -ENODEV)
return ret;
meson_dw_hdmi->hdmitx_apb = devm_reset_control_get_exclusive(dev,
"hdmitx_apb");
if (IS_ERR(meson_dw_hdmi->hdmitx_apb)) {
dev_err(dev, "Failed to get hdmitx_apb reset\n");
return PTR_ERR(meson_dw_hdmi->hdmitx_apb);
}
meson_dw_hdmi->hdmitx_ctrl = devm_reset_control_get_exclusive(dev,
"hdmitx");
if (IS_ERR(meson_dw_hdmi->hdmitx_ctrl)) {
dev_err(dev, "Failed to get hdmitx reset\n");
return PTR_ERR(meson_dw_hdmi->hdmitx_ctrl);
}
meson_dw_hdmi->hdmitx_phy = devm_reset_control_get_exclusive(dev,
"hdmitx_phy");
if (IS_ERR(meson_dw_hdmi->hdmitx_phy)) {
dev_err(dev, "Failed to get hdmitx_phy reset\n");
return PTR_ERR(meson_dw_hdmi->hdmitx_phy);
}
meson_dw_hdmi->hdmitx = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(meson_dw_hdmi->hdmitx))
return PTR_ERR(meson_dw_hdmi->hdmitx);
ret = meson_enable_clk(dev, "isfr");
if (ret)
return ret;
ret = meson_enable_clk(dev, "iahb");
if (ret)
return ret;
ret = meson_enable_clk(dev, "venci");
if (ret)
return ret;
dw_plat_data->regm = devm_regmap_init(dev, NULL, meson_dw_hdmi,
&meson_dw_hdmi_regmap_config);
if (IS_ERR(dw_plat_data->regm))
return PTR_ERR(dw_plat_data->regm);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
ret = devm_request_threaded_irq(dev, irq, dw_hdmi_top_irq,
dw_hdmi_top_thread_irq, IRQF_SHARED,
"dw_hdmi_top_irq", meson_dw_hdmi);
if (ret) {
dev_err(dev, "Failed to request hdmi top irq\n");
return ret;
}
meson_dw_hdmi_init(meson_dw_hdmi);
/* Bridge / Connector */
dw_plat_data->priv_data = meson_dw_hdmi;
dw_plat_data->phy_ops = &meson_dw_hdmi_phy_ops;
dw_plat_data->phy_name = "meson_dw_hdmi_phy";
dw_plat_data->phy_data = meson_dw_hdmi;
dw_plat_data->input_bus_encoding = V4L2_YCBCR_ENC_709;
dw_plat_data->ycbcr_420_allowed = true;
dw_plat_data->disable_cec = true;
dw_plat_data->output_port = 1;
if (dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxm-dw-hdmi") ||
dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-g12a-dw-hdmi"))
dw_plat_data->use_drm_infoframe = true;
platform_set_drvdata(pdev, meson_dw_hdmi);
meson_dw_hdmi->hdmi = dw_hdmi_probe(pdev, &meson_dw_hdmi->dw_plat_data);
if (IS_ERR(meson_dw_hdmi->hdmi))
return PTR_ERR(meson_dw_hdmi->hdmi);
meson_dw_hdmi->bridge = of_drm_find_bridge(pdev->dev.of_node);
DRM_DEBUG_DRIVER("HDMI controller initialized\n");
return 0;
}
static void meson_dw_hdmi_unbind(struct device *dev, struct device *master,
void *data)
{
struct meson_dw_hdmi *meson_dw_hdmi = dev_get_drvdata(dev);
dw_hdmi_unbind(meson_dw_hdmi->hdmi);
}
static const struct component_ops meson_dw_hdmi_ops = {
.bind = meson_dw_hdmi_bind,
.unbind = meson_dw_hdmi_unbind,
};
static int __maybe_unused meson_dw_hdmi_pm_suspend(struct device *dev)
{
struct meson_dw_hdmi *meson_dw_hdmi = dev_get_drvdata(dev);
if (!meson_dw_hdmi)
return 0;
/* Reset TOP */
meson_dw_hdmi->data->top_write(meson_dw_hdmi,
HDMITX_TOP_SW_RESET, 0);
return 0;
}
static int __maybe_unused meson_dw_hdmi_pm_resume(struct device *dev)
{
struct meson_dw_hdmi *meson_dw_hdmi = dev_get_drvdata(dev);
if (!meson_dw_hdmi)
return 0;
meson_dw_hdmi_init(meson_dw_hdmi);
dw_hdmi_resume(meson_dw_hdmi->hdmi);
return 0;
}
static int meson_dw_hdmi_probe(struct platform_device *pdev)
{
return component_add(&pdev->dev, &meson_dw_hdmi_ops);
}
static void meson_dw_hdmi_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &meson_dw_hdmi_ops);
}
static const struct dev_pm_ops meson_dw_hdmi_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(meson_dw_hdmi_pm_suspend,
meson_dw_hdmi_pm_resume)
};
static const struct of_device_id meson_dw_hdmi_of_table[] = {
{ .compatible = "amlogic,meson-gxbb-dw-hdmi",
.data = &meson_dw_hdmi_gx_data },
{ .compatible = "amlogic,meson-gxl-dw-hdmi",
.data = &meson_dw_hdmi_gx_data },
{ .compatible = "amlogic,meson-gxm-dw-hdmi",
.data = &meson_dw_hdmi_gx_data },
{ .compatible = "amlogic,meson-g12a-dw-hdmi",
.data = &meson_dw_hdmi_g12a_data },
{ }
};
MODULE_DEVICE_TABLE(of, meson_dw_hdmi_of_table);
static struct platform_driver meson_dw_hdmi_platform_driver = {
.probe = meson_dw_hdmi_probe,
.remove_new = meson_dw_hdmi_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = meson_dw_hdmi_of_table,
.pm = &meson_dw_hdmi_pm_ops,
},
};
module_platform_driver(meson_dw_hdmi_platform_driver);
MODULE_AUTHOR("Neil Armstrong <[email protected]>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/meson/meson_dw_hdmi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 BayLibre, SAS
* Author: Neil Armstrong <[email protected]>
* Copyright (C) 2015 Amlogic, Inc. All rights reserved.
*/
#include <linux/bitfield.h>
#include <linux/export.h>
#include <linux/iopoll.h>
#include <drm/drm_modes.h>
#include "meson_drv.h"
#include "meson_registers.h"
#include "meson_venc.h"
#include "meson_vpp.h"
/**
* DOC: Video Encoder
*
 * The VENC handles encoding the pixels into the output formats.
* We handle the following encodings :
*
* - CVBS Encoding via the ENCI encoder and VDAC digital to analog converter
 * - TMDS/HDMI Encoding via ENCI_DVI and ENCP
* - Setup of more clock rates for HDMI modes
*
* What is missing :
*
* - LCD Panel encoding via ENCL
* - TV Panel encoding via ENCT
*
* VENC paths :
*
* .. code::
*
* _____ _____ ____________________
* vd1---| |-| | | VENC /---------|----VDAC
* vd2---| VIU |-| VPP |-|-----ENCI/-ENCI_DVI-|-|
* osd1--| |-| | | \ | X--HDMI-TX
* osd2--|_____|-|_____| | |\-ENCP--ENCP_DVI-|-|
* | | |
* | \--ENCL-----------|----LVDS
* |____________________|
*
 * The ENCI is designed for PAL or NTSC encoding and can go through the VDAC
 * directly for CVBS encoding or through the ENCI_DVI encoder for HDMI.
 * The ENCP is designed for progressive encoding but can also generate
 * 1080i interlaced pixels, and was initially designed to encode pixels for
 * the VDAC to output RGB or YUV analog outputs.
 * Its output is only used through the ENCP_DVI encoder for HDMI.
 * The ENCL LVDS encoder is not implemented.
 * The ENCI and ENCP encoders need specially defined parameters for each
 * supported mode; these parameters cannot be derived from the standard video
 * timings.
 * The ENCI and ENCP DVI encoders are more generic and can generate any timings
 * from the pixel data generated by ENCI or ENCP, so they can use the standard
 * video timings as source for their HW parameters.
*/
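/*
 * As an example of the split described above: CEA VICs 6/7 (480i) and
 * 21/22 (576i) are listed further down with ENCI parameter sets and
 * their DRM modes carry DRM_MODE_FLAG_DBLCLK, so meson_venc_hdmi_mode_set()
 * programs ENCI for them; every other supported VIC goes through ENCP.
 */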
/* HHI Registers */
#define HHI_GCLK_MPEG2 0x148 /* 0x52 offset in data sheet */
#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
#define HHI_VDAC_CNTL0_G12A 0x2EC /* 0xbb offset in data sheet */
#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
#define HHI_VDAC_CNTL1_G12A 0x2F0 /* 0xbc offset in data sheet */
#define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 offset in data sheet */
struct meson_cvbs_enci_mode meson_cvbs_enci_pal = {
.mode_tag = MESON_VENC_MODE_CVBS_PAL,
.hso_begin = 3,
.hso_end = 129,
.vso_even = 3,
.vso_odd = 260,
.macv_max_amp = 7,
.video_prog_mode = 0xff,
.video_mode = 0x13,
.sch_adjust = 0x28,
.yc_delay = 0x343,
.pixel_start = 251,
.pixel_end = 1691,
.top_field_line_start = 22,
.top_field_line_end = 310,
.bottom_field_line_start = 23,
.bottom_field_line_end = 311,
.video_saturation = 9,
.video_contrast = 0,
.video_brightness = 0,
.video_hue = 0,
.analog_sync_adj = 0x8080,
};
struct meson_cvbs_enci_mode meson_cvbs_enci_ntsc = {
.mode_tag = MESON_VENC_MODE_CVBS_NTSC,
.hso_begin = 5,
.hso_end = 129,
.vso_even = 3,
.vso_odd = 260,
.macv_max_amp = 0xb,
.video_prog_mode = 0xf0,
.video_mode = 0x8,
.sch_adjust = 0x20,
.yc_delay = 0x333,
.pixel_start = 227,
.pixel_end = 1667,
.top_field_line_start = 18,
.top_field_line_end = 258,
.bottom_field_line_start = 19,
.bottom_field_line_end = 259,
.video_saturation = 18,
.video_contrast = 3,
.video_brightness = 0,
.video_hue = 0,
.analog_sync_adj = 0x9c00,
};
union meson_hdmi_venc_mode {
struct {
unsigned int mode_tag;
unsigned int hso_begin;
unsigned int hso_end;
unsigned int vso_even;
unsigned int vso_odd;
unsigned int macv_max_amp;
unsigned int video_prog_mode;
unsigned int video_mode;
unsigned int sch_adjust;
unsigned int yc_delay;
unsigned int pixel_start;
unsigned int pixel_end;
unsigned int top_field_line_start;
unsigned int top_field_line_end;
unsigned int bottom_field_line_start;
unsigned int bottom_field_line_end;
} enci;
struct {
unsigned int dvi_settings;
unsigned int video_mode;
unsigned int video_mode_adv;
unsigned int video_prog_mode;
bool video_prog_mode_present;
unsigned int video_sync_mode;
bool video_sync_mode_present;
unsigned int video_yc_dly;
bool video_yc_dly_present;
unsigned int video_rgb_ctrl;
bool video_rgb_ctrl_present;
unsigned int video_filt_ctrl;
bool video_filt_ctrl_present;
unsigned int video_ofld_voav_ofst;
bool video_ofld_voav_ofst_present;
unsigned int yfp1_htime;
unsigned int yfp2_htime;
unsigned int max_pxcnt;
unsigned int hspuls_begin;
unsigned int hspuls_end;
unsigned int hspuls_switch;
unsigned int vspuls_begin;
unsigned int vspuls_end;
unsigned int vspuls_bline;
unsigned int vspuls_eline;
unsigned int eqpuls_begin;
bool eqpuls_begin_present;
unsigned int eqpuls_end;
bool eqpuls_end_present;
unsigned int eqpuls_bline;
bool eqpuls_bline_present;
unsigned int eqpuls_eline;
bool eqpuls_eline_present;
unsigned int havon_begin;
unsigned int havon_end;
unsigned int vavon_bline;
unsigned int vavon_eline;
unsigned int hso_begin;
unsigned int hso_end;
unsigned int vso_begin;
unsigned int vso_end;
unsigned int vso_bline;
unsigned int vso_eline;
bool vso_eline_present;
unsigned int sy_val;
bool sy_val_present;
unsigned int sy2_val;
bool sy2_val_present;
unsigned int max_lncnt;
} encp;
};
static union meson_hdmi_venc_mode meson_hdmi_enci_mode_480i = {
.enci = {
.hso_begin = 5,
.hso_end = 129,
.vso_even = 3,
.vso_odd = 260,
.macv_max_amp = 0xb,
.video_prog_mode = 0xf0,
.video_mode = 0x8,
.sch_adjust = 0x20,
.yc_delay = 0,
.pixel_start = 227,
.pixel_end = 1667,
.top_field_line_start = 18,
.top_field_line_end = 258,
.bottom_field_line_start = 19,
.bottom_field_line_end = 259,
},
};
static union meson_hdmi_venc_mode meson_hdmi_enci_mode_576i = {
.enci = {
.hso_begin = 3,
.hso_end = 129,
.vso_even = 3,
.vso_odd = 260,
.macv_max_amp = 0x7,
.video_prog_mode = 0xff,
.video_mode = 0x13,
.sch_adjust = 0x28,
.yc_delay = 0x333,
.pixel_start = 251,
.pixel_end = 1691,
.top_field_line_start = 22,
.top_field_line_end = 310,
.bottom_field_line_start = 23,
.bottom_field_line_end = 311,
},
};
static union meson_hdmi_venc_mode meson_hdmi_encp_mode_480p = {
.encp = {
.dvi_settings = 0x21,
.video_mode = 0x4000,
.video_mode_adv = 0x9,
.video_prog_mode = 0,
.video_prog_mode_present = true,
.video_sync_mode = 7,
.video_sync_mode_present = true,
/* video_yc_dly */
/* video_rgb_ctrl */
.video_filt_ctrl = 0x2052,
.video_filt_ctrl_present = true,
/* video_ofld_voav_ofst */
.yfp1_htime = 244,
.yfp2_htime = 1630,
.max_pxcnt = 1715,
.hspuls_begin = 0x22,
.hspuls_end = 0xa0,
.hspuls_switch = 88,
.vspuls_begin = 0,
.vspuls_end = 1589,
.vspuls_bline = 0,
.vspuls_eline = 5,
.havon_begin = 249,
.havon_end = 1689,
.vavon_bline = 42,
.vavon_eline = 521,
/* eqpuls_begin */
/* eqpuls_end */
/* eqpuls_bline */
/* eqpuls_eline */
.hso_begin = 3,
.hso_end = 5,
.vso_begin = 3,
.vso_end = 5,
.vso_bline = 0,
/* vso_eline */
.sy_val = 8,
.sy_val_present = true,
.sy2_val = 0x1d8,
.sy2_val_present = true,
.max_lncnt = 524,
},
};
static union meson_hdmi_venc_mode meson_hdmi_encp_mode_576p = {
.encp = {
.dvi_settings = 0x21,
.video_mode = 0x4000,
.video_mode_adv = 0x9,
.video_prog_mode = 0,
.video_prog_mode_present = true,
.video_sync_mode = 7,
.video_sync_mode_present = true,
/* video_yc_dly */
/* video_rgb_ctrl */
.video_filt_ctrl = 0x52,
.video_filt_ctrl_present = true,
/* video_ofld_voav_ofst */
.yfp1_htime = 235,
.yfp2_htime = 1674,
.max_pxcnt = 1727,
.hspuls_begin = 0,
.hspuls_end = 0x80,
.hspuls_switch = 88,
.vspuls_begin = 0,
.vspuls_end = 1599,
.vspuls_bline = 0,
.vspuls_eline = 4,
.havon_begin = 235,
.havon_end = 1674,
.vavon_bline = 44,
.vavon_eline = 619,
/* eqpuls_begin */
/* eqpuls_end */
/* eqpuls_bline */
/* eqpuls_eline */
.hso_begin = 0x80,
.hso_end = 0,
.vso_begin = 0,
.vso_end = 5,
.vso_bline = 0,
/* vso_eline */
.sy_val = 8,
.sy_val_present = true,
.sy2_val = 0x1d8,
.sy2_val_present = true,
.max_lncnt = 624,
},
};
static union meson_hdmi_venc_mode meson_hdmi_encp_mode_720p60 = {
.encp = {
.dvi_settings = 0x2029,
.video_mode = 0x4040,
.video_mode_adv = 0x19,
/* video_prog_mode */
/* video_sync_mode */
/* video_yc_dly */
/* video_rgb_ctrl */
/* video_filt_ctrl */
/* video_ofld_voav_ofst */
.yfp1_htime = 648,
.yfp2_htime = 3207,
.max_pxcnt = 3299,
.hspuls_begin = 80,
.hspuls_end = 240,
.hspuls_switch = 80,
.vspuls_begin = 688,
.vspuls_end = 3248,
.vspuls_bline = 4,
.vspuls_eline = 8,
.havon_begin = 648,
.havon_end = 3207,
.vavon_bline = 29,
.vavon_eline = 748,
/* eqpuls_begin */
/* eqpuls_end */
/* eqpuls_bline */
/* eqpuls_eline */
.hso_begin = 256,
.hso_end = 168,
.vso_begin = 168,
.vso_end = 256,
.vso_bline = 0,
.vso_eline = 5,
.vso_eline_present = true,
/* sy_val */
/* sy2_val */
.max_lncnt = 749,
},
};
static union meson_hdmi_venc_mode meson_hdmi_encp_mode_720p50 = {
.encp = {
.dvi_settings = 0x202d,
.video_mode = 0x4040,
.video_mode_adv = 0x19,
.video_prog_mode = 0x100,
.video_prog_mode_present = true,
.video_sync_mode = 0x407,
.video_sync_mode_present = true,
.video_yc_dly = 0,
.video_yc_dly_present = true,
/* video_rgb_ctrl */
/* video_filt_ctrl */
/* video_ofld_voav_ofst */
.yfp1_htime = 648,
.yfp2_htime = 3207,
.max_pxcnt = 3959,
.hspuls_begin = 80,
.hspuls_end = 240,
.hspuls_switch = 80,
.vspuls_begin = 688,
.vspuls_end = 3248,
.vspuls_bline = 4,
.vspuls_eline = 8,
.havon_begin = 648,
.havon_end = 3207,
.vavon_bline = 29,
.vavon_eline = 748,
/* eqpuls_begin */
/* eqpuls_end */
/* eqpuls_bline */
/* eqpuls_eline */
.hso_begin = 128,
.hso_end = 208,
.vso_begin = 128,
.vso_end = 128,
.vso_bline = 0,
.vso_eline = 5,
.vso_eline_present = true,
/* sy_val */
/* sy2_val */
.max_lncnt = 749,
},
};
static union meson_hdmi_venc_mode meson_hdmi_encp_mode_1080i60 = {
.encp = {
.dvi_settings = 0x2029,
.video_mode = 0x5ffc,
.video_mode_adv = 0x19,
.video_prog_mode = 0x100,
.video_prog_mode_present = true,
.video_sync_mode = 0x207,
.video_sync_mode_present = true,
/* video_yc_dly */
/* video_rgb_ctrl */
/* video_filt_ctrl */
.video_ofld_voav_ofst = 0x11,
.video_ofld_voav_ofst_present = true,
.yfp1_htime = 516,
.yfp2_htime = 4355,
.max_pxcnt = 4399,
.hspuls_begin = 88,
.hspuls_end = 264,
.hspuls_switch = 88,
.vspuls_begin = 440,
.vspuls_end = 2200,
.vspuls_bline = 0,
.vspuls_eline = 4,
.havon_begin = 516,
.havon_end = 4355,
.vavon_bline = 20,
.vavon_eline = 559,
.eqpuls_begin = 2288,
.eqpuls_begin_present = true,
.eqpuls_end = 2464,
.eqpuls_end_present = true,
.eqpuls_bline = 0,
.eqpuls_bline_present = true,
.eqpuls_eline = 4,
.eqpuls_eline_present = true,
.hso_begin = 264,
.hso_end = 176,
.vso_begin = 88,
.vso_end = 88,
.vso_bline = 0,
.vso_eline = 5,
.vso_eline_present = true,
/* sy_val */
/* sy2_val */
.max_lncnt = 1124,
},
};
static union meson_hdmi_venc_mode meson_hdmi_encp_mode_1080i50 = {
.encp = {
.dvi_settings = 0x202d,
.video_mode = 0x5ffc,
.video_mode_adv = 0x19,
.video_prog_mode = 0x100,
.video_prog_mode_present = true,
.video_sync_mode = 0x7,
.video_sync_mode_present = true,
/* video_yc_dly */
/* video_rgb_ctrl */
/* video_filt_ctrl */
.video_ofld_voav_ofst = 0x11,
.video_ofld_voav_ofst_present = true,
.yfp1_htime = 526,
.yfp2_htime = 4365,
.max_pxcnt = 5279,
.hspuls_begin = 88,
.hspuls_end = 264,
.hspuls_switch = 88,
.vspuls_begin = 440,
.vspuls_end = 2200,
.vspuls_bline = 0,
.vspuls_eline = 4,
.havon_begin = 526,
.havon_end = 4365,
.vavon_bline = 20,
.vavon_eline = 559,
.eqpuls_begin = 2288,
.eqpuls_begin_present = true,
.eqpuls_end = 2464,
.eqpuls_end_present = true,
.eqpuls_bline = 0,
.eqpuls_bline_present = true,
.eqpuls_eline = 4,
.eqpuls_eline_present = true,
.hso_begin = 142,
.hso_end = 230,
.vso_begin = 142,
.vso_end = 142,
.vso_bline = 0,
.vso_eline = 5,
.vso_eline_present = true,
/* sy_val */
/* sy2_val */
.max_lncnt = 1124,
},
};
static union meson_hdmi_venc_mode meson_hdmi_encp_mode_1080p24 = {
.encp = {
.dvi_settings = 0xd,
.video_mode = 0x4040,
.video_mode_adv = 0x18,
.video_prog_mode = 0x100,
.video_prog_mode_present = true,
.video_sync_mode = 0x7,
.video_sync_mode_present = true,
.video_yc_dly = 0,
.video_yc_dly_present = true,
.video_rgb_ctrl = 2,
.video_rgb_ctrl_present = true,
.video_filt_ctrl = 0x1052,
.video_filt_ctrl_present = true,
/* video_ofld_voav_ofst */
.yfp1_htime = 271,
.yfp2_htime = 2190,
.max_pxcnt = 2749,
.hspuls_begin = 44,
.hspuls_end = 132,
.hspuls_switch = 44,
.vspuls_begin = 220,
.vspuls_end = 2140,
.vspuls_bline = 0,
.vspuls_eline = 4,
.havon_begin = 271,
.havon_end = 2190,
.vavon_bline = 41,
.vavon_eline = 1120,
/* eqpuls_begin */
/* eqpuls_end */
.eqpuls_bline = 0,
.eqpuls_bline_present = true,
.eqpuls_eline = 4,
.eqpuls_eline_present = true,
.hso_begin = 79,
.hso_end = 123,
.vso_begin = 79,
.vso_end = 79,
.vso_bline = 0,
.vso_eline = 5,
.vso_eline_present = true,
/* sy_val */
/* sy2_val */
.max_lncnt = 1124,
},
};
static union meson_hdmi_venc_mode meson_hdmi_encp_mode_1080p30 = {
.encp = {
.dvi_settings = 0x1,
.video_mode = 0x4040,
.video_mode_adv = 0x18,
.video_prog_mode = 0x100,
.video_prog_mode_present = true,
/* video_sync_mode */
/* video_yc_dly */
/* video_rgb_ctrl */
.video_filt_ctrl = 0x1052,
.video_filt_ctrl_present = true,
/* video_ofld_voav_ofst */
.yfp1_htime = 140,
.yfp2_htime = 2060,
.max_pxcnt = 2199,
.hspuls_begin = 2156,
.hspuls_end = 44,
.hspuls_switch = 44,
.vspuls_begin = 140,
.vspuls_end = 2059,
.vspuls_bline = 0,
.vspuls_eline = 4,
.havon_begin = 148,
.havon_end = 2067,
.vavon_bline = 41,
.vavon_eline = 1120,
/* eqpuls_begin */
/* eqpuls_end */
/* eqpuls_bline */
/* eqpuls_eline */
.hso_begin = 44,
.hso_end = 2156,
.vso_begin = 2100,
.vso_end = 2164,
.vso_bline = 0,
.vso_eline = 5,
.vso_eline_present = true,
/* sy_val */
/* sy2_val */
.max_lncnt = 1124,
},
};
static union meson_hdmi_venc_mode meson_hdmi_encp_mode_1080p50 = {
.encp = {
.dvi_settings = 0xd,
.video_mode = 0x4040,
.video_mode_adv = 0x18,
.video_prog_mode = 0x100,
.video_prog_mode_present = true,
.video_sync_mode = 0x7,
.video_sync_mode_present = true,
.video_yc_dly = 0,
.video_yc_dly_present = true,
.video_rgb_ctrl = 2,
.video_rgb_ctrl_present = true,
/* video_filt_ctrl */
/* video_ofld_voav_ofst */
.yfp1_htime = 271,
.yfp2_htime = 2190,
.max_pxcnt = 2639,
.hspuls_begin = 44,
.hspuls_end = 132,
.hspuls_switch = 44,
.vspuls_begin = 220,
.vspuls_end = 2140,
.vspuls_bline = 0,
.vspuls_eline = 4,
.havon_begin = 271,
.havon_end = 2190,
.vavon_bline = 41,
.vavon_eline = 1120,
/* eqpuls_begin */
/* eqpuls_end */
.eqpuls_bline = 0,
.eqpuls_bline_present = true,
.eqpuls_eline = 4,
.eqpuls_eline_present = true,
.hso_begin = 79,
.hso_end = 123,
.vso_begin = 79,
.vso_end = 79,
.vso_bline = 0,
.vso_eline = 5,
.vso_eline_present = true,
/* sy_val */
/* sy2_val */
.max_lncnt = 1124,
},
};
static union meson_hdmi_venc_mode meson_hdmi_encp_mode_1080p60 = {
.encp = {
.dvi_settings = 0x1,
.video_mode = 0x4040,
.video_mode_adv = 0x18,
.video_prog_mode = 0x100,
.video_prog_mode_present = true,
/* video_sync_mode */
/* video_yc_dly */
/* video_rgb_ctrl */
.video_filt_ctrl = 0x1052,
.video_filt_ctrl_present = true,
/* video_ofld_voav_ofst */
.yfp1_htime = 140,
.yfp2_htime = 2060,
.max_pxcnt = 2199,
.hspuls_begin = 2156,
.hspuls_end = 44,
.hspuls_switch = 44,
.vspuls_begin = 140,
.vspuls_end = 2059,
.vspuls_bline = 0,
.vspuls_eline = 4,
.havon_begin = 148,
.havon_end = 2067,
.vavon_bline = 41,
.vavon_eline = 1120,
/* eqpuls_begin */
/* eqpuls_end */
/* eqpuls_bline */
/* eqpuls_eline */
.hso_begin = 44,
.hso_end = 2156,
.vso_begin = 2100,
.vso_end = 2164,
.vso_bline = 0,
.vso_eline = 5,
.vso_eline_present = true,
/* sy_val */
/* sy2_val */
.max_lncnt = 1124,
},
};
static union meson_hdmi_venc_mode meson_hdmi_encp_mode_2160p24 = {
.encp = {
.dvi_settings = 0x1,
.video_mode = 0x4040,
.video_mode_adv = 0x8,
/* video_sync_mode */
/* video_yc_dly */
/* video_rgb_ctrl */
.video_filt_ctrl = 0x1000,
.video_filt_ctrl_present = true,
/* video_ofld_voav_ofst */
.yfp1_htime = 140,
.yfp2_htime = 140+3840,
.max_pxcnt = 3840+1660-1,
.hspuls_begin = 2156+1920,
.hspuls_end = 44,
.hspuls_switch = 44,
.vspuls_begin = 140,
.vspuls_end = 2059+1920,
.vspuls_bline = 0,
.vspuls_eline = 4,
.havon_begin = 148,
.havon_end = 3987,
.vavon_bline = 89,
.vavon_eline = 2248,
/* eqpuls_begin */
/* eqpuls_end */
/* eqpuls_bline */
/* eqpuls_eline */
.hso_begin = 44,
.hso_end = 2156+1920,
.vso_begin = 2100+1920,
.vso_end = 2164+1920,
.vso_bline = 51,
.vso_eline = 53,
.vso_eline_present = true,
/* sy_val */
/* sy2_val */
.max_lncnt = 2249,
},
};
static union meson_hdmi_venc_mode meson_hdmi_encp_mode_2160p25 = {
.encp = {
.dvi_settings = 0x1,
.video_mode = 0x4040,
.video_mode_adv = 0x8,
/* video_sync_mode */
/* video_yc_dly */
/* video_rgb_ctrl */
.video_filt_ctrl = 0x1000,
.video_filt_ctrl_present = true,
/* video_ofld_voav_ofst */
.yfp1_htime = 140,
.yfp2_htime = 140+3840,
.max_pxcnt = 3840+1440-1,
.hspuls_begin = 2156+1920,
.hspuls_end = 44,
.hspuls_switch = 44,
.vspuls_begin = 140,
.vspuls_end = 2059+1920,
.vspuls_bline = 0,
.vspuls_eline = 4,
.havon_begin = 148,
.havon_end = 3987,
.vavon_bline = 89,
.vavon_eline = 2248,
/* eqpuls_begin */
/* eqpuls_end */
/* eqpuls_bline */
/* eqpuls_eline */
.hso_begin = 44,
.hso_end = 2156+1920,
.vso_begin = 2100+1920,
.vso_end = 2164+1920,
.vso_bline = 51,
.vso_eline = 53,
.vso_eline_present = true,
/* sy_val */
/* sy2_val */
.max_lncnt = 2249,
},
};
static union meson_hdmi_venc_mode meson_hdmi_encp_mode_2160p30 = {
.encp = {
.dvi_settings = 0x1,
.video_mode = 0x4040,
.video_mode_adv = 0x8,
/* video_sync_mode */
/* video_yc_dly */
/* video_rgb_ctrl */
.video_filt_ctrl = 0x1000,
.video_filt_ctrl_present = true,
/* video_ofld_voav_ofst */
.yfp1_htime = 140,
.yfp2_htime = 140+3840,
.max_pxcnt = 3840+560-1,
.hspuls_begin = 2156+1920,
.hspuls_end = 44,
.hspuls_switch = 44,
.vspuls_begin = 140,
.vspuls_end = 2059+1920,
.vspuls_bline = 0,
.vspuls_eline = 4,
.havon_begin = 148,
.havon_end = 3987,
.vavon_bline = 89,
.vavon_eline = 2248,
/* eqpuls_begin */
/* eqpuls_end */
/* eqpuls_bline */
/* eqpuls_eline */
.hso_begin = 44,
.hso_end = 2156+1920,
.vso_begin = 2100+1920,
.vso_end = 2164+1920,
.vso_bline = 51,
.vso_eline = 53,
.vso_eline_present = true,
/* sy_val */
/* sy2_val */
.max_lncnt = 2249,
},
};
static struct meson_hdmi_venc_vic_mode {
unsigned int vic;
union meson_hdmi_venc_mode *mode;
} meson_hdmi_venc_vic_modes[] = {
{ 6, &meson_hdmi_enci_mode_480i },
{ 7, &meson_hdmi_enci_mode_480i },
{ 21, &meson_hdmi_enci_mode_576i },
{ 22, &meson_hdmi_enci_mode_576i },
{ 2, &meson_hdmi_encp_mode_480p },
{ 3, &meson_hdmi_encp_mode_480p },
{ 17, &meson_hdmi_encp_mode_576p },
{ 18, &meson_hdmi_encp_mode_576p },
{ 4, &meson_hdmi_encp_mode_720p60 },
{ 19, &meson_hdmi_encp_mode_720p50 },
{ 5, &meson_hdmi_encp_mode_1080i60 },
{ 20, &meson_hdmi_encp_mode_1080i50 },
{ 32, &meson_hdmi_encp_mode_1080p24 },
{ 33, &meson_hdmi_encp_mode_1080p50 },
{ 34, &meson_hdmi_encp_mode_1080p30 },
{ 31, &meson_hdmi_encp_mode_1080p50 },
{ 16, &meson_hdmi_encp_mode_1080p60 },
{ 93, &meson_hdmi_encp_mode_2160p24 },
{ 94, &meson_hdmi_encp_mode_2160p25 },
{ 95, &meson_hdmi_encp_mode_2160p30 },
{ 96, &meson_hdmi_encp_mode_2160p25 },
{ 97, &meson_hdmi_encp_mode_2160p30 },
{ 0, NULL}, /* sentinel */
};
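/*
 * Interpret a 4-bit two's-complement register field (0..15) as a
 * signed value in the [-8, 7] range.
 */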
static signed int to_signed(unsigned int a)
{
if (a <= 7)
return a;
else
return a - 16;
}
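/*
 * Single-subtraction wrap: this is only a true modulo when a < 2 * b,
 * which the pixel/line wrapping below appears to rely on.
 */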
static unsigned long modulo(unsigned long a, unsigned long b)
{
if (a >= b)
return a - b;
else
return a;
}
enum drm_mode_status
meson_venc_hdmi_supported_mode(const struct drm_display_mode *mode)
{
if (mode->flags & ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC |
DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))
return MODE_BAD;
if (mode->hdisplay < 400 || mode->hdisplay > 1920)
return MODE_BAD_HVALUE;
if (mode->vdisplay < 480 || mode->vdisplay > 1920)
return MODE_BAD_VVALUE;
return MODE_OK;
}
EXPORT_SYMBOL_GPL(meson_venc_hdmi_supported_mode);
bool meson_venc_hdmi_supported_vic(int vic)
{
struct meson_hdmi_venc_vic_mode *vmode = meson_hdmi_venc_vic_modes;
while (vmode->vic && vmode->mode) {
if (vmode->vic == vic)
return true;
vmode++;
}
return false;
}
EXPORT_SYMBOL_GPL(meson_venc_hdmi_supported_vic);
static void meson_venc_hdmi_get_dmt_vmode(const struct drm_display_mode *mode,
union meson_hdmi_venc_mode *dmt_mode)
{
memset(dmt_mode, 0, sizeof(*dmt_mode));
dmt_mode->encp.dvi_settings = 0x21;
dmt_mode->encp.video_mode = 0x4040;
dmt_mode->encp.video_mode_adv = 0x18;
dmt_mode->encp.max_pxcnt = mode->htotal - 1;
dmt_mode->encp.havon_begin = mode->htotal - mode->hsync_start;
dmt_mode->encp.havon_end = dmt_mode->encp.havon_begin +
mode->hdisplay - 1;
dmt_mode->encp.vavon_bline = mode->vtotal - mode->vsync_start;
dmt_mode->encp.vavon_eline = dmt_mode->encp.vavon_bline +
mode->vdisplay - 1;
dmt_mode->encp.hso_begin = 0;
dmt_mode->encp.hso_end = mode->hsync_end - mode->hsync_start;
dmt_mode->encp.vso_begin = 30;
dmt_mode->encp.vso_end = 50;
dmt_mode->encp.vso_bline = 0;
dmt_mode->encp.vso_eline = mode->vsync_end - mode->vsync_start;
dmt_mode->encp.vso_eline_present = true;
dmt_mode->encp.max_lncnt = mode->vtotal - 1;
}
static union meson_hdmi_venc_mode *meson_venc_hdmi_get_vic_vmode(int vic)
{
struct meson_hdmi_venc_vic_mode *vmode = meson_hdmi_venc_vic_modes;
while (vmode->vic && vmode->mode) {
if (vmode->vic == vic)
return vmode->mode;
vmode++;
}
return NULL;
}
bool meson_venc_hdmi_venc_repeat(int vic)
{
	/* Repeat VENC pixels for 480/576i/p, 720p50/60 and 1080i50/60 */
if (vic == 6 || vic == 7 || /* 480i */
vic == 21 || vic == 22 || /* 576i */
vic == 17 || vic == 18 || /* 576p */
vic == 2 || vic == 3 || /* 480p */
vic == 4 || /* 720p60 */
vic == 19 || /* 720p50 */
vic == 5 || /* 1080i60 */
vic == 20) /* 1080i50 */
return true;
return false;
}
EXPORT_SYMBOL_GPL(meson_venc_hdmi_venc_repeat);
void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
unsigned int ycrcb_map,
bool yuv420_mode,
const struct drm_display_mode *mode)
{
union meson_hdmi_venc_mode *vmode = NULL;
union meson_hdmi_venc_mode vmode_dmt;
bool use_enci = false;
bool venc_repeat = false;
bool hdmi_repeat = false;
unsigned int venc_hdmi_latency = 2;
unsigned long total_pixels_venc = 0;
unsigned long active_pixels_venc = 0;
unsigned long front_porch_venc = 0;
unsigned long hsync_pixels_venc = 0;
unsigned long de_h_begin = 0;
unsigned long de_h_end = 0;
unsigned long de_v_begin_even = 0;
unsigned long de_v_end_even = 0;
unsigned long de_v_begin_odd = 0;
unsigned long de_v_end_odd = 0;
unsigned long hs_begin = 0;
unsigned long hs_end = 0;
unsigned long vs_adjust = 0;
unsigned long vs_bline_evn = 0;
unsigned long vs_eline_evn = 0;
unsigned long vs_bline_odd = 0;
unsigned long vs_eline_odd = 0;
unsigned long vso_begin_evn = 0;
unsigned long vso_begin_odd = 0;
unsigned int eof_lines;
unsigned int sof_lines;
unsigned int vsync_lines;
u32 reg;
/* Use VENCI for 480i and 576i and double HDMI pixels */
if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
hdmi_repeat = true;
use_enci = true;
venc_hdmi_latency = 1;
}
if (meson_venc_hdmi_supported_vic(vic)) {
vmode = meson_venc_hdmi_get_vic_vmode(vic);
if (!vmode) {
dev_err(priv->dev, "%s: Fatal Error, unsupported mode "
DRM_MODE_FMT "\n", __func__,
DRM_MODE_ARG(mode));
return;
}
} else {
meson_venc_hdmi_get_dmt_vmode(mode, &vmode_dmt);
vmode = &vmode_dmt;
use_enci = false;
}
	/* Repeat VENC pixels for 480/576i/p, 720p50/60 and 1080i50/60 */
if (meson_venc_hdmi_venc_repeat(vic))
venc_repeat = true;
eof_lines = mode->vsync_start - mode->vdisplay;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
eof_lines /= 2;
sof_lines = mode->vtotal - mode->vsync_end;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
sof_lines /= 2;
vsync_lines = mode->vsync_end - mode->vsync_start;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vsync_lines /= 2;
total_pixels_venc = mode->htotal;
if (hdmi_repeat)
total_pixels_venc /= 2;
if (venc_repeat)
total_pixels_venc *= 2;
active_pixels_venc = mode->hdisplay;
if (hdmi_repeat)
active_pixels_venc /= 2;
if (venc_repeat)
active_pixels_venc *= 2;
front_porch_venc = (mode->hsync_start - mode->hdisplay);
if (hdmi_repeat)
front_porch_venc /= 2;
if (venc_repeat)
front_porch_venc *= 2;
hsync_pixels_venc = (mode->hsync_end - mode->hsync_start);
if (hdmi_repeat)
hsync_pixels_venc /= 2;
if (venc_repeat)
hsync_pixels_venc *= 2;
/* Disable VDACs */
writel_bits_relaxed(0xff, 0xff,
priv->io_base + _REG(VENC_VDAC_SETTING));
writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN));
writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN));
if (use_enci) {
unsigned int lines_f0;
unsigned int lines_f1;
/* CVBS Filter settings */
writel_relaxed(ENCI_CFILT_CMPT_SEL_HIGH | 0x10,
priv->io_base + _REG(ENCI_CFILT_CTRL));
writel_relaxed(ENCI_CFILT_CMPT_CR_DLY(2) |
ENCI_CFILT_CMPT_CB_DLY(1),
priv->io_base + _REG(ENCI_CFILT_CTRL2));
/* Digital Video Select : Interlace, clk27 clk, external */
writel_relaxed(0, priv->io_base + _REG(VENC_DVI_SETTING));
/* Reset Video Mode */
writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_MODE));
writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_MODE_ADV));
/* Horizontal sync signal output */
writel_relaxed(vmode->enci.hso_begin,
priv->io_base + _REG(ENCI_SYNC_HSO_BEGIN));
writel_relaxed(vmode->enci.hso_end,
priv->io_base + _REG(ENCI_SYNC_HSO_END));
/* Vertical Sync lines */
writel_relaxed(vmode->enci.vso_even,
priv->io_base + _REG(ENCI_SYNC_VSO_EVNLN));
writel_relaxed(vmode->enci.vso_odd,
priv->io_base + _REG(ENCI_SYNC_VSO_ODDLN));
/* Macrovision max amplitude change */
writel_relaxed(ENCI_MACV_MAX_AMP_ENABLE_CHANGE |
ENCI_MACV_MAX_AMP_VAL(vmode->enci.macv_max_amp),
priv->io_base + _REG(ENCI_MACV_MAX_AMP));
/* Video mode */
writel_relaxed(vmode->enci.video_prog_mode,
priv->io_base + _REG(VENC_VIDEO_PROG_MODE));
writel_relaxed(vmode->enci.video_mode,
priv->io_base + _REG(ENCI_VIDEO_MODE));
/*
* Advanced Video Mode :
* Demux shifting 0x2
* Blank line end at line17/22
* High bandwidth Luma Filter
* Low bandwidth Chroma Filter
* Bypass luma low pass filter
* No macrovision on CSYNC
*/
writel_relaxed(ENCI_VIDEO_MODE_ADV_DMXMD(2) |
ENCI_VIDEO_MODE_ADV_VBICTL_LINE_17_22 |
ENCI_VIDEO_MODE_ADV_YBW_HIGH,
priv->io_base + _REG(ENCI_VIDEO_MODE_ADV));
writel(vmode->enci.sch_adjust,
priv->io_base + _REG(ENCI_VIDEO_SCH));
/* Sync mode : MASTER Master mode, free run, send HSO/VSO out */
writel_relaxed(0x07, priv->io_base + _REG(ENCI_SYNC_MODE));
if (vmode->enci.yc_delay)
writel_relaxed(vmode->enci.yc_delay,
priv->io_base + _REG(ENCI_YC_DELAY));
/* UNreset Interlaced TV Encoder */
writel_relaxed(0, priv->io_base + _REG(ENCI_DBG_PX_RST));
/*
* Enable Vfifo2vd and set Y_Cb_Y_Cr:
* Corresponding value:
* Y => 00 or 10
* Cb => 01
* Cr => 11
* Ex: 0x4e => 01001110 would mean Cb/Y/Cr/Y
*/
writel_relaxed(ENCI_VFIFO2VD_CTL_ENABLE |
ENCI_VFIFO2VD_CTL_VD_SEL(0x4e),
priv->io_base + _REG(ENCI_VFIFO2VD_CTL));
/* Timings */
writel_relaxed(vmode->enci.pixel_start,
priv->io_base + _REG(ENCI_VFIFO2VD_PIXEL_START));
writel_relaxed(vmode->enci.pixel_end,
priv->io_base + _REG(ENCI_VFIFO2VD_PIXEL_END));
writel_relaxed(vmode->enci.top_field_line_start,
priv->io_base + _REG(ENCI_VFIFO2VD_LINE_TOP_START));
writel_relaxed(vmode->enci.top_field_line_end,
priv->io_base + _REG(ENCI_VFIFO2VD_LINE_TOP_END));
writel_relaxed(vmode->enci.bottom_field_line_start,
priv->io_base + _REG(ENCI_VFIFO2VD_LINE_BOT_START));
writel_relaxed(vmode->enci.bottom_field_line_end,
priv->io_base + _REG(ENCI_VFIFO2VD_LINE_BOT_END));
/* Select ENCI for VIU */
meson_vpp_setup_mux(priv, MESON_VIU_VPP_MUX_ENCI);
/* Interlace video enable */
writel_relaxed(ENCI_VIDEO_EN_ENABLE,
priv->io_base + _REG(ENCI_VIDEO_EN));
lines_f0 = mode->vtotal >> 1;
lines_f1 = lines_f0 + 1;
de_h_begin = modulo(readl_relaxed(priv->io_base +
_REG(ENCI_VFIFO2VD_PIXEL_START))
+ venc_hdmi_latency,
total_pixels_venc);
de_h_end = modulo(de_h_begin + active_pixels_venc,
total_pixels_venc);
writel_relaxed(de_h_begin,
priv->io_base + _REG(ENCI_DE_H_BEGIN));
writel_relaxed(de_h_end,
priv->io_base + _REG(ENCI_DE_H_END));
de_v_begin_even = readl_relaxed(priv->io_base +
_REG(ENCI_VFIFO2VD_LINE_TOP_START));
de_v_end_even = de_v_begin_even + mode->vdisplay;
de_v_begin_odd = readl_relaxed(priv->io_base +
_REG(ENCI_VFIFO2VD_LINE_BOT_START));
de_v_end_odd = de_v_begin_odd + mode->vdisplay;
writel_relaxed(de_v_begin_even,
priv->io_base + _REG(ENCI_DE_V_BEGIN_EVEN));
writel_relaxed(de_v_end_even,
priv->io_base + _REG(ENCI_DE_V_END_EVEN));
writel_relaxed(de_v_begin_odd,
priv->io_base + _REG(ENCI_DE_V_BEGIN_ODD));
writel_relaxed(de_v_end_odd,
priv->io_base + _REG(ENCI_DE_V_END_ODD));
/* Program Hsync timing */
		hs_begin = de_h_end + front_porch_venc;
		if (hs_begin >= total_pixels_venc) {
			hs_begin -= total_pixels_venc;
			vs_adjust = 1;
		} else {
			vs_adjust = 0;
		}
hs_end = modulo(hs_begin + hsync_pixels_venc,
total_pixels_venc);
writel_relaxed(hs_begin,
priv->io_base + _REG(ENCI_DVI_HSO_BEGIN));
writel_relaxed(hs_end,
priv->io_base + _REG(ENCI_DVI_HSO_END));
/* Program Vsync timing for even field */
if (((de_v_end_odd - 1) + eof_lines + vs_adjust) >= lines_f1) {
vs_bline_evn = (de_v_end_odd - 1)
+ eof_lines
+ vs_adjust
- lines_f1;
vs_eline_evn = vs_bline_evn + vsync_lines;
writel_relaxed(vs_bline_evn,
priv->io_base + _REG(ENCI_DVI_VSO_BLINE_EVN));
writel_relaxed(vs_eline_evn,
priv->io_base + _REG(ENCI_DVI_VSO_ELINE_EVN));
writel_relaxed(hs_begin,
priv->io_base + _REG(ENCI_DVI_VSO_BEGIN_EVN));
writel_relaxed(hs_begin,
priv->io_base + _REG(ENCI_DVI_VSO_END_EVN));
} else {
vs_bline_odd = (de_v_end_odd - 1)
+ eof_lines
+ vs_adjust;
writel_relaxed(vs_bline_odd,
priv->io_base + _REG(ENCI_DVI_VSO_BLINE_ODD));
writel_relaxed(hs_begin,
priv->io_base + _REG(ENCI_DVI_VSO_BEGIN_ODD));
if ((vs_bline_odd + vsync_lines) >= lines_f1) {
vs_eline_evn = vs_bline_odd
+ vsync_lines
- lines_f1;
writel_relaxed(vs_eline_evn, priv->io_base
+ _REG(ENCI_DVI_VSO_ELINE_EVN));
writel_relaxed(hs_begin, priv->io_base
+ _REG(ENCI_DVI_VSO_END_EVN));
} else {
vs_eline_odd = vs_bline_odd
+ vsync_lines;
writel_relaxed(vs_eline_odd, priv->io_base
+ _REG(ENCI_DVI_VSO_ELINE_ODD));
writel_relaxed(hs_begin, priv->io_base
+ _REG(ENCI_DVI_VSO_END_ODD));
}
}
/* Program Vsync timing for odd field */
if (((de_v_end_even - 1) + (eof_lines + 1)) >= lines_f0) {
vs_bline_odd = (de_v_end_even - 1)
+ (eof_lines + 1)
- lines_f0;
vs_eline_odd = vs_bline_odd + vsync_lines;
writel_relaxed(vs_bline_odd,
priv->io_base + _REG(ENCI_DVI_VSO_BLINE_ODD));
writel_relaxed(vs_eline_odd,
priv->io_base + _REG(ENCI_DVI_VSO_ELINE_ODD));
vso_begin_odd = modulo(hs_begin
+ (total_pixels_venc >> 1),
total_pixels_venc);
writel_relaxed(vso_begin_odd,
priv->io_base + _REG(ENCI_DVI_VSO_BEGIN_ODD));
writel_relaxed(vso_begin_odd,
priv->io_base + _REG(ENCI_DVI_VSO_END_ODD));
} else {
vs_bline_evn = (de_v_end_even - 1)
+ (eof_lines + 1);
writel_relaxed(vs_bline_evn,
priv->io_base + _REG(ENCI_DVI_VSO_BLINE_EVN));
vso_begin_evn = modulo(hs_begin
+ (total_pixels_venc >> 1),
total_pixels_venc);
writel_relaxed(vso_begin_evn, priv->io_base
+ _REG(ENCI_DVI_VSO_BEGIN_EVN));
if (vs_bline_evn + vsync_lines >= lines_f0) {
vs_eline_odd = vs_bline_evn
+ vsync_lines
- lines_f0;
writel_relaxed(vs_eline_odd, priv->io_base
+ _REG(ENCI_DVI_VSO_ELINE_ODD));
writel_relaxed(vso_begin_evn, priv->io_base
+ _REG(ENCI_DVI_VSO_END_ODD));
} else {
vs_eline_evn = vs_bline_evn + vsync_lines;
writel_relaxed(vs_eline_evn, priv->io_base
+ _REG(ENCI_DVI_VSO_ELINE_EVN));
writel_relaxed(vso_begin_evn, priv->io_base
+ _REG(ENCI_DVI_VSO_END_EVN));
}
}
} else {
writel_relaxed(vmode->encp.dvi_settings,
priv->io_base + _REG(VENC_DVI_SETTING));
writel_relaxed(vmode->encp.video_mode,
priv->io_base + _REG(ENCP_VIDEO_MODE));
writel_relaxed(vmode->encp.video_mode_adv,
priv->io_base + _REG(ENCP_VIDEO_MODE_ADV));
if (vmode->encp.video_prog_mode_present)
writel_relaxed(vmode->encp.video_prog_mode,
priv->io_base + _REG(VENC_VIDEO_PROG_MODE));
if (vmode->encp.video_sync_mode_present)
writel_relaxed(vmode->encp.video_sync_mode,
priv->io_base + _REG(ENCP_VIDEO_SYNC_MODE));
if (vmode->encp.video_yc_dly_present)
writel_relaxed(vmode->encp.video_yc_dly,
priv->io_base + _REG(ENCP_VIDEO_YC_DLY));
if (vmode->encp.video_rgb_ctrl_present)
writel_relaxed(vmode->encp.video_rgb_ctrl,
priv->io_base + _REG(ENCP_VIDEO_RGB_CTRL));
if (vmode->encp.video_filt_ctrl_present)
writel_relaxed(vmode->encp.video_filt_ctrl,
priv->io_base + _REG(ENCP_VIDEO_FILT_CTRL));
if (vmode->encp.video_ofld_voav_ofst_present)
writel_relaxed(vmode->encp.video_ofld_voav_ofst,
priv->io_base
+ _REG(ENCP_VIDEO_OFLD_VOAV_OFST));
writel_relaxed(vmode->encp.yfp1_htime,
priv->io_base + _REG(ENCP_VIDEO_YFP1_HTIME));
writel_relaxed(vmode->encp.yfp2_htime,
priv->io_base + _REG(ENCP_VIDEO_YFP2_HTIME));
writel_relaxed(vmode->encp.max_pxcnt,
priv->io_base + _REG(ENCP_VIDEO_MAX_PXCNT));
writel_relaxed(vmode->encp.hspuls_begin,
priv->io_base + _REG(ENCP_VIDEO_HSPULS_BEGIN));
writel_relaxed(vmode->encp.hspuls_end,
priv->io_base + _REG(ENCP_VIDEO_HSPULS_END));
writel_relaxed(vmode->encp.hspuls_switch,
priv->io_base + _REG(ENCP_VIDEO_HSPULS_SWITCH));
writel_relaxed(vmode->encp.vspuls_begin,
priv->io_base + _REG(ENCP_VIDEO_VSPULS_BEGIN));
writel_relaxed(vmode->encp.vspuls_end,
priv->io_base + _REG(ENCP_VIDEO_VSPULS_END));
writel_relaxed(vmode->encp.vspuls_bline,
priv->io_base + _REG(ENCP_VIDEO_VSPULS_BLINE));
writel_relaxed(vmode->encp.vspuls_eline,
priv->io_base + _REG(ENCP_VIDEO_VSPULS_ELINE));
if (vmode->encp.eqpuls_begin_present)
writel_relaxed(vmode->encp.eqpuls_begin,
priv->io_base + _REG(ENCP_VIDEO_EQPULS_BEGIN));
if (vmode->encp.eqpuls_end_present)
writel_relaxed(vmode->encp.eqpuls_end,
priv->io_base + _REG(ENCP_VIDEO_EQPULS_END));
if (vmode->encp.eqpuls_bline_present)
writel_relaxed(vmode->encp.eqpuls_bline,
priv->io_base + _REG(ENCP_VIDEO_EQPULS_BLINE));
if (vmode->encp.eqpuls_eline_present)
writel_relaxed(vmode->encp.eqpuls_eline,
priv->io_base + _REG(ENCP_VIDEO_EQPULS_ELINE));
writel_relaxed(vmode->encp.havon_begin,
priv->io_base + _REG(ENCP_VIDEO_HAVON_BEGIN));
writel_relaxed(vmode->encp.havon_end,
priv->io_base + _REG(ENCP_VIDEO_HAVON_END));
writel_relaxed(vmode->encp.vavon_bline,
priv->io_base + _REG(ENCP_VIDEO_VAVON_BLINE));
writel_relaxed(vmode->encp.vavon_eline,
priv->io_base + _REG(ENCP_VIDEO_VAVON_ELINE));
writel_relaxed(vmode->encp.hso_begin,
priv->io_base + _REG(ENCP_VIDEO_HSO_BEGIN));
writel_relaxed(vmode->encp.hso_end,
priv->io_base + _REG(ENCP_VIDEO_HSO_END));
writel_relaxed(vmode->encp.vso_begin,
priv->io_base + _REG(ENCP_VIDEO_VSO_BEGIN));
writel_relaxed(vmode->encp.vso_end,
priv->io_base + _REG(ENCP_VIDEO_VSO_END));
writel_relaxed(vmode->encp.vso_bline,
priv->io_base + _REG(ENCP_VIDEO_VSO_BLINE));
if (vmode->encp.vso_eline_present)
writel_relaxed(vmode->encp.vso_eline,
priv->io_base + _REG(ENCP_VIDEO_VSO_ELINE));
if (vmode->encp.sy_val_present)
writel_relaxed(vmode->encp.sy_val,
priv->io_base + _REG(ENCP_VIDEO_SY_VAL));
if (vmode->encp.sy2_val_present)
writel_relaxed(vmode->encp.sy2_val,
priv->io_base + _REG(ENCP_VIDEO_SY2_VAL));
writel_relaxed(vmode->encp.max_lncnt,
priv->io_base + _REG(ENCP_VIDEO_MAX_LNCNT));
writel_relaxed(1, priv->io_base + _REG(ENCP_VIDEO_EN));
		/* Set DE signal polarity to active high */
writel_bits_relaxed(ENCP_VIDEO_MODE_DE_V_HIGH,
ENCP_VIDEO_MODE_DE_V_HIGH,
priv->io_base + _REG(ENCP_VIDEO_MODE));
/* Program DE timing */
de_h_begin = modulo(readl_relaxed(priv->io_base +
_REG(ENCP_VIDEO_HAVON_BEGIN))
+ venc_hdmi_latency,
total_pixels_venc);
de_h_end = modulo(de_h_begin + active_pixels_venc,
total_pixels_venc);
writel_relaxed(de_h_begin,
priv->io_base + _REG(ENCP_DE_H_BEGIN));
writel_relaxed(de_h_end,
priv->io_base + _REG(ENCP_DE_H_END));
/* Program DE timing for even field */
de_v_begin_even = readl_relaxed(priv->io_base
+ _REG(ENCP_VIDEO_VAVON_BLINE));
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
de_v_end_even = de_v_begin_even +
(mode->vdisplay / 2);
else
de_v_end_even = de_v_begin_even + mode->vdisplay;
writel_relaxed(de_v_begin_even,
priv->io_base + _REG(ENCP_DE_V_BEGIN_EVEN));
writel_relaxed(de_v_end_even,
priv->io_base + _REG(ENCP_DE_V_END_EVEN));
/* Program DE timing for odd field if needed */
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
unsigned int ofld_voav_ofst =
readl_relaxed(priv->io_base +
_REG(ENCP_VIDEO_OFLD_VOAV_OFST));
de_v_begin_odd = to_signed((ofld_voav_ofst & 0xf0) >> 4)
+ de_v_begin_even
+ ((mode->vtotal - 1) / 2);
de_v_end_odd = de_v_begin_odd + (mode->vdisplay / 2);
writel_relaxed(de_v_begin_odd,
priv->io_base + _REG(ENCP_DE_V_BEGIN_ODD));
writel_relaxed(de_v_end_odd,
priv->io_base + _REG(ENCP_DE_V_END_ODD));
}
/* Program Hsync timing */
if ((de_h_end + front_porch_venc) >= total_pixels_venc) {
hs_begin = de_h_end
+ front_porch_venc
- total_pixels_venc;
vs_adjust = 1;
} else {
hs_begin = de_h_end
+ front_porch_venc;
vs_adjust = 0;
}
hs_end = modulo(hs_begin + hsync_pixels_venc,
total_pixels_venc);
writel_relaxed(hs_begin,
priv->io_base + _REG(ENCP_DVI_HSO_BEGIN));
writel_relaxed(hs_end,
priv->io_base + _REG(ENCP_DVI_HSO_END));
/* Program Vsync timing for even field */
if (de_v_begin_even >=
(sof_lines + vsync_lines + (1 - vs_adjust)))
vs_bline_evn = de_v_begin_even
- sof_lines
- vsync_lines
- (1 - vs_adjust);
else
vs_bline_evn = mode->vtotal
+ de_v_begin_even
- sof_lines
- vsync_lines
- (1 - vs_adjust);
vs_eline_evn = modulo(vs_bline_evn + vsync_lines,
mode->vtotal);
writel_relaxed(vs_bline_evn,
priv->io_base + _REG(ENCP_DVI_VSO_BLINE_EVN));
writel_relaxed(vs_eline_evn,
priv->io_base + _REG(ENCP_DVI_VSO_ELINE_EVN));
vso_begin_evn = hs_begin;
writel_relaxed(vso_begin_evn,
priv->io_base + _REG(ENCP_DVI_VSO_BEGIN_EVN));
writel_relaxed(vso_begin_evn,
priv->io_base + _REG(ENCP_DVI_VSO_END_EVN));
/* Program Vsync timing for odd field if needed */
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
vs_bline_odd = (de_v_begin_odd - 1)
- sof_lines
- vsync_lines;
vs_eline_odd = (de_v_begin_odd - 1)
- vsync_lines;
vso_begin_odd = modulo(hs_begin
+ (total_pixels_venc >> 1),
total_pixels_venc);
writel_relaxed(vs_bline_odd,
priv->io_base + _REG(ENCP_DVI_VSO_BLINE_ODD));
writel_relaxed(vs_eline_odd,
priv->io_base + _REG(ENCP_DVI_VSO_ELINE_ODD));
writel_relaxed(vso_begin_odd,
priv->io_base + _REG(ENCP_DVI_VSO_BEGIN_ODD));
writel_relaxed(vso_begin_odd,
priv->io_base + _REG(ENCP_DVI_VSO_END_ODD));
}
/* Select ENCP for VIU */
meson_vpp_setup_mux(priv, MESON_VIU_VPP_MUX_ENCP);
}
/* Set VPU HDMI setting */
/* Select ENCP or ENCI data to HDMI */
if (use_enci)
reg = VPU_HDMI_ENCI_DATA_TO_HDMI;
else
reg = VPU_HDMI_ENCP_DATA_TO_HDMI;
/* Invert polarity of HSYNC from VENC */
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
reg |= VPU_HDMI_INV_HSYNC;
/* Invert polarity of VSYNC from VENC */
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
reg |= VPU_HDMI_INV_VSYNC;
/* Output data format */
reg |= ycrcb_map;
/*
* Write rate to the async FIFO between VENC and HDMI.
* One write every 2 wr_clk.
*/
if (venc_repeat || yuv420_mode)
reg |= VPU_HDMI_WR_RATE(2);
/*
* Read rate to the async FIFO between VENC and HDMI.
* One read every 2 wr_clk.
*/
if (hdmi_repeat)
reg |= VPU_HDMI_RD_RATE(2);
writel_relaxed(reg, priv->io_base + _REG(VPU_HDMI_SETTING));
priv->venc.hdmi_repeat = hdmi_repeat;
priv->venc.venc_repeat = venc_repeat;
priv->venc.hdmi_use_enci = use_enci;
priv->venc.current_mode = MESON_VENC_MODE_HDMI;
}
EXPORT_SYMBOL_GPL(meson_venc_hdmi_mode_set);
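/*
 * Identity (linear) gamma ramp: 256 entries with data[i] == i * 4,
 * spanning the 10-bit 0..1020 output range.
 */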
static unsigned short meson_encl_gamma_table[256] = {
0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60,
64, 68, 72, 76, 80, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120, 124,
128, 132, 136, 140, 144, 148, 152, 156, 160, 164, 168, 172, 176, 180, 184, 188,
192, 196, 200, 204, 208, 212, 216, 220, 224, 228, 232, 236, 240, 244, 248, 252,
256, 260, 264, 268, 272, 276, 280, 284, 288, 292, 296, 300, 304, 308, 312, 316,
320, 324, 328, 332, 336, 340, 344, 348, 352, 356, 360, 364, 368, 372, 376, 380,
384, 388, 392, 396, 400, 404, 408, 412, 416, 420, 424, 428, 432, 436, 440, 444,
448, 452, 456, 460, 464, 468, 472, 476, 480, 484, 488, 492, 496, 500, 504, 508,
512, 516, 520, 524, 528, 532, 536, 540, 544, 548, 552, 556, 560, 564, 568, 572,
576, 580, 584, 588, 592, 596, 600, 604, 608, 612, 616, 620, 624, 628, 632, 636,
640, 644, 648, 652, 656, 660, 664, 668, 672, 676, 680, 684, 688, 692, 696, 700,
704, 708, 712, 716, 720, 724, 728, 732, 736, 740, 744, 748, 752, 756, 760, 764,
768, 772, 776, 780, 784, 788, 792, 796, 800, 804, 808, 812, 816, 820, 824, 828,
832, 836, 840, 844, 848, 852, 856, 860, 864, 868, 872, 876, 880, 884, 888, 892,
896, 900, 904, 908, 912, 916, 920, 924, 928, 932, 936, 940, 944, 948, 952, 956,
960, 964, 968, 972, 976, 980, 984, 988, 992, 996, 1000, 1004, 1008, 1012, 1016, 1020,
};
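/*
 * Gamma LUT write sequence: disable the gamma port, wait for ADR_RDY,
 * point the address port at entry 0 of the selected R/G/B table with
 * auto-increment, push the 256 entries while polling WR_RDY, then write
 * the trailing address (0x23) back to the address port.
 */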
static void meson_encl_set_gamma_table(struct meson_drm *priv, u16 *data,
u32 rgb_mask)
{
int i, ret;
u32 reg;
writel_bits_relaxed(L_GAMMA_CNTL_PORT_EN, 0,
priv->io_base + _REG(L_GAMMA_CNTL_PORT));
ret = readl_relaxed_poll_timeout(priv->io_base + _REG(L_GAMMA_CNTL_PORT),
reg, reg & L_GAMMA_CNTL_PORT_ADR_RDY, 10, 10000);
if (ret)
pr_warn("%s: GAMMA ADR_RDY timeout\n", __func__);
writel_relaxed(L_GAMMA_ADDR_PORT_AUTO_INC | rgb_mask |
FIELD_PREP(L_GAMMA_ADDR_PORT_ADDR, 0),
priv->io_base + _REG(L_GAMMA_ADDR_PORT));
for (i = 0; i < 256; i++) {
ret = readl_relaxed_poll_timeout(priv->io_base + _REG(L_GAMMA_CNTL_PORT),
reg, reg & L_GAMMA_CNTL_PORT_WR_RDY,
10, 10000);
if (ret)
pr_warn_once("%s: GAMMA WR_RDY timeout\n", __func__);
writel_relaxed(data[i], priv->io_base + _REG(L_GAMMA_DATA_PORT));
}
ret = readl_relaxed_poll_timeout(priv->io_base + _REG(L_GAMMA_CNTL_PORT),
reg, reg & L_GAMMA_CNTL_PORT_ADR_RDY, 10, 10000);
if (ret)
pr_warn("%s: GAMMA ADR_RDY timeout\n", __func__);
writel_relaxed(L_GAMMA_ADDR_PORT_AUTO_INC | rgb_mask |
FIELD_PREP(L_GAMMA_ADDR_PORT_ADDR, 0x23),
priv->io_base + _REG(L_GAMMA_ADDR_PORT));
}
void meson_encl_load_gamma(struct meson_drm *priv)
{
meson_encl_set_gamma_table(priv, meson_encl_gamma_table, L_GAMMA_ADDR_PORT_SEL_R);
meson_encl_set_gamma_table(priv, meson_encl_gamma_table, L_GAMMA_ADDR_PORT_SEL_G);
meson_encl_set_gamma_table(priv, meson_encl_gamma_table, L_GAMMA_ADDR_PORT_SEL_B);
writel_bits_relaxed(L_GAMMA_CNTL_PORT_EN, L_GAMMA_CNTL_PORT_EN,
priv->io_base + _REG(L_GAMMA_CNTL_PORT));
}
void meson_venc_mipi_dsi_mode_set(struct meson_drm *priv,
const struct drm_display_mode *mode)
{
unsigned int max_pxcnt;
unsigned int max_lncnt;
unsigned int havon_begin;
unsigned int havon_end;
unsigned int vavon_bline;
unsigned int vavon_eline;
unsigned int hso_begin;
unsigned int hso_end;
unsigned int vso_begin;
unsigned int vso_end;
unsigned int vso_bline;
unsigned int vso_eline;
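	/*
	 * hso_begin/vso_bline are programmed to 0, so the ENCL counters
	 * start at the beginning of the sync pulse and active video begins
	 * after sync + back porch, i.e. htotal - hsync_start horizontally
	 * and vtotal - vsync_start vertically. As a purely illustrative
	 * example, a 1920x1080 mode with htotal = 2200 and
	 * hsync_start = 2008 yields havon_begin = 192 and havon_end = 2111.
	 */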
max_pxcnt = mode->htotal - 1;
max_lncnt = mode->vtotal - 1;
havon_begin = mode->htotal - mode->hsync_start;
havon_end = havon_begin + mode->hdisplay - 1;
vavon_bline = mode->vtotal - mode->vsync_start;
vavon_eline = vavon_bline + mode->vdisplay - 1;
hso_begin = 0;
hso_end = mode->hsync_end - mode->hsync_start;
vso_begin = 0;
vso_end = 0;
vso_bline = 0;
vso_eline = mode->vsync_end - mode->vsync_start;
meson_vpp_setup_mux(priv, MESON_VIU_VPP_MUX_ENCL);
writel_relaxed(0, priv->io_base + _REG(ENCL_VIDEO_EN));
writel_relaxed(ENCL_PX_LN_CNT_SHADOW_EN, priv->io_base + _REG(ENCL_VIDEO_MODE));
writel_relaxed(ENCL_VIDEO_MODE_ADV_VFIFO_EN |
ENCL_VIDEO_MODE_ADV_GAIN_HDTV |
ENCL_SEL_GAMMA_RGB_IN, priv->io_base + _REG(ENCL_VIDEO_MODE_ADV));
writel_relaxed(ENCL_VIDEO_FILT_CTRL_BYPASS_FILTER,
priv->io_base + _REG(ENCL_VIDEO_FILT_CTRL));
writel_relaxed(max_pxcnt, priv->io_base + _REG(ENCL_VIDEO_MAX_PXCNT));
writel_relaxed(max_lncnt, priv->io_base + _REG(ENCL_VIDEO_MAX_LNCNT));
writel_relaxed(havon_begin, priv->io_base + _REG(ENCL_VIDEO_HAVON_BEGIN));
writel_relaxed(havon_end, priv->io_base + _REG(ENCL_VIDEO_HAVON_END));
writel_relaxed(vavon_bline, priv->io_base + _REG(ENCL_VIDEO_VAVON_BLINE));
writel_relaxed(vavon_eline, priv->io_base + _REG(ENCL_VIDEO_VAVON_ELINE));
writel_relaxed(hso_begin, priv->io_base + _REG(ENCL_VIDEO_HSO_BEGIN));
writel_relaxed(hso_end, priv->io_base + _REG(ENCL_VIDEO_HSO_END));
writel_relaxed(vso_begin, priv->io_base + _REG(ENCL_VIDEO_VSO_BEGIN));
writel_relaxed(vso_end, priv->io_base + _REG(ENCL_VIDEO_VSO_END));
writel_relaxed(vso_bline, priv->io_base + _REG(ENCL_VIDEO_VSO_BLINE));
writel_relaxed(vso_eline, priv->io_base + _REG(ENCL_VIDEO_VSO_ELINE));
writel_relaxed(ENCL_VIDEO_RGBIN_RGB | ENCL_VIDEO_RGBIN_ZBLK,
priv->io_base + _REG(ENCL_VIDEO_RGBIN_CTRL));
/* default black pattern */
writel_relaxed(0, priv->io_base + _REG(ENCL_TST_MDSEL));
writel_relaxed(0, priv->io_base + _REG(ENCL_TST_Y));
writel_relaxed(0, priv->io_base + _REG(ENCL_TST_CB));
writel_relaxed(0, priv->io_base + _REG(ENCL_TST_CR));
writel_relaxed(1, priv->io_base + _REG(ENCL_TST_EN));
writel_bits_relaxed(ENCL_VIDEO_MODE_ADV_VFIFO_EN, 0,
priv->io_base + _REG(ENCL_VIDEO_MODE_ADV));
writel_relaxed(1, priv->io_base + _REG(ENCL_VIDEO_EN));
writel_relaxed(0, priv->io_base + _REG(L_RGB_BASE_ADDR));
writel_relaxed(0x400, priv->io_base + _REG(L_RGB_COEFF_ADDR)); /* Magic value */
writel_relaxed(L_DITH_CNTL_DITH10_EN, priv->io_base + _REG(L_DITH_CNTL_ADDR));
/* DE signal for TTL */
writel_relaxed(havon_begin, priv->io_base + _REG(L_OEH_HS_ADDR));
writel_relaxed(havon_end + 1, priv->io_base + _REG(L_OEH_HE_ADDR));
writel_relaxed(vavon_bline, priv->io_base + _REG(L_OEH_VS_ADDR));
writel_relaxed(vavon_eline, priv->io_base + _REG(L_OEH_VE_ADDR));
/* DE signal for TTL */
writel_relaxed(havon_begin, priv->io_base + _REG(L_OEV1_HS_ADDR));
writel_relaxed(havon_end + 1, priv->io_base + _REG(L_OEV1_HE_ADDR));
writel_relaxed(vavon_bline, priv->io_base + _REG(L_OEV1_VS_ADDR));
writel_relaxed(vavon_eline, priv->io_base + _REG(L_OEV1_VE_ADDR));
/* Hsync signal for TTL */
if (mode->flags & DRM_MODE_FLAG_PHSYNC) {
writel_relaxed(hso_begin, priv->io_base + _REG(L_STH1_HS_ADDR));
writel_relaxed(hso_end, priv->io_base + _REG(L_STH1_HE_ADDR));
} else {
writel_relaxed(hso_end, priv->io_base + _REG(L_STH1_HS_ADDR));
writel_relaxed(hso_begin, priv->io_base + _REG(L_STH1_HE_ADDR));
}
writel_relaxed(0, priv->io_base + _REG(L_STH1_VS_ADDR));
writel_relaxed(max_lncnt, priv->io_base + _REG(L_STH1_VE_ADDR));
/* Vsync signal for TTL */
writel_relaxed(vso_begin, priv->io_base + _REG(L_STV1_HS_ADDR));
writel_relaxed(vso_end, priv->io_base + _REG(L_STV1_HE_ADDR));
if (mode->flags & DRM_MODE_FLAG_PVSYNC) {
writel_relaxed(vso_bline, priv->io_base + _REG(L_STV1_VS_ADDR));
writel_relaxed(vso_eline, priv->io_base + _REG(L_STV1_VE_ADDR));
} else {
writel_relaxed(vso_eline, priv->io_base + _REG(L_STV1_VS_ADDR));
writel_relaxed(vso_bline, priv->io_base + _REG(L_STV1_VE_ADDR));
}
/* DE signal */
writel_relaxed(havon_begin, priv->io_base + _REG(L_DE_HS_ADDR));
writel_relaxed(havon_end + 1, priv->io_base + _REG(L_DE_HE_ADDR));
writel_relaxed(vavon_bline, priv->io_base + _REG(L_DE_VS_ADDR));
writel_relaxed(vavon_eline, priv->io_base + _REG(L_DE_VE_ADDR));
/* Hsync signal */
writel_relaxed(hso_begin, priv->io_base + _REG(L_HSYNC_HS_ADDR));
writel_relaxed(hso_end, priv->io_base + _REG(L_HSYNC_HE_ADDR));
writel_relaxed(0, priv->io_base + _REG(L_HSYNC_VS_ADDR));
writel_relaxed(max_lncnt, priv->io_base + _REG(L_HSYNC_VE_ADDR));
/* Vsync signal */
writel_relaxed(vso_begin, priv->io_base + _REG(L_VSYNC_HS_ADDR));
writel_relaxed(vso_end, priv->io_base + _REG(L_VSYNC_HE_ADDR));
writel_relaxed(vso_bline, priv->io_base + _REG(L_VSYNC_VS_ADDR));
writel_relaxed(vso_eline, priv->io_base + _REG(L_VSYNC_VE_ADDR));
writel_relaxed(0, priv->io_base + _REG(L_INV_CNT_ADDR));
writel_relaxed(L_TCON_MISC_SEL_STV1 | L_TCON_MISC_SEL_STV2,
priv->io_base + _REG(L_TCON_MISC_SEL_ADDR));
priv->venc.current_mode = MESON_VENC_MODE_MIPI_DSI;
}
EXPORT_SYMBOL_GPL(meson_venc_mipi_dsi_mode_set);
void meson_venci_cvbs_mode_set(struct meson_drm *priv,
struct meson_cvbs_enci_mode *mode)
{
u32 reg;
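	/* Nothing to do if this CVBS mode is already programmed */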
if (mode->mode_tag == priv->venc.current_mode)
return;
/* CVBS Filter settings */
writel_relaxed(ENCI_CFILT_CMPT_SEL_HIGH | 0x10,
priv->io_base + _REG(ENCI_CFILT_CTRL));
writel_relaxed(ENCI_CFILT_CMPT_CR_DLY(2) |
ENCI_CFILT_CMPT_CB_DLY(1),
priv->io_base + _REG(ENCI_CFILT_CTRL2));
/* Digital Video Select : Interlace, clk27 clk, external */
writel_relaxed(0, priv->io_base + _REG(VENC_DVI_SETTING));
/* Reset Video Mode */
writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_MODE));
writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_MODE_ADV));
/* Horizontal sync signal output */
writel_relaxed(mode->hso_begin,
priv->io_base + _REG(ENCI_SYNC_HSO_BEGIN));
writel_relaxed(mode->hso_end,
priv->io_base + _REG(ENCI_SYNC_HSO_END));
/* Vertical Sync lines */
writel_relaxed(mode->vso_even,
priv->io_base + _REG(ENCI_SYNC_VSO_EVNLN));
writel_relaxed(mode->vso_odd,
priv->io_base + _REG(ENCI_SYNC_VSO_ODDLN));
/* Macrovision max amplitude change */
writel_relaxed(ENCI_MACV_MAX_AMP_ENABLE_CHANGE |
ENCI_MACV_MAX_AMP_VAL(mode->macv_max_amp),
priv->io_base + _REG(ENCI_MACV_MAX_AMP));
/* Video mode */
writel_relaxed(mode->video_prog_mode,
priv->io_base + _REG(VENC_VIDEO_PROG_MODE));
writel_relaxed(mode->video_mode,
priv->io_base + _REG(ENCI_VIDEO_MODE));
/*
* Advanced Video Mode :
* Demux shifting 0x2
	 * Blank line end at line 17/22
* High bandwidth Luma Filter
* Low bandwidth Chroma Filter
* Bypass luma low pass filter
* No macrovision on CSYNC
*/
writel_relaxed(ENCI_VIDEO_MODE_ADV_DMXMD(2) |
ENCI_VIDEO_MODE_ADV_VBICTL_LINE_17_22 |
ENCI_VIDEO_MODE_ADV_YBW_HIGH,
priv->io_base + _REG(ENCI_VIDEO_MODE_ADV));
writel(mode->sch_adjust, priv->io_base + _REG(ENCI_VIDEO_SCH));
	/* Sync mode : Master mode, free run, send HSO/VSO out */
writel_relaxed(0x07, priv->io_base + _REG(ENCI_SYNC_MODE));
/* 0x3 Y, C, and Component Y delay */
writel_relaxed(mode->yc_delay, priv->io_base + _REG(ENCI_YC_DELAY));
/* Timings */
writel_relaxed(mode->pixel_start,
priv->io_base + _REG(ENCI_VFIFO2VD_PIXEL_START));
writel_relaxed(mode->pixel_end,
priv->io_base + _REG(ENCI_VFIFO2VD_PIXEL_END));
writel_relaxed(mode->top_field_line_start,
priv->io_base + _REG(ENCI_VFIFO2VD_LINE_TOP_START));
writel_relaxed(mode->top_field_line_end,
priv->io_base + _REG(ENCI_VFIFO2VD_LINE_TOP_END));
writel_relaxed(mode->bottom_field_line_start,
priv->io_base + _REG(ENCI_VFIFO2VD_LINE_BOT_START));
writel_relaxed(mode->bottom_field_line_end,
priv->io_base + _REG(ENCI_VFIFO2VD_LINE_BOT_END));
/* Internal Venc, Internal VIU Sync, Internal Vencoder */
writel_relaxed(0, priv->io_base + _REG(VENC_SYNC_ROUTE));
	/* Take the Interlaced TV Encoder out of reset */
writel_relaxed(0, priv->io_base + _REG(ENCI_DBG_PX_RST));
/*
* Enable Vfifo2vd and set Y_Cb_Y_Cr:
* Corresponding value:
* Y => 00 or 10
* Cb => 01
* Cr => 11
* Ex: 0x4e => 01001110 would mean Cb/Y/Cr/Y
*/
writel_relaxed(ENCI_VFIFO2VD_CTL_ENABLE |
ENCI_VFIFO2VD_CTL_VD_SEL(0x4e),
priv->io_base + _REG(ENCI_VFIFO2VD_CTL));
	/* Power up DACs */
writel_relaxed(0, priv->io_base + _REG(VENC_VDAC_SETTING));
/* Video Upsampling */
/*
* CTRL0, CTRL1 and CTRL2:
	 * Filter0: input data sampled every 2 clocks
* Filter1: filtering and upsample enable
*/
reg = VENC_UPSAMPLE_CTRL_F0_2_CLK_RATIO | VENC_UPSAMPLE_CTRL_F1_EN |
VENC_UPSAMPLE_CTRL_F1_UPSAMPLE_EN;
/*
* Upsample CTRL0:
* Interlace High Bandwidth Luma
*/
writel_relaxed(VENC_UPSAMPLE_CTRL_INTERLACE_HIGH_LUMA | reg,
priv->io_base + _REG(VENC_UPSAMPLE_CTRL0));
/*
* Upsample CTRL1:
* Interlace Pb
*/
writel_relaxed(VENC_UPSAMPLE_CTRL_INTERLACE_PB | reg,
priv->io_base + _REG(VENC_UPSAMPLE_CTRL1));
/*
* Upsample CTRL2:
* Interlace R
*/
writel_relaxed(VENC_UPSAMPLE_CTRL_INTERLACE_PR | reg,
priv->io_base + _REG(VENC_UPSAMPLE_CTRL2));
/* Select Interlace Y DACs */
writel_relaxed(0, priv->io_base + _REG(VENC_VDAC_DACSEL0));
writel_relaxed(0, priv->io_base + _REG(VENC_VDAC_DACSEL1));
writel_relaxed(0, priv->io_base + _REG(VENC_VDAC_DACSEL2));
writel_relaxed(0, priv->io_base + _REG(VENC_VDAC_DACSEL3));
writel_relaxed(0, priv->io_base + _REG(VENC_VDAC_DACSEL4));
writel_relaxed(0, priv->io_base + _REG(VENC_VDAC_DACSEL5));
/* Select ENCI for VIU */
meson_vpp_setup_mux(priv, MESON_VIU_VPP_MUX_ENCI);
/* Enable ENCI FIFO */
writel_relaxed(VENC_VDAC_FIFO_EN_ENCI_ENABLE,
priv->io_base + _REG(VENC_VDAC_FIFO_CTRL));
/* Select ENCI DACs 0, 1, 4, and 5 */
writel_relaxed(0x11, priv->io_base + _REG(ENCI_DACSEL_0));
writel_relaxed(0x11, priv->io_base + _REG(ENCI_DACSEL_1));
/* Interlace video enable */
writel_relaxed(ENCI_VIDEO_EN_ENABLE,
priv->io_base + _REG(ENCI_VIDEO_EN));
/* Configure Video Saturation / Contrast / Brightness / Hue */
writel_relaxed(mode->video_saturation,
priv->io_base + _REG(ENCI_VIDEO_SAT));
writel_relaxed(mode->video_contrast,
priv->io_base + _REG(ENCI_VIDEO_CONT));
writel_relaxed(mode->video_brightness,
priv->io_base + _REG(ENCI_VIDEO_BRIGHT));
writel_relaxed(mode->video_hue,
priv->io_base + _REG(ENCI_VIDEO_HUE));
/* Enable DAC0 Filter */
writel_relaxed(VENC_VDAC_DAC0_FILT_CTRL0_EN,
priv->io_base + _REG(VENC_VDAC_DAC0_FILT_CTRL0));
writel_relaxed(0xfc48, priv->io_base + _REG(VENC_VDAC_DAC0_FILT_CTRL1));
/* 0 in Macrovision register 0 */
writel_relaxed(0, priv->io_base + _REG(ENCI_MACV_N0));
/* Analog Synchronization and color burst value adjust */
writel_relaxed(mode->analog_sync_adj,
priv->io_base + _REG(ENCI_SYNC_ADJ));
priv->venc.current_mode = mode->mode_tag;
}
/* Returns the current ENCI field polarity */
unsigned int meson_venci_get_field(struct meson_drm *priv)
{
return readl_relaxed(priv->io_base + _REG(ENCI_INFO_READ)) & BIT(29);
}
void meson_venc_enable_vsync(struct meson_drm *priv)
{
switch (priv->venc.current_mode) {
case MESON_VENC_MODE_MIPI_DSI:
writel_relaxed(VENC_INTCTRL_ENCP_LNRST_INT_EN,
priv->io_base + _REG(VENC_INTCTRL));
break;
default:
writel_relaxed(VENC_INTCTRL_ENCI_LNRST_INT_EN,
priv->io_base + _REG(VENC_INTCTRL));
}
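	/* Ungate the VPU interrupt clock (HHI_GCLK_MPEG2, bit 25) */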
regmap_update_bits(priv->hhi, HHI_GCLK_MPEG2, BIT(25), BIT(25));
}
void meson_venc_disable_vsync(struct meson_drm *priv)
{
regmap_update_bits(priv->hhi, HHI_GCLK_MPEG2, BIT(25), 0);
writel_relaxed(0, priv->io_base + _REG(VENC_INTCTRL));
}
void meson_venc_init(struct meson_drm *priv)
{
/* Disable CVBS VDAC */
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0);
regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 8);
} else {
regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);
}
	/* Power down DACs */
writel_relaxed(0xff, priv->io_base + _REG(VENC_VDAC_SETTING));
/* Disable HDMI PHY */
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0);
/* Disable HDMI */
writel_bits_relaxed(VPU_HDMI_ENCI_DATA_TO_HDMI |
VPU_HDMI_ENCP_DATA_TO_HDMI, 0,
priv->io_base + _REG(VPU_HDMI_SETTING));
/* Disable all encoders */
writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN));
writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN));
writel_relaxed(0, priv->io_base + _REG(ENCL_VIDEO_EN));
/* Disable VSync IRQ */
meson_venc_disable_vsync(priv);
priv->venc.current_mode = MESON_VENC_MODE_NONE;
}
| linux-master | drivers/gpu/drm/meson/meson_venc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 BayLibre, SAS
* Author: Neil Armstrong <[email protected]>
* Copyright (C) 2015 Amlogic, Inc. All rights reserved.
* Copyright (C) 2014 Endless Mobile
*
* Written by:
* Jasper St. Pierre <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/soc/amlogic/meson-canvas.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "meson_crtc.h"
#include "meson_plane.h"
#include "meson_registers.h"
#include "meson_venc.h"
#include "meson_viu.h"
#include "meson_rdma.h"
#include "meson_vpp.h"
#include "meson_osd_afbcd.h"
#define MESON_G12A_VIU_OFFSET 0x5ec0
/* CRTC definition */
struct meson_crtc {
struct drm_crtc base;
struct drm_pending_vblank_event *event;
struct meson_drm *priv;
void (*enable_osd1)(struct meson_drm *priv);
void (*enable_vd1)(struct meson_drm *priv);
void (*enable_osd1_afbc)(struct meson_drm *priv);
void (*disable_osd1_afbc)(struct meson_drm *priv);
unsigned int viu_offset;
bool vsync_forced;
bool vsync_disabled;
};
#define to_meson_crtc(x) container_of(x, struct meson_crtc, base)
/* CRTC */
static int meson_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct meson_drm *priv = meson_crtc->priv;
meson_crtc->vsync_disabled = false;
meson_venc_enable_vsync(priv);
return 0;
}
static void meson_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct meson_drm *priv = meson_crtc->priv;
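	/*
	 * vsync_forced is set while the OSD1 AFBC decoder has to be reset
	 * and re-enabled on every frame, so the IRQ must stay enabled.
	 */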
if (!meson_crtc->vsync_forced) {
meson_crtc->vsync_disabled = true;
meson_venc_disable_vsync(priv);
}
}
static const struct drm_crtc_funcs meson_crtc_funcs = {
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.destroy = drm_crtc_cleanup,
.page_flip = drm_atomic_helper_page_flip,
.reset = drm_atomic_helper_crtc_reset,
.set_config = drm_atomic_helper_set_config,
.enable_vblank = meson_crtc_enable_vblank,
.disable_vblank = meson_crtc_disable_vblank,
};
static void meson_g12a_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct drm_crtc_state *crtc_state = crtc->state;
struct meson_drm *priv = meson_crtc->priv;
DRM_DEBUG_DRIVER("\n");
if (!crtc_state) {
DRM_ERROR("Invalid crtc_state\n");
return;
}
/* VD1 Preblend vertical start/end */
writel(FIELD_PREP(GENMASK(11, 0), 2303),
priv->io_base + _REG(VPP_PREBLEND_VD1_V_START_END));
/* Setup Blender */
writel(crtc_state->mode.hdisplay |
crtc_state->mode.vdisplay << 16,
priv->io_base + _REG(VPP_POSTBLEND_H_SIZE));
writel_relaxed(0 << 16 |
(crtc_state->mode.hdisplay - 1),
priv->io_base + _REG(VPP_OSD1_BLD_H_SCOPE));
writel_relaxed(0 << 16 |
(crtc_state->mode.vdisplay - 1),
priv->io_base + _REG(VPP_OSD1_BLD_V_SCOPE));
writel_relaxed(crtc_state->mode.hdisplay << 16 |
crtc_state->mode.vdisplay,
priv->io_base + _REG(VPP_OUT_H_V_SIZE));
drm_crtc_vblank_on(crtc);
}
static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct drm_crtc_state *crtc_state = crtc->state;
struct meson_drm *priv = meson_crtc->priv;
DRM_DEBUG_DRIVER("\n");
if (!crtc_state) {
DRM_ERROR("Invalid crtc_state\n");
return;
}
/* Enable VPP Postblend */
writel(crtc_state->mode.hdisplay,
priv->io_base + _REG(VPP_POSTBLEND_H_SIZE));
/* VD1 Preblend vertical start/end */
writel(FIELD_PREP(GENMASK(11, 0), 2303),
priv->io_base + _REG(VPP_PREBLEND_VD1_V_START_END));
writel_bits_relaxed(VPP_POSTBLEND_ENABLE, VPP_POSTBLEND_ENABLE,
priv->io_base + _REG(VPP_MISC));
drm_crtc_vblank_on(crtc);
}
static void meson_g12a_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct meson_drm *priv = meson_crtc->priv;
DRM_DEBUG_DRIVER("\n");
drm_crtc_vblank_off(crtc);
priv->viu.osd1_enabled = false;
priv->viu.osd1_commit = false;
priv->viu.vd1_enabled = false;
priv->viu.vd1_commit = false;
if (crtc->state->event && !crtc->state->active) {
spin_lock_irq(&crtc->dev->event_lock);
drm_crtc_send_vblank_event(crtc, crtc->state->event);
spin_unlock_irq(&crtc->dev->event_lock);
crtc->state->event = NULL;
}
}
static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct meson_drm *priv = meson_crtc->priv;
DRM_DEBUG_DRIVER("\n");
drm_crtc_vblank_off(crtc);
priv->viu.osd1_enabled = false;
priv->viu.osd1_commit = false;
priv->viu.vd1_enabled = false;
priv->viu.vd1_commit = false;
/* Disable VPP Postblend */
writel_bits_relaxed(VPP_OSD1_POSTBLEND | VPP_VD1_POSTBLEND |
VPP_VD1_PREBLEND | VPP_POSTBLEND_ENABLE, 0,
priv->io_base + _REG(VPP_MISC));
if (crtc->state->event && !crtc->state->active) {
spin_lock_irq(&crtc->dev->event_lock);
drm_crtc_send_vblank_event(crtc, crtc->state->event);
spin_unlock_irq(&crtc->dev->event_lock);
crtc->state->event = NULL;
}
}
static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
unsigned long flags;
if (crtc->state->event) {
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
spin_lock_irqsave(&crtc->dev->event_lock, flags);
meson_crtc->event = crtc->state->event;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
crtc->state->event = NULL;
}
}
static void meson_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct meson_drm *priv = meson_crtc->priv;
priv->viu.osd1_commit = true;
priv->viu.vd1_commit = true;
}
static const struct drm_crtc_helper_funcs meson_crtc_helper_funcs = {
.atomic_begin = meson_crtc_atomic_begin,
.atomic_flush = meson_crtc_atomic_flush,
.atomic_enable = meson_crtc_atomic_enable,
.atomic_disable = meson_crtc_atomic_disable,
};
static const struct drm_crtc_helper_funcs meson_g12a_crtc_helper_funcs = {
.atomic_begin = meson_crtc_atomic_begin,
.atomic_flush = meson_crtc_atomic_flush,
.atomic_enable = meson_g12a_crtc_atomic_enable,
.atomic_disable = meson_g12a_crtc_atomic_disable,
};
static void meson_crtc_enable_osd1(struct meson_drm *priv)
{
writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND,
priv->io_base + _REG(VPP_MISC));
}
static void meson_crtc_g12a_enable_osd1_afbc(struct meson_drm *priv)
{
writel_relaxed(priv->viu.osd1_blk2_cfg4,
priv->io_base + _REG(VIU_OSD1_BLK2_CFG_W4));
writel_bits_relaxed(OSD_MEM_LINEAR_ADDR, OSD_MEM_LINEAR_ADDR,
priv->io_base + _REG(VIU_OSD1_CTRL_STAT));
writel_relaxed(priv->viu.osd1_blk1_cfg4,
priv->io_base + _REG(VIU_OSD1_BLK1_CFG_W4));
meson_viu_g12a_enable_osd1_afbc(priv);
writel_bits_relaxed(OSD_MEM_LINEAR_ADDR, OSD_MEM_LINEAR_ADDR,
priv->io_base + _REG(VIU_OSD1_CTRL_STAT));
writel_bits_relaxed(OSD_MALI_SRC_EN, OSD_MALI_SRC_EN,
priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W0));
}
static void meson_g12a_crtc_enable_osd1(struct meson_drm *priv)
{
writel_relaxed(priv->viu.osd_blend_din0_scope_h,
priv->io_base +
_REG(VIU_OSD_BLEND_DIN0_SCOPE_H));
writel_relaxed(priv->viu.osd_blend_din0_scope_v,
priv->io_base +
_REG(VIU_OSD_BLEND_DIN0_SCOPE_V));
writel_relaxed(priv->viu.osb_blend0_size,
priv->io_base +
_REG(VIU_OSD_BLEND_BLEND0_SIZE));
writel_relaxed(priv->viu.osb_blend1_size,
priv->io_base +
_REG(VIU_OSD_BLEND_BLEND1_SIZE));
writel_bits_relaxed(3 << 8, 3 << 8,
priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
}
static void meson_crtc_enable_vd1(struct meson_drm *priv)
{
writel_bits_relaxed(VPP_VD1_PREBLEND | VPP_VD1_POSTBLEND |
VPP_COLOR_MNG_ENABLE,
VPP_VD1_PREBLEND | VPP_VD1_POSTBLEND |
VPP_COLOR_MNG_ENABLE,
priv->io_base + _REG(VPP_MISC));
writel_bits_relaxed(VIU_CTRL0_AFBC_TO_VD1,
priv->viu.vd1_afbc ? VIU_CTRL0_AFBC_TO_VD1 : 0,
priv->io_base + _REG(VIU_MISC_CTRL0));
}
static void meson_g12a_crtc_enable_vd1(struct meson_drm *priv)
{
writel_relaxed(VD_BLEND_PREBLD_SRC_VD1 |
VD_BLEND_PREBLD_PREMULT_EN |
VD_BLEND_POSTBLD_SRC_VD1 |
VD_BLEND_POSTBLD_PREMULT_EN,
priv->io_base + _REG(VD1_BLEND_SRC_CTRL));
writel_relaxed(priv->viu.vd1_afbc ?
(VD1_AXI_SEL_AFBC | AFBC_VD1_SEL) : 0,
priv->io_base + _REG(VD1_AFBCD0_MISC_CTRL));
}
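/*
 * Vsync IRQ handler: the plane state prepared by the atomic commit path
 * is only latched into the hardware here, once per frame, whenever the
 * osd1_commit/vd1_commit flags have been raised by atomic_flush.
 */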
void meson_crtc_irq(struct meson_drm *priv)
{
struct meson_crtc *meson_crtc = to_meson_crtc(priv->crtc);
unsigned long flags;
/* Update the OSD registers */
if (priv->viu.osd1_enabled && priv->viu.osd1_commit) {
writel_relaxed(priv->viu.osd1_ctrl_stat,
priv->io_base + _REG(VIU_OSD1_CTRL_STAT));
writel_relaxed(priv->viu.osd1_ctrl_stat2,
priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
writel_relaxed(priv->viu.osd1_blk0_cfg[0],
priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W0));
writel_relaxed(priv->viu.osd1_blk0_cfg[1],
priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W1));
writel_relaxed(priv->viu.osd1_blk0_cfg[2],
priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W2));
writel_relaxed(priv->viu.osd1_blk0_cfg[3],
priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W3));
writel_relaxed(priv->viu.osd1_blk0_cfg[4],
priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W4));
if (priv->viu.osd1_afbcd) {
if (meson_crtc->enable_osd1_afbc)
meson_crtc->enable_osd1_afbc(priv);
} else {
if (meson_crtc->disable_osd1_afbc)
meson_crtc->disable_osd1_afbc(priv);
if (priv->afbcd.ops) {
priv->afbcd.ops->reset(priv);
priv->afbcd.ops->disable(priv);
}
meson_crtc->vsync_forced = false;
}
writel_relaxed(priv->viu.osd_sc_ctrl0,
priv->io_base + _REG(VPP_OSD_SC_CTRL0));
writel_relaxed(priv->viu.osd_sc_i_wh_m1,
priv->io_base + _REG(VPP_OSD_SCI_WH_M1));
writel_relaxed(priv->viu.osd_sc_o_h_start_end,
priv->io_base + _REG(VPP_OSD_SCO_H_START_END));
writel_relaxed(priv->viu.osd_sc_o_v_start_end,
priv->io_base + _REG(VPP_OSD_SCO_V_START_END));
writel_relaxed(priv->viu.osd_sc_v_ini_phase,
priv->io_base + _REG(VPP_OSD_VSC_INI_PHASE));
writel_relaxed(priv->viu.osd_sc_v_phase_step,
priv->io_base + _REG(VPP_OSD_VSC_PHASE_STEP));
writel_relaxed(priv->viu.osd_sc_h_ini_phase,
priv->io_base + _REG(VPP_OSD_HSC_INI_PHASE));
writel_relaxed(priv->viu.osd_sc_h_phase_step,
priv->io_base + _REG(VPP_OSD_HSC_PHASE_STEP));
writel_relaxed(priv->viu.osd_sc_h_ctrl0,
priv->io_base + _REG(VPP_OSD_HSC_CTRL0));
writel_relaxed(priv->viu.osd_sc_v_ctrl0,
priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
if (!priv->viu.osd1_afbcd)
meson_canvas_config(priv->canvas, priv->canvas_id_osd1,
priv->viu.osd1_addr,
priv->viu.osd1_stride,
priv->viu.osd1_height,
MESON_CANVAS_WRAP_NONE,
MESON_CANVAS_BLKMODE_LINEAR, 0);
/* Enable OSD1 */
if (meson_crtc->enable_osd1)
meson_crtc->enable_osd1(priv);
if (priv->viu.osd1_afbcd) {
priv->afbcd.ops->reset(priv);
priv->afbcd.ops->setup(priv);
priv->afbcd.ops->enable(priv);
meson_crtc->vsync_forced = true;
}
priv->viu.osd1_commit = false;
}
/* Update the VD1 registers */
if (priv->viu.vd1_enabled && priv->viu.vd1_commit) {
if (priv->viu.vd1_afbc) {
writel_relaxed(priv->viu.vd1_afbc_head_addr,
priv->io_base +
_REG(AFBC_HEAD_BADDR));
writel_relaxed(priv->viu.vd1_afbc_body_addr,
priv->io_base +
_REG(AFBC_BODY_BADDR));
writel_relaxed(priv->viu.vd1_afbc_en,
priv->io_base +
_REG(AFBC_ENABLE));
writel_relaxed(priv->viu.vd1_afbc_mode,
priv->io_base +
_REG(AFBC_MODE));
writel_relaxed(priv->viu.vd1_afbc_size_in,
priv->io_base +
_REG(AFBC_SIZE_IN));
writel_relaxed(priv->viu.vd1_afbc_dec_def_color,
priv->io_base +
_REG(AFBC_DEC_DEF_COLOR));
writel_relaxed(priv->viu.vd1_afbc_conv_ctrl,
priv->io_base +
_REG(AFBC_CONV_CTRL));
writel_relaxed(priv->viu.vd1_afbc_size_out,
priv->io_base +
_REG(AFBC_SIZE_OUT));
writel_relaxed(priv->viu.vd1_afbc_vd_cfmt_ctrl,
priv->io_base +
_REG(AFBC_VD_CFMT_CTRL));
writel_relaxed(priv->viu.vd1_afbc_vd_cfmt_w,
priv->io_base +
_REG(AFBC_VD_CFMT_W));
writel_relaxed(priv->viu.vd1_afbc_mif_hor_scope,
priv->io_base +
_REG(AFBC_MIF_HOR_SCOPE));
writel_relaxed(priv->viu.vd1_afbc_mif_ver_scope,
priv->io_base +
_REG(AFBC_MIF_VER_SCOPE));
writel_relaxed(priv->viu.vd1_afbc_pixel_hor_scope,
				priv->io_base +
_REG(AFBC_PIXEL_HOR_SCOPE));
writel_relaxed(priv->viu.vd1_afbc_pixel_ver_scope,
priv->io_base +
_REG(AFBC_PIXEL_VER_SCOPE));
writel_relaxed(priv->viu.vd1_afbc_vd_cfmt_h,
priv->io_base +
_REG(AFBC_VD_CFMT_H));
} else {
switch (priv->viu.vd1_planes) {
case 3:
meson_canvas_config(priv->canvas,
priv->canvas_id_vd1_2,
priv->viu.vd1_addr2,
priv->viu.vd1_stride2,
priv->viu.vd1_height2,
MESON_CANVAS_WRAP_NONE,
MESON_CANVAS_BLKMODE_LINEAR,
MESON_CANVAS_ENDIAN_SWAP64);
fallthrough;
case 2:
meson_canvas_config(priv->canvas,
priv->canvas_id_vd1_1,
priv->viu.vd1_addr1,
priv->viu.vd1_stride1,
priv->viu.vd1_height1,
MESON_CANVAS_WRAP_NONE,
MESON_CANVAS_BLKMODE_LINEAR,
MESON_CANVAS_ENDIAN_SWAP64);
fallthrough;
case 1:
meson_canvas_config(priv->canvas,
priv->canvas_id_vd1_0,
priv->viu.vd1_addr0,
priv->viu.vd1_stride0,
priv->viu.vd1_height0,
MESON_CANVAS_WRAP_NONE,
MESON_CANVAS_BLKMODE_LINEAR,
MESON_CANVAS_ENDIAN_SWAP64);
}
writel_relaxed(0, priv->io_base + _REG(AFBC_ENABLE));
}
writel_relaxed(priv->viu.vd1_if0_gen_reg,
priv->io_base + meson_crtc->viu_offset +
_REG(VD1_IF0_GEN_REG));
writel_relaxed(priv->viu.vd1_if0_gen_reg,
priv->io_base + meson_crtc->viu_offset +
_REG(VD2_IF0_GEN_REG));
writel_relaxed(priv->viu.vd1_if0_gen_reg2,
priv->io_base + meson_crtc->viu_offset +
_REG(VD1_IF0_GEN_REG2));
writel_relaxed(priv->viu.viu_vd1_fmt_ctrl,
priv->io_base + meson_crtc->viu_offset +
_REG(VIU_VD1_FMT_CTRL));
writel_relaxed(priv->viu.viu_vd1_fmt_ctrl,
priv->io_base + meson_crtc->viu_offset +
_REG(VIU_VD2_FMT_CTRL));
writel_relaxed(priv->viu.viu_vd1_fmt_w,
priv->io_base + meson_crtc->viu_offset +
_REG(VIU_VD1_FMT_W));
writel_relaxed(priv->viu.viu_vd1_fmt_w,
priv->io_base + meson_crtc->viu_offset +
_REG(VIU_VD2_FMT_W));
writel_relaxed(priv->viu.vd1_if0_canvas0,
priv->io_base + meson_crtc->viu_offset +
_REG(VD1_IF0_CANVAS0));
writel_relaxed(priv->viu.vd1_if0_canvas0,
priv->io_base + meson_crtc->viu_offset +
_REG(VD1_IF0_CANVAS1));
writel_relaxed(priv->viu.vd1_if0_canvas0,
priv->io_base + meson_crtc->viu_offset +
_REG(VD2_IF0_CANVAS0));
writel_relaxed(priv->viu.vd1_if0_canvas0,
priv->io_base + meson_crtc->viu_offset +
_REG(VD2_IF0_CANVAS1));
writel_relaxed(priv->viu.vd1_if0_luma_x0,
priv->io_base + meson_crtc->viu_offset +
_REG(VD1_IF0_LUMA_X0));
writel_relaxed(priv->viu.vd1_if0_luma_x0,
priv->io_base + meson_crtc->viu_offset +
_REG(VD1_IF0_LUMA_X1));
writel_relaxed(priv->viu.vd1_if0_luma_x0,
priv->io_base + meson_crtc->viu_offset +
_REG(VD2_IF0_LUMA_X0));
writel_relaxed(priv->viu.vd1_if0_luma_x0,
priv->io_base + meson_crtc->viu_offset +
_REG(VD2_IF0_LUMA_X1));
writel_relaxed(priv->viu.vd1_if0_luma_y0,
priv->io_base + meson_crtc->viu_offset +
_REG(VD1_IF0_LUMA_Y0));
writel_relaxed(priv->viu.vd1_if0_luma_y0,
priv->io_base + meson_crtc->viu_offset +
_REG(VD1_IF0_LUMA_Y1));
writel_relaxed(priv->viu.vd1_if0_luma_y0,
priv->io_base + meson_crtc->viu_offset +
_REG(VD2_IF0_LUMA_Y0));
writel_relaxed(priv->viu.vd1_if0_luma_y0,
priv->io_base + meson_crtc->viu_offset +
_REG(VD2_IF0_LUMA_Y1));
writel_relaxed(priv->viu.vd1_if0_chroma_x0,
priv->io_base + meson_crtc->viu_offset +
_REG(VD1_IF0_CHROMA_X0));
writel_relaxed(priv->viu.vd1_if0_chroma_x0,
priv->io_base + meson_crtc->viu_offset +
_REG(VD1_IF0_CHROMA_X1));
writel_relaxed(priv->viu.vd1_if0_chroma_x0,
priv->io_base + meson_crtc->viu_offset +
_REG(VD2_IF0_CHROMA_X0));
writel_relaxed(priv->viu.vd1_if0_chroma_x0,
priv->io_base + meson_crtc->viu_offset +
_REG(VD2_IF0_CHROMA_X1));
writel_relaxed(priv->viu.vd1_if0_chroma_y0,
priv->io_base + meson_crtc->viu_offset +
_REG(VD1_IF0_CHROMA_Y0));
writel_relaxed(priv->viu.vd1_if0_chroma_y0,
priv->io_base + meson_crtc->viu_offset +
_REG(VD1_IF0_CHROMA_Y1));
writel_relaxed(priv->viu.vd1_if0_chroma_y0,
priv->io_base + meson_crtc->viu_offset +
_REG(VD2_IF0_CHROMA_Y0));
writel_relaxed(priv->viu.vd1_if0_chroma_y0,
priv->io_base + meson_crtc->viu_offset +
_REG(VD2_IF0_CHROMA_Y1));
writel_relaxed(priv->viu.vd1_if0_repeat_loop,
priv->io_base + meson_crtc->viu_offset +
_REG(VD1_IF0_RPT_LOOP));
writel_relaxed(priv->viu.vd1_if0_repeat_loop,
priv->io_base + meson_crtc->viu_offset +
_REG(VD2_IF0_RPT_LOOP));
writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
priv->io_base + meson_crtc->viu_offset +
_REG(VD1_IF0_LUMA0_RPT_PAT));
writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
priv->io_base + meson_crtc->viu_offset +
_REG(VD2_IF0_LUMA0_RPT_PAT));
writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
priv->io_base + meson_crtc->viu_offset +
_REG(VD1_IF0_LUMA1_RPT_PAT));
writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
priv->io_base + meson_crtc->viu_offset +
_REG(VD2_IF0_LUMA1_RPT_PAT));
writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
priv->io_base + meson_crtc->viu_offset +
_REG(VD1_IF0_CHROMA0_RPT_PAT));
writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
priv->io_base + meson_crtc->viu_offset +
_REG(VD2_IF0_CHROMA0_RPT_PAT));
writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
priv->io_base + meson_crtc->viu_offset +
_REG(VD1_IF0_CHROMA1_RPT_PAT));
writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
priv->io_base + meson_crtc->viu_offset +
_REG(VD2_IF0_CHROMA1_RPT_PAT));
writel_relaxed(0, priv->io_base + meson_crtc->viu_offset +
_REG(VD1_IF0_LUMA_PSEL));
writel_relaxed(0, priv->io_base + meson_crtc->viu_offset +
_REG(VD1_IF0_CHROMA_PSEL));
writel_relaxed(0, priv->io_base + meson_crtc->viu_offset +
_REG(VD2_IF0_LUMA_PSEL));
writel_relaxed(0, priv->io_base + meson_crtc->viu_offset +
_REG(VD2_IF0_CHROMA_PSEL));
writel_relaxed(priv->viu.vd1_range_map_y,
priv->io_base + meson_crtc->viu_offset +
_REG(VD1_IF0_RANGE_MAP_Y));
writel_relaxed(priv->viu.vd1_range_map_cb,
priv->io_base + meson_crtc->viu_offset +
_REG(VD1_IF0_RANGE_MAP_CB));
writel_relaxed(priv->viu.vd1_range_map_cr,
priv->io_base + meson_crtc->viu_offset +
_REG(VD1_IF0_RANGE_MAP_CR));
writel_relaxed(VPP_VSC_BANK_LENGTH(4) |
VPP_HSC_BANK_LENGTH(4) |
VPP_SC_VD_EN_ENABLE |
VPP_SC_TOP_EN_ENABLE |
VPP_SC_HSC_EN_ENABLE |
VPP_SC_VSC_EN_ENABLE,
priv->io_base + _REG(VPP_SC_MISC));
writel_relaxed(priv->viu.vpp_pic_in_height,
priv->io_base + _REG(VPP_PIC_IN_HEIGHT));
writel_relaxed(priv->viu.vpp_postblend_vd1_h_start_end,
priv->io_base + _REG(VPP_POSTBLEND_VD1_H_START_END));
writel_relaxed(priv->viu.vpp_blend_vd2_h_start_end,
priv->io_base + _REG(VPP_BLEND_VD2_H_START_END));
writel_relaxed(priv->viu.vpp_postblend_vd1_v_start_end,
priv->io_base + _REG(VPP_POSTBLEND_VD1_V_START_END));
writel_relaxed(priv->viu.vpp_blend_vd2_v_start_end,
priv->io_base + _REG(VPP_BLEND_VD2_V_START_END));
writel_relaxed(priv->viu.vpp_hsc_region12_startp,
priv->io_base + _REG(VPP_HSC_REGION12_STARTP));
writel_relaxed(priv->viu.vpp_hsc_region34_startp,
priv->io_base + _REG(VPP_HSC_REGION34_STARTP));
writel_relaxed(priv->viu.vpp_hsc_region4_endp,
priv->io_base + _REG(VPP_HSC_REGION4_ENDP));
writel_relaxed(priv->viu.vpp_hsc_start_phase_step,
priv->io_base + _REG(VPP_HSC_START_PHASE_STEP));
writel_relaxed(priv->viu.vpp_hsc_region1_phase_slope,
priv->io_base + _REG(VPP_HSC_REGION1_PHASE_SLOPE));
writel_relaxed(priv->viu.vpp_hsc_region3_phase_slope,
priv->io_base + _REG(VPP_HSC_REGION3_PHASE_SLOPE));
writel_relaxed(priv->viu.vpp_line_in_length,
priv->io_base + _REG(VPP_LINE_IN_LENGTH));
writel_relaxed(priv->viu.vpp_preblend_h_size,
priv->io_base + _REG(VPP_PREBLEND_H_SIZE));
writel_relaxed(priv->viu.vpp_vsc_region12_startp,
priv->io_base + _REG(VPP_VSC_REGION12_STARTP));
writel_relaxed(priv->viu.vpp_vsc_region34_startp,
priv->io_base + _REG(VPP_VSC_REGION34_STARTP));
writel_relaxed(priv->viu.vpp_vsc_region4_endp,
priv->io_base + _REG(VPP_VSC_REGION4_ENDP));
writel_relaxed(priv->viu.vpp_vsc_start_phase_step,
priv->io_base + _REG(VPP_VSC_START_PHASE_STEP));
writel_relaxed(priv->viu.vpp_vsc_ini_phase,
priv->io_base + _REG(VPP_VSC_INI_PHASE));
writel_relaxed(priv->viu.vpp_vsc_phase_ctrl,
priv->io_base + _REG(VPP_VSC_PHASE_CTRL));
writel_relaxed(priv->viu.vpp_hsc_phase_ctrl,
priv->io_base + _REG(VPP_HSC_PHASE_CTRL));
writel_relaxed(0x42, priv->io_base + _REG(VPP_SCALE_COEF_IDX));
/* Enable VD1 */
if (meson_crtc->enable_vd1)
meson_crtc->enable_vd1(priv);
priv->viu.vd1_commit = false;
}
if (meson_crtc->vsync_disabled)
return;
drm_crtc_handle_vblank(priv->crtc);
spin_lock_irqsave(&priv->drm->event_lock, flags);
if (meson_crtc->event) {
drm_crtc_send_vblank_event(priv->crtc, meson_crtc->event);
drm_crtc_vblank_put(priv->crtc);
meson_crtc->event = NULL;
}
spin_unlock_irqrestore(&priv->drm->event_lock, flags);
}
int meson_crtc_create(struct meson_drm *priv)
{
struct meson_crtc *meson_crtc;
struct drm_crtc *crtc;
int ret;
meson_crtc = devm_kzalloc(priv->drm->dev, sizeof(*meson_crtc),
GFP_KERNEL);
if (!meson_crtc)
return -ENOMEM;
meson_crtc->priv = priv;
crtc = &meson_crtc->base;
ret = drm_crtc_init_with_planes(priv->drm, crtc,
priv->primary_plane, NULL,
&meson_crtc_funcs, "meson_crtc");
if (ret) {
dev_err(priv->drm->dev, "Failed to init CRTC\n");
return ret;
}
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
meson_crtc->enable_osd1 = meson_g12a_crtc_enable_osd1;
meson_crtc->enable_vd1 = meson_g12a_crtc_enable_vd1;
meson_crtc->viu_offset = MESON_G12A_VIU_OFFSET;
meson_crtc->enable_osd1_afbc =
meson_crtc_g12a_enable_osd1_afbc;
meson_crtc->disable_osd1_afbc =
meson_viu_g12a_disable_osd1_afbc;
drm_crtc_helper_add(crtc, &meson_g12a_crtc_helper_funcs);
} else {
meson_crtc->enable_osd1 = meson_crtc_enable_osd1;
meson_crtc->enable_vd1 = meson_crtc_enable_vd1;
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM)) {
meson_crtc->enable_osd1_afbc =
meson_viu_gxm_enable_osd1_afbc;
meson_crtc->disable_osd1_afbc =
meson_viu_gxm_disable_osd1_afbc;
}
drm_crtc_helper_add(crtc, &meson_crtc_helper_funcs);
}
priv->crtc = crtc;
return 0;
}
| linux-master | drivers/gpu/drm/meson/meson_crtc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2021 BayLibre, SAS
* Author: Neil Armstrong <[email protected]>
* Copyright (C) 2015 Amlogic, Inc. All rights reserved.
*/
#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/phy/phy.h>
#include <linux/bitfield.h>
#include <video/mipi_display.h>
#include <drm/bridge/dw_mipi_dsi.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_print.h>
#include "meson_drv.h"
#include "meson_dw_mipi_dsi.h"
#include "meson_registers.h"
#include "meson_venc.h"
#define DRIVER_NAME "meson-dw-mipi-dsi"
#define DRIVER_DESC "Amlogic Meson MIPI-DSI DRM driver"
struct meson_dw_mipi_dsi {
struct meson_drm *priv;
struct device *dev;
void __iomem *base;
struct phy *phy;
union phy_configure_opts phy_opts;
struct dw_mipi_dsi *dmd;
struct dw_mipi_dsi_plat_data pdata;
struct mipi_dsi_device *dsi_device;
const struct drm_display_mode *mode;
struct clk *bit_clk;
struct clk *px_clk;
struct reset_control *top_rst;
};
#define encoder_to_meson_dw_mipi_dsi(x) \
container_of(x, struct meson_dw_mipi_dsi, encoder)
static void meson_dw_mipi_dsi_hw_init(struct meson_dw_mipi_dsi *mipi_dsi)
{
/* Software reset */
writel_bits_relaxed(MIPI_DSI_TOP_SW_RESET_DWC | MIPI_DSI_TOP_SW_RESET_INTR |
MIPI_DSI_TOP_SW_RESET_DPI | MIPI_DSI_TOP_SW_RESET_TIMING,
MIPI_DSI_TOP_SW_RESET_DWC | MIPI_DSI_TOP_SW_RESET_INTR |
MIPI_DSI_TOP_SW_RESET_DPI | MIPI_DSI_TOP_SW_RESET_TIMING,
mipi_dsi->base + MIPI_DSI_TOP_SW_RESET);
writel_bits_relaxed(MIPI_DSI_TOP_SW_RESET_DWC | MIPI_DSI_TOP_SW_RESET_INTR |
MIPI_DSI_TOP_SW_RESET_DPI | MIPI_DSI_TOP_SW_RESET_TIMING,
0, mipi_dsi->base + MIPI_DSI_TOP_SW_RESET);
/* Enable clocks */
writel_bits_relaxed(MIPI_DSI_TOP_CLK_SYSCLK_EN | MIPI_DSI_TOP_CLK_PIXCLK_EN,
MIPI_DSI_TOP_CLK_SYSCLK_EN | MIPI_DSI_TOP_CLK_PIXCLK_EN,
mipi_dsi->base + MIPI_DSI_TOP_CLK_CNTL);
/* Take memory out of power down */
writel_relaxed(0, mipi_dsi->base + MIPI_DSI_TOP_MEM_PD);
}
static int dw_mipi_dsi_phy_init(void *priv_data)
{
struct meson_dw_mipi_dsi *mipi_dsi = priv_data;
unsigned int dpi_data_format, venc_data_width;
int ret;
/* Set the bit clock rate to hs_clk_rate */
ret = clk_set_rate(mipi_dsi->bit_clk,
mipi_dsi->phy_opts.mipi_dphy.hs_clk_rate);
if (ret) {
dev_err(mipi_dsi->dev, "Failed to set DSI Bit clock rate %lu (ret %d)\n",
mipi_dsi->phy_opts.mipi_dphy.hs_clk_rate, ret);
return ret;
}
/* Make sure the rate of the bit clock is not modified by someone else */
ret = clk_rate_exclusive_get(mipi_dsi->bit_clk);
if (ret) {
dev_err(mipi_dsi->dev,
"Failed to set the exclusivity on the bit clock rate (ret %d)\n", ret);
return ret;
}
ret = clk_set_rate(mipi_dsi->px_clk, mipi_dsi->mode->clock * 1000);
if (ret) {
dev_err(mipi_dsi->dev, "Failed to set DSI Pixel clock rate %u (%d)\n",
mipi_dsi->mode->clock * 1000, ret);
return ret;
}
switch (mipi_dsi->dsi_device->format) {
case MIPI_DSI_FMT_RGB888:
dpi_data_format = DPI_COLOR_24BIT;
venc_data_width = VENC_IN_COLOR_24B;
break;
case MIPI_DSI_FMT_RGB666:
dpi_data_format = DPI_COLOR_18BIT_CFG_2;
venc_data_width = VENC_IN_COLOR_18B;
break;
case MIPI_DSI_FMT_RGB666_PACKED:
case MIPI_DSI_FMT_RGB565:
return -EINVAL;
}
/* Configure color format for DPI register */
writel_relaxed(FIELD_PREP(MIPI_DSI_TOP_DPI_COLOR_MODE, dpi_data_format) |
FIELD_PREP(MIPI_DSI_TOP_IN_COLOR_MODE, venc_data_width) |
FIELD_PREP(MIPI_DSI_TOP_COMP2_SEL, 2) |
FIELD_PREP(MIPI_DSI_TOP_COMP1_SEL, 1) |
FIELD_PREP(MIPI_DSI_TOP_COMP0_SEL, 0),
mipi_dsi->base + MIPI_DSI_TOP_CNTL);
return phy_configure(mipi_dsi->phy, &mipi_dsi->phy_opts);
}
static void dw_mipi_dsi_phy_power_on(void *priv_data)
{
struct meson_dw_mipi_dsi *mipi_dsi = priv_data;
if (phy_power_on(mipi_dsi->phy))
dev_warn(mipi_dsi->dev, "Failed to power on PHY\n");
}
static void dw_mipi_dsi_phy_power_off(void *priv_data)
{
struct meson_dw_mipi_dsi *mipi_dsi = priv_data;
if (phy_power_off(mipi_dsi->phy))
dev_warn(mipi_dsi->dev, "Failed to power off PHY\n");
/* Remove the exclusivity on the bit clock rate */
clk_rate_exclusive_put(mipi_dsi->bit_clk);
}
static int
dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode,
unsigned long mode_flags, u32 lanes, u32 format,
unsigned int *lane_mbps)
{
struct meson_dw_mipi_dsi *mipi_dsi = priv_data;
int bpp;
mipi_dsi->mode = mode;
bpp = mipi_dsi_pixel_format_to_bpp(mipi_dsi->dsi_device->format);
phy_mipi_dphy_get_default_config(mode->clock * 1000,
bpp, mipi_dsi->dsi_device->lanes,
&mipi_dsi->phy_opts.mipi_dphy);
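	/* hs_clk_rate is the per-lane HS bit rate in Hz, the core wants Mbps */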
*lane_mbps = DIV_ROUND_UP(mipi_dsi->phy_opts.mipi_dphy.hs_clk_rate, USEC_PER_SEC);
return 0;
}
static int
dw_mipi_dsi_phy_get_timing(void *priv_data, unsigned int lane_mbps,
struct dw_mipi_dsi_dphy_timing *timing)
{
struct meson_dw_mipi_dsi *mipi_dsi = priv_data;
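	/*
	 * LP<->HS transition timings, seemingly tuned per supported panel
	 * width rather than derived from the D-PHY timing formulas.
	 */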
switch (mipi_dsi->mode->hdisplay) {
case 240:
case 768:
case 1920:
case 2560:
timing->clk_lp2hs = 23;
timing->clk_hs2lp = 38;
timing->data_lp2hs = 15;
timing->data_hs2lp = 9;
break;
default:
timing->clk_lp2hs = 37;
timing->clk_hs2lp = 135;
timing->data_lp2hs = 50;
timing->data_hs2lp = 3;
}
return 0;
}
static int
dw_mipi_dsi_get_esc_clk_rate(void *priv_data, unsigned int *esc_clk_rate)
{
	*esc_clk_rate = 4; /* MHz */
return 0;
}
static const struct dw_mipi_dsi_phy_ops meson_dw_mipi_dsi_phy_ops = {
.init = dw_mipi_dsi_phy_init,
.power_on = dw_mipi_dsi_phy_power_on,
.power_off = dw_mipi_dsi_phy_power_off,
.get_lane_mbps = dw_mipi_dsi_get_lane_mbps,
.get_timing = dw_mipi_dsi_phy_get_timing,
.get_esc_clk_rate = dw_mipi_dsi_get_esc_clk_rate,
};
static int meson_dw_mipi_dsi_host_attach(void *priv_data,
struct mipi_dsi_device *device)
{
struct meson_dw_mipi_dsi *mipi_dsi = priv_data;
int ret;
mipi_dsi->dsi_device = device;
switch (device->format) {
case MIPI_DSI_FMT_RGB888:
break;
case MIPI_DSI_FMT_RGB666:
break;
case MIPI_DSI_FMT_RGB666_PACKED:
case MIPI_DSI_FMT_RGB565:
dev_err(mipi_dsi->dev, "invalid pixel format %d\n", device->format);
return -EINVAL;
}
ret = phy_init(mipi_dsi->phy);
if (ret)
return ret;
meson_dw_mipi_dsi_hw_init(mipi_dsi);
return 0;
}
static int meson_dw_mipi_dsi_host_detach(void *priv_data,
struct mipi_dsi_device *device)
{
struct meson_dw_mipi_dsi *mipi_dsi = priv_data;
if (device == mipi_dsi->dsi_device)
mipi_dsi->dsi_device = NULL;
else
return -EINVAL;
return phy_exit(mipi_dsi->phy);
}
static const struct dw_mipi_dsi_host_ops meson_dw_mipi_dsi_host_ops = {
.attach = meson_dw_mipi_dsi_host_attach,
.detach = meson_dw_mipi_dsi_host_detach,
};
static int meson_dw_mipi_dsi_probe(struct platform_device *pdev)
{
struct meson_dw_mipi_dsi *mipi_dsi;
struct device *dev = &pdev->dev;
mipi_dsi = devm_kzalloc(dev, sizeof(*mipi_dsi), GFP_KERNEL);
if (!mipi_dsi)
return -ENOMEM;
mipi_dsi->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mipi_dsi->base))
return PTR_ERR(mipi_dsi->base);
mipi_dsi->phy = devm_phy_get(dev, "dphy");
if (IS_ERR(mipi_dsi->phy))
return dev_err_probe(dev, PTR_ERR(mipi_dsi->phy),
"failed to get mipi dphy\n");
mipi_dsi->bit_clk = devm_clk_get_enabled(dev, "bit");
if (IS_ERR(mipi_dsi->bit_clk)) {
int ret = PTR_ERR(mipi_dsi->bit_clk);
/* TOFIX GP0 on some platforms fails to lock in early boot, defer probe */
if (ret == -EIO)
ret = -EPROBE_DEFER;
return dev_err_probe(dev, ret, "Unable to get enabled bit_clk\n");
}
mipi_dsi->px_clk = devm_clk_get_enabled(dev, "px");
if (IS_ERR(mipi_dsi->px_clk))
return dev_err_probe(dev, PTR_ERR(mipi_dsi->px_clk),
"Unable to get enabled px_clk\n");
/*
* We use a TOP reset signal because the APB reset signal
* is handled by the TOP control registers.
*/
mipi_dsi->top_rst = devm_reset_control_get_exclusive(dev, "top");
if (IS_ERR(mipi_dsi->top_rst))
return dev_err_probe(dev, PTR_ERR(mipi_dsi->top_rst),
"Unable to get reset control\n");
reset_control_assert(mipi_dsi->top_rst);
usleep_range(10, 20);
reset_control_deassert(mipi_dsi->top_rst);
/* MIPI DSI Controller */
mipi_dsi->dev = dev;
mipi_dsi->pdata.base = mipi_dsi->base;
mipi_dsi->pdata.max_data_lanes = 4;
mipi_dsi->pdata.phy_ops = &meson_dw_mipi_dsi_phy_ops;
mipi_dsi->pdata.host_ops = &meson_dw_mipi_dsi_host_ops;
mipi_dsi->pdata.priv_data = mipi_dsi;
platform_set_drvdata(pdev, mipi_dsi);
mipi_dsi->dmd = dw_mipi_dsi_probe(pdev, &mipi_dsi->pdata);
if (IS_ERR(mipi_dsi->dmd))
return dev_err_probe(dev, PTR_ERR(mipi_dsi->dmd),
"Failed to probe dw_mipi_dsi\n");
return 0;
}
static int meson_dw_mipi_dsi_remove(struct platform_device *pdev)
{
struct meson_dw_mipi_dsi *mipi_dsi = platform_get_drvdata(pdev);
dw_mipi_dsi_remove(mipi_dsi->dmd);
return 0;
}
static const struct of_device_id meson_dw_mipi_dsi_of_table[] = {
{ .compatible = "amlogic,meson-g12a-dw-mipi-dsi", },
{ }
};
MODULE_DEVICE_TABLE(of, meson_dw_mipi_dsi_of_table);
static struct platform_driver meson_dw_mipi_dsi_platform_driver = {
.probe = meson_dw_mipi_dsi_probe,
.remove = meson_dw_mipi_dsi_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = meson_dw_mipi_dsi_of_table,
},
};
module_platform_driver(meson_dw_mipi_dsi_platform_driver);
MODULE_AUTHOR("Neil Armstrong <[email protected]>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/meson/meson_dw_mipi_dsi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 BayLibre, SAS
* Author: Neil Armstrong <[email protected]>
* Copyright (C) 2015 Amlogic, Inc. All rights reserved.
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <media/cec-notifier.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_device.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <linux/media-bus-format.h>
#include <linux/videodev2.h>
#include "meson_drv.h"
#include "meson_registers.h"
#include "meson_vclk.h"
#include "meson_venc.h"
#include "meson_encoder_hdmi.h"
struct meson_encoder_hdmi {
struct drm_encoder encoder;
struct drm_bridge bridge;
struct drm_bridge *next_bridge;
struct drm_connector *connector;
struct meson_drm *priv;
unsigned long output_bus_fmt;
struct cec_notifier *cec_notifier;
};
#define bridge_to_meson_encoder_hdmi(x) \
container_of(x, struct meson_encoder_hdmi, bridge)
static int meson_encoder_hdmi_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
return drm_bridge_attach(bridge->encoder, encoder_hdmi->next_bridge,
&encoder_hdmi->bridge, flags);
}
static void meson_encoder_hdmi_detach(struct drm_bridge *bridge)
{
struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
cec_notifier_conn_unregister(encoder_hdmi->cec_notifier);
encoder_hdmi->cec_notifier = NULL;
}
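/*
* Derive the clock tree for the given mode: the PHY (TMDS) clock is 10x
* the pixel clock, the VCLK is halved for YUV420 output and doubled for
* 480i/576i (DRM_MODE_FLAG_DBLCLK), and the VENC clock is doubled again
* for the pixel-repetition modes before being handed to
* meson_vclk_setup().
*/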
static void meson_encoder_hdmi_set_vclk(struct meson_encoder_hdmi *encoder_hdmi,
const struct drm_display_mode *mode)
{
struct meson_drm *priv = encoder_hdmi->priv;
int vic = drm_match_cea_mode(mode);
unsigned int phy_freq;
unsigned int vclk_freq;
unsigned int venc_freq;
unsigned int hdmi_freq;
vclk_freq = mode->clock;
/* For YUV420 output the pixel clock is half of the VENC clock */
if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
vclk_freq /= 2;
/* TMDS clock is pixel_clock * 10 */
phy_freq = vclk_freq * 10;
if (!vic) {
meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, phy_freq,
vclk_freq, vclk_freq, vclk_freq, false);
return;
}
/* 480i/576i needs global pixel doubling */
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
vclk_freq *= 2;
venc_freq = vclk_freq;
hdmi_freq = vclk_freq;
/* The VENC doubles pixels for 1080i, 720p and YUV420 modes */
if (meson_venc_hdmi_venc_repeat(vic) ||
encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
venc_freq *= 2;
vclk_freq = max(venc_freq, hdmi_freq);
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
venc_freq /= 2;
dev_dbg(priv->dev, "vclk:%d phy=%d venc=%d hdmi=%d enci=%d\n",
phy_freq, vclk_freq, venc_freq, hdmi_freq,
priv->venc.hdmi_use_enci);
meson_vclk_setup(priv, MESON_VCLK_TARGET_HDMI, phy_freq, vclk_freq,
venc_freq, hdmi_freq, priv->venc.hdmi_use_enci);
}
static enum drm_mode_status meson_encoder_hdmi_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *display_info,
const struct drm_display_mode *mode)
{
struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
struct meson_drm *priv = encoder_hdmi->priv;
bool is_hdmi2_sink = display_info->hdmi.scdc.supported;
unsigned int phy_freq;
unsigned int vclk_freq;
unsigned int venc_freq;
unsigned int hdmi_freq;
int vic = drm_match_cea_mode(mode);
enum drm_mode_status status;
dev_dbg(priv->dev, "Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
/* Reject modes above the sink's max TMDS clock unless they can fall back to YUV420 */
if (display_info->max_tmds_clock &&
mode->clock > display_info->max_tmds_clock &&
!drm_mode_is_420_only(display_info, mode) &&
!drm_mode_is_420_also(display_info, mode))
return MODE_BAD;
/* Check against non-VIC supported modes */
if (!vic) {
status = meson_venc_hdmi_supported_mode(mode);
if (status != MODE_OK)
return status;
return meson_vclk_dmt_supported_freq(priv, mode->clock);
/* Check against supported VIC modes */
} else if (!meson_venc_hdmi_supported_vic(vic))
return MODE_BAD;
vclk_freq = mode->clock;
/* For YUV420 output the pixel clock is half of the VENC clock */
if (drm_mode_is_420_only(display_info, mode) ||
(!is_hdmi2_sink &&
drm_mode_is_420_also(display_info, mode)))
vclk_freq /= 2;
/* TMDS clock is pixel_clock * 10 */
phy_freq = vclk_freq * 10;
/* 480i/576i needs global pixel doubling */
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
vclk_freq *= 2;
venc_freq = vclk_freq;
hdmi_freq = vclk_freq;
/* The VENC doubles pixels for 1080i, 720p and YUV420 modes */
if (meson_venc_hdmi_venc_repeat(vic) ||
drm_mode_is_420_only(display_info, mode) ||
(!is_hdmi2_sink &&
drm_mode_is_420_also(display_info, mode)))
venc_freq *= 2;
vclk_freq = max(venc_freq, hdmi_freq);
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
venc_freq /= 2;
dev_dbg(priv->dev, "%s: vclk:%d phy=%d venc=%d hdmi=%d\n",
__func__, phy_freq, vclk_freq, venc_freq, hdmi_freq);
return meson_vclk_vic_supported_freq(priv, phy_freq, vclk_freq);
}
static void meson_encoder_hdmi_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state)
{
struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
struct drm_atomic_state *state = bridge_state->base.state;
unsigned int ycrcb_map = VPU_HDMI_OUTPUT_CBYCR;
struct meson_drm *priv = encoder_hdmi->priv;
struct drm_connector_state *conn_state;
const struct drm_display_mode *mode;
struct drm_crtc_state *crtc_state;
struct drm_connector *connector;
bool yuv420_mode = false;
int vic;
connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
if (WARN_ON(!connector))
return;
conn_state = drm_atomic_get_new_connector_state(state, connector);
if (WARN_ON(!conn_state))
return;
crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
if (WARN_ON(!crtc_state))
return;
mode = &crtc_state->adjusted_mode;
vic = drm_match_cea_mode(mode);
dev_dbg(priv->dev, "\"%s\" vic %d\n", mode->name, vic);
if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) {
ycrcb_map = VPU_HDMI_OUTPUT_CRYCB;
yuv420_mode = true;
} else if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYVY8_1X16)
ycrcb_map = VPU_HDMI_OUTPUT_CRYCB;
/* VENC + VENC-DVI Mode setup */
meson_venc_hdmi_mode_set(priv, vic, ycrcb_map, yuv420_mode, mode);
/* VCLK Set clock */
meson_encoder_hdmi_set_vclk(encoder_hdmi, mode);
if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
/* Setup YUV420 to HDMI-TX, no 10bit dithering */
writel_relaxed(2 | (2 << 2),
priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
else if (encoder_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYVY8_1X16)
/* Setup YUV422 to HDMI-TX, no 10bit dithering */
writel_relaxed(1 | (2 << 2),
priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
else
/* Setup YUV444 to HDMI-TX, no 10bit dithering */
writel_relaxed(0, priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
dev_dbg(priv->dev, "%s\n", priv->venc.hdmi_use_enci ? "VENCI" : "VENCP");
if (priv->venc.hdmi_use_enci)
writel_relaxed(1, priv->io_base + _REG(ENCI_VIDEO_EN));
else
writel_relaxed(1, priv->io_base + _REG(ENCP_VIDEO_EN));
}
static void meson_encoder_hdmi_atomic_disable(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state)
{
struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
struct meson_drm *priv = encoder_hdmi->priv;
writel_bits_relaxed(0x3, 0,
priv->io_base + _REG(VPU_HDMI_SETTING));
writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN));
writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN));
}
static const u32 meson_encoder_hdmi_out_bus_fmts[] = {
MEDIA_BUS_FMT_YUV8_1X24,
MEDIA_BUS_FMT_UYVY8_1X16,
MEDIA_BUS_FMT_UYYVYY8_0_5X24,
};
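/*
* No format conversion is done in this encoder: the only acceptable
* input bus format is the one already negotiated on the output side.
*/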
static u32 *
meson_encoder_hdmi_get_inp_bus_fmts(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
u32 output_fmt,
unsigned int *num_input_fmts)
{
u32 *input_fmts = NULL;
int i;
*num_input_fmts = 0;
for (i = 0 ; i < ARRAY_SIZE(meson_encoder_hdmi_out_bus_fmts) ; ++i) {
if (output_fmt == meson_encoder_hdmi_out_bus_fmts[i]) {
*num_input_fmts = 1;
input_fmts = kcalloc(*num_input_fmts,
sizeof(*input_fmts),
GFP_KERNEL);
if (!input_fmts)
return NULL;
input_fmts[0] = output_fmt;
break;
}
}
return input_fmts;
}
static int meson_encoder_hdmi_atomic_check(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
struct drm_connector_state *old_conn_state =
drm_atomic_get_old_connector_state(conn_state->state, conn_state->connector);
struct meson_drm *priv = encoder_hdmi->priv;
encoder_hdmi->output_bus_fmt = bridge_state->output_bus_cfg.format;
dev_dbg(priv->dev, "output_bus_fmt %lx\n", encoder_hdmi->output_bus_fmt);
if (!drm_connector_atomic_hdr_metadata_equal(old_conn_state, conn_state))
crtc_state->mode_changed = true;
return 0;
}
static void meson_encoder_hdmi_hpd_notify(struct drm_bridge *bridge,
enum drm_connector_status status)
{
struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge);
struct edid *edid;
if (!encoder_hdmi->cec_notifier)
return;
if (status == connector_status_connected) {
edid = drm_bridge_get_edid(encoder_hdmi->next_bridge, encoder_hdmi->connector);
if (!edid)
return;
cec_notifier_set_phys_addr_from_edid(encoder_hdmi->cec_notifier, edid);
kfree(edid);
} else
cec_notifier_phys_addr_invalidate(encoder_hdmi->cec_notifier);
}
static const struct drm_bridge_funcs meson_encoder_hdmi_bridge_funcs = {
.attach = meson_encoder_hdmi_attach,
.detach = meson_encoder_hdmi_detach,
.mode_valid = meson_encoder_hdmi_mode_valid,
.hpd_notify = meson_encoder_hdmi_hpd_notify,
.atomic_enable = meson_encoder_hdmi_atomic_enable,
.atomic_disable = meson_encoder_hdmi_atomic_disable,
.atomic_get_input_bus_fmts = meson_encoder_hdmi_get_inp_bus_fmts,
.atomic_check = meson_encoder_hdmi_atomic_check,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
};
int meson_encoder_hdmi_init(struct meson_drm *priv)
{
struct meson_encoder_hdmi *meson_encoder_hdmi;
struct platform_device *pdev;
struct device_node *remote;
int ret;
meson_encoder_hdmi = devm_kzalloc(priv->dev, sizeof(*meson_encoder_hdmi), GFP_KERNEL);
if (!meson_encoder_hdmi)
return -ENOMEM;
/* HDMI Transceiver Bridge */
remote = of_graph_get_remote_node(priv->dev->of_node, 1, 0);
if (!remote) {
dev_err(priv->dev, "HDMI transceiver device is disabled");
return 0;
}
meson_encoder_hdmi->next_bridge = of_drm_find_bridge(remote);
if (!meson_encoder_hdmi->next_bridge) {
dev_err(priv->dev, "Failed to find HDMI transceiver bridge\n");
ret = -EPROBE_DEFER;
goto err_put_node;
}
/* HDMI Encoder Bridge */
meson_encoder_hdmi->bridge.funcs = &meson_encoder_hdmi_bridge_funcs;
meson_encoder_hdmi->bridge.of_node = priv->dev->of_node;
meson_encoder_hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
meson_encoder_hdmi->bridge.interlace_allowed = true;
drm_bridge_add(&meson_encoder_hdmi->bridge);
meson_encoder_hdmi->priv = priv;
/* Encoder */
ret = drm_simple_encoder_init(priv->drm, &meson_encoder_hdmi->encoder,
DRM_MODE_ENCODER_TMDS);
if (ret) {
dev_err(priv->dev, "Failed to init HDMI encoder: %d\n", ret);
goto err_put_node;
}
meson_encoder_hdmi->encoder.possible_crtcs = BIT(0);
/* Attach HDMI Encoder Bridge to Encoder */
ret = drm_bridge_attach(&meson_encoder_hdmi->encoder, &meson_encoder_hdmi->bridge, NULL,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret) {
dev_err(priv->dev, "Failed to attach bridge: %d\n", ret);
goto err_put_node;
}
/* Initialize & attach Bridge Connector */
meson_encoder_hdmi->connector = drm_bridge_connector_init(priv->drm,
&meson_encoder_hdmi->encoder);
if (IS_ERR(meson_encoder_hdmi->connector)) {
dev_err(priv->dev, "Unable to create HDMI bridge connector\n");
ret = PTR_ERR(meson_encoder_hdmi->connector);
goto err_put_node;
}
drm_connector_attach_encoder(meson_encoder_hdmi->connector,
&meson_encoder_hdmi->encoder);
/*
* We should have now in place:
* encoder->[hdmi encoder bridge]->[dw-hdmi bridge]->[display connector bridge]->[display connector]
*/
/*
* drm_connector_attach_max_bpc_property() requires the
* connector to have a state.
*/
drm_atomic_helper_connector_reset(meson_encoder_hdmi->connector);
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL) ||
meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) ||
meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
drm_connector_attach_hdr_output_metadata_property(meson_encoder_hdmi->connector);
drm_connector_attach_max_bpc_property(meson_encoder_hdmi->connector, 8, 8);
/* Handle this here until handled by drm_bridge_connector_init() */
meson_encoder_hdmi->connector->ycbcr_420_allowed = true;
pdev = of_find_device_by_node(remote);
of_node_put(remote);
if (pdev) {
struct cec_connector_info conn_info;
struct cec_notifier *notifier;
cec_fill_conn_info_from_drm(&conn_info, meson_encoder_hdmi->connector);
notifier = cec_notifier_conn_register(&pdev->dev, NULL, &conn_info);
if (!notifier) {
put_device(&pdev->dev);
return -ENOMEM;
}
meson_encoder_hdmi->cec_notifier = notifier;
}
priv->encoders[MESON_ENC_HDMI] = meson_encoder_hdmi;
dev_dbg(priv->dev, "HDMI encoder initialized\n");
return 0;
err_put_node:
of_node_put(remote);
return ret;
}
void meson_encoder_hdmi_remove(struct meson_drm *priv)
{
struct meson_encoder_hdmi *meson_encoder_hdmi;
if (priv->encoders[MESON_ENC_HDMI]) {
meson_encoder_hdmi = priv->encoders[MESON_ENC_HDMI];
drm_bridge_remove(&meson_encoder_hdmi->bridge);
drm_bridge_remove(meson_encoder_hdmi->next_bridge);
}
}
| linux-master | drivers/gpu/drm/meson/meson_encoder_hdmi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 BayLibre, SAS
* Author: Neil Armstrong <[email protected]>
* Copyright (C) 2014 Endless Mobile
*
* Written by:
* Jasper St. Pierre <[email protected]>
*/
#include <linux/component.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/sys_soc.h>
#include <linux/platform_device.h>
#include <linux/soc/amlogic/meson-canvas.h>
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "meson_crtc.h"
#include "meson_drv.h"
#include "meson_overlay.h"
#include "meson_plane.h"
#include "meson_osd_afbcd.h"
#include "meson_registers.h"
#include "meson_encoder_cvbs.h"
#include "meson_encoder_hdmi.h"
#include "meson_encoder_dsi.h"
#include "meson_viu.h"
#include "meson_vpp.h"
#include "meson_rdma.h"
#define DRIVER_NAME "meson"
#define DRIVER_DESC "Amlogic Meson DRM driver"
/**
* DOC: Video Processing Unit
*
* The VPU handles the global video processing; it includes management of
* the clock gates, block reset lines and power domains.
*
* What is missing:
*
* - Full reset of entire video processing HW blocks
* - Scaling and setup of the VPU clock
* - Bus clock gates
* - Powering up video processing HW blocks
* - Powering Up HDMI controller and PHY
*/
static const struct drm_mode_config_funcs meson_mode_config_funcs = {
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
.fb_create = drm_gem_fb_create,
};
static const struct drm_mode_config_helper_funcs meson_mode_config_helpers = {
.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
static irqreturn_t meson_irq(int irq, void *arg)
{
struct drm_device *dev = arg;
struct meson_drm *priv = dev->dev_private;
(void)readl_relaxed(priv->io_base + _REG(VENC_INTFLAG));
meson_crtc_irq(priv);
return IRQ_HANDLED;
}
static int meson_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
/*
* We need a 64-byte aligned stride and a PAGE-aligned size
*/
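/* e.g. a 1366x768 XRGB8888 buffer gets pitch = ALIGN(5464, 64) = 5504 */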
args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), SZ_64);
args->size = PAGE_ALIGN(args->pitch * args->height);
return drm_gem_dma_dumb_create_internal(file, dev, args);
}
DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver meson_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
/* DMA Ops */
DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(meson_dumb_create),
/* Misc */
.fops = &fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = "20161109",
.major = 1,
.minor = 0,
};
static bool meson_vpu_has_available_connectors(struct device *dev)
{
struct device_node *ep, *remote;
/* Parse each endpoint and check if a remote node exists */
for_each_endpoint_of_node(dev->of_node, ep) {
/* If the endpoint node exists, consider it enabled */
remote = of_graph_get_remote_port(ep);
if (remote) {
of_node_put(remote);
of_node_put(ep);
return true;
}
}
return false;
}
static struct regmap_config meson_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = 0x1000,
};
static void meson_vpu_init(struct meson_drm *priv)
{
u32 value;
/*
* Slave dc0 and dc5 connected to master port 1.
* By default other slaves are connected to master port 0.
*/
value = VPU_RDARB_SLAVE_TO_MASTER_PORT(0, 1) |
VPU_RDARB_SLAVE_TO_MASTER_PORT(5, 1);
writel_relaxed(value, priv->io_base + _REG(VPU_RDARB_MODE_L1C1));
/* Slave dc0 connected to master port 1 */
value = VPU_RDARB_SLAVE_TO_MASTER_PORT(0, 1);
writel_relaxed(value, priv->io_base + _REG(VPU_RDARB_MODE_L1C2));
/* Slave dc4 and dc7 connected to master port 1 */
value = VPU_RDARB_SLAVE_TO_MASTER_PORT(4, 1) |
VPU_RDARB_SLAVE_TO_MASTER_PORT(7, 1);
writel_relaxed(value, priv->io_base + _REG(VPU_RDARB_MODE_L2C1));
/* Slave dc1 connected to master port 1 */
value = VPU_RDARB_SLAVE_TO_MASTER_PORT(1, 1);
writel_relaxed(value, priv->io_base + _REG(VPU_WRARB_MODE_L2C1));
}
struct meson_drm_soc_attr {
struct meson_drm_soc_limits limits;
const struct soc_device_attribute *attrs;
};
static const struct meson_drm_soc_attr meson_drm_soc_attrs[] = {
/* S805X/S805Y HDMI PLL won't lock for HDMI PHY freq > 1.65GHz */
{
.limits = {
.max_hdmi_phy_freq = 1650000,
},
.attrs = (const struct soc_device_attribute []) {
{ .soc_id = "GXL (S805*)", },
{ /* sentinel */ }
}
},
};
static int meson_drv_bind_master(struct device *dev, bool has_components)
{
struct platform_device *pdev = to_platform_device(dev);
const struct meson_drm_match_data *match;
struct meson_drm *priv;
struct drm_device *drm;
struct resource *res;
void __iomem *regs;
int ret, i;
/* Checks if an output connector is available */
if (!meson_vpu_has_available_connectors(dev)) {
dev_err(dev, "No output connector available\n");
return -ENODEV;
}
match = of_device_get_match_data(dev);
if (!match)
return -ENODEV;
drm = drm_dev_alloc(&meson_driver, dev);
if (IS_ERR(drm))
return PTR_ERR(drm);
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv) {
ret = -ENOMEM;
goto free_drm;
}
drm->dev_private = priv;
priv->drm = drm;
priv->dev = dev;
priv->compat = match->compat;
priv->afbcd.ops = match->afbcd_ops;
regs = devm_platform_ioremap_resource_byname(pdev, "vpu");
if (IS_ERR(regs)) {
ret = PTR_ERR(regs);
goto free_drm;
}
priv->io_base = regs;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hhi");
if (!res) {
ret = -EINVAL;
goto free_drm;
}
/* Simply ioremap since it may be a shared register zone */
regs = devm_ioremap(dev, res->start, resource_size(res));
if (!regs) {
ret = -EADDRNOTAVAIL;
goto free_drm;
}
priv->hhi = devm_regmap_init_mmio(dev, regs,
&meson_regmap_config);
if (IS_ERR(priv->hhi)) {
dev_err(&pdev->dev, "Couldn't create the HHI regmap\n");
ret = PTR_ERR(priv->hhi);
goto free_drm;
}
priv->canvas = meson_canvas_get(dev);
if (IS_ERR(priv->canvas)) {
ret = PTR_ERR(priv->canvas);
goto free_drm;
}
ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_osd1);
if (ret)
goto free_drm;
ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_0);
if (ret) {
meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
goto free_drm;
}
ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_1);
if (ret) {
meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
goto free_drm;
}
ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_2);
if (ret) {
meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1);
goto free_drm;
}
priv->vsync_irq = platform_get_irq(pdev, 0);
ret = drm_vblank_init(drm, 1);
if (ret)
goto free_drm;
/* Assign limits per soc revision/package */
for (i = 0 ; i < ARRAY_SIZE(meson_drm_soc_attrs) ; ++i) {
if (soc_device_match(meson_drm_soc_attrs[i].attrs)) {
priv->limits = &meson_drm_soc_attrs[i].limits;
break;
}
}
/*
* Remove early framebuffers (i.e. simplefb). The framebuffer can be
* located anywhere in RAM.
*/
ret = drm_aperture_remove_framebuffers(&meson_driver);
if (ret)
goto free_drm;
ret = drmm_mode_config_init(drm);
if (ret)
goto free_drm;
drm->mode_config.max_width = 3840;
drm->mode_config.max_height = 2160;
drm->mode_config.funcs = &meson_mode_config_funcs;
drm->mode_config.helper_private = &meson_mode_config_helpers;
/* Hardware Initialization */
meson_vpu_init(priv);
meson_venc_init(priv);
meson_vpp_init(priv);
meson_viu_init(priv);
if (priv->afbcd.ops) {
ret = priv->afbcd.ops->init(priv);
if (ret)
goto free_drm;
}
/* Encoder Initialization */
ret = meson_encoder_cvbs_init(priv);
if (ret)
goto exit_afbcd;
if (has_components) {
ret = component_bind_all(dev, drm);
if (ret) {
dev_err(drm->dev, "Couldn't bind all components\n");
/* Do not try to unbind */
has_components = false;
goto exit_afbcd;
}
}
ret = meson_encoder_hdmi_init(priv);
if (ret)
goto exit_afbcd;
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
ret = meson_encoder_dsi_init(priv);
if (ret)
goto exit_afbcd;
}
ret = meson_plane_create(priv);
if (ret)
goto exit_afbcd;
ret = meson_overlay_create(priv);
if (ret)
goto exit_afbcd;
ret = meson_crtc_create(priv);
if (ret)
goto exit_afbcd;
ret = request_irq(priv->vsync_irq, meson_irq, 0, drm->driver->name, drm);
if (ret)
goto exit_afbcd;
drm_mode_config_reset(drm);
drm_kms_helper_poll_init(drm);
platform_set_drvdata(pdev, priv);
ret = drm_dev_register(drm, 0);
if (ret)
goto uninstall_irq;
drm_fbdev_dma_setup(drm, 32);
return 0;
uninstall_irq:
free_irq(priv->vsync_irq, drm);
exit_afbcd:
if (priv->afbcd.ops)
priv->afbcd.ops->exit(priv);
free_drm:
drm_dev_put(drm);
meson_encoder_dsi_remove(priv);
meson_encoder_hdmi_remove(priv);
meson_encoder_cvbs_remove(priv);
if (has_components)
component_unbind_all(dev, drm);
return ret;
}
static int meson_drv_bind(struct device *dev)
{
return meson_drv_bind_master(dev, true);
}
static void meson_drv_unbind(struct device *dev)
{
struct meson_drm *priv = dev_get_drvdata(dev);
struct drm_device *drm = priv->drm;
if (priv->canvas) {
meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1);
meson_canvas_free(priv->canvas, priv->canvas_id_vd1_2);
}
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
drm_atomic_helper_shutdown(drm);
free_irq(priv->vsync_irq, drm);
drm_dev_put(drm);
meson_encoder_dsi_remove(priv);
meson_encoder_hdmi_remove(priv);
meson_encoder_cvbs_remove(priv);
component_unbind_all(dev, drm);
if (priv->afbcd.ops)
priv->afbcd.ops->exit(priv);
}
static const struct component_master_ops meson_drv_master_ops = {
.bind = meson_drv_bind,
.unbind = meson_drv_unbind,
};
static int __maybe_unused meson_drv_pm_suspend(struct device *dev)
{
struct meson_drm *priv = dev_get_drvdata(dev);
if (!priv)
return 0;
return drm_mode_config_helper_suspend(priv->drm);
}
static int __maybe_unused meson_drv_pm_resume(struct device *dev)
{
struct meson_drm *priv = dev_get_drvdata(dev);
if (!priv)
return 0;
meson_vpu_init(priv);
meson_venc_init(priv);
meson_vpp_init(priv);
meson_viu_init(priv);
if (priv->afbcd.ops)
priv->afbcd.ops->init(priv);
return drm_mode_config_helper_resume(priv->drm);
}
static void meson_drv_shutdown(struct platform_device *pdev)
{
struct meson_drm *priv = dev_get_drvdata(&pdev->dev);
if (!priv)
return;
drm_kms_helper_poll_fini(priv->drm);
drm_atomic_helper_shutdown(priv->drm);
}
/*
* Only these devices are used as components.
* TOFIX: get rid of components when we can finally
* get meson_dx_hdmi to stop using the meson_drm
* private structure for HHI registers.
*/
static const struct of_device_id components_dev_match[] = {
{ .compatible = "amlogic,meson-gxbb-dw-hdmi" },
{ .compatible = "amlogic,meson-gxl-dw-hdmi" },
{ .compatible = "amlogic,meson-gxm-dw-hdmi" },
{ .compatible = "amlogic,meson-g12a-dw-hdmi" },
{}
};
static int meson_drv_probe(struct platform_device *pdev)
{
struct component_match *match = NULL;
struct device_node *np = pdev->dev.of_node;
struct device_node *ep, *remote;
int count = 0;
for_each_endpoint_of_node(np, ep) {
remote = of_graph_get_remote_port_parent(ep);
if (!remote || !of_device_is_available(remote)) {
of_node_put(remote);
continue;
}
if (of_match_node(components_dev_match, remote)) {
component_match_add(&pdev->dev, &match, component_compare_of, remote);
dev_dbg(&pdev->dev, "parent %pOF remote match add %pOF parent %s\n",
np, remote, dev_name(&pdev->dev));
}
of_node_put(remote);
++count;
}
if (count && !match)
return meson_drv_bind_master(&pdev->dev, false);
/* If some component endpoints were found, register the component master */
if (count) {
dev_info(&pdev->dev, "Queued %d outputs on vpu\n", count);
return component_master_add_with_match(&pdev->dev,
&meson_drv_master_ops,
match);
}
/* If no output endpoints were available, simply bail out */
return 0;
};
static void meson_drv_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &meson_drv_master_ops);
}
static struct meson_drm_match_data meson_drm_gxbb_data = {
.compat = VPU_COMPATIBLE_GXBB,
};
static struct meson_drm_match_data meson_drm_gxl_data = {
.compat = VPU_COMPATIBLE_GXL,
};
static struct meson_drm_match_data meson_drm_gxm_data = {
.compat = VPU_COMPATIBLE_GXM,
.afbcd_ops = &meson_afbcd_gxm_ops,
};
static struct meson_drm_match_data meson_drm_g12a_data = {
.compat = VPU_COMPATIBLE_G12A,
.afbcd_ops = &meson_afbcd_g12a_ops,
};
static const struct of_device_id dt_match[] = {
{ .compatible = "amlogic,meson-gxbb-vpu",
.data = (void *)&meson_drm_gxbb_data },
{ .compatible = "amlogic,meson-gxl-vpu",
.data = (void *)&meson_drm_gxl_data },
{ .compatible = "amlogic,meson-gxm-vpu",
.data = (void *)&meson_drm_gxm_data },
{ .compatible = "amlogic,meson-g12a-vpu",
.data = (void *)&meson_drm_g12a_data },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
static const struct dev_pm_ops meson_drv_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(meson_drv_pm_suspend, meson_drv_pm_resume)
};
static struct platform_driver meson_drm_platform_driver = {
.probe = meson_drv_probe,
.remove_new = meson_drv_remove,
.shutdown = meson_drv_shutdown,
.driver = {
.name = "meson-drm",
.of_match_table = dt_match,
.pm = &meson_drv_pm_ops,
},
};
drm_module_platform_driver(meson_drm_platform_driver);
MODULE_AUTHOR("Jasper St. Pierre <[email protected]>");
MODULE_AUTHOR("Neil Armstrong <[email protected]>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/meson/meson_drv.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2019 BayLibre, SAS
* Author: Neil Armstrong <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include "meson_drv.h"
#include "meson_registers.h"
#include "meson_rdma.h"
/*
* The VPU embeds a "Register DMA" that can write a sequence of registers
* on the VPU AHB bus, either manually or triggered by an internal IRQ
* event like VSYNC or a line input counter.
* The initial implementation handles a single channel (over 8), triggered
* by the VSYNC irq and does not handle the RDMA irq.
*/
#define RDMA_DESC_SIZE (sizeof(uint32_t) * 2)
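/*
* Each descriptor is a (register, value) pair of two 32-bit words packed
* back to back in the 4K buffer; rdma.offset counts 32-bit words, so it
* advances by two for every queued register write.
*/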
int meson_rdma_init(struct meson_drm *priv)
{
if (!priv->rdma.addr) {
/* Allocate a PAGE buffer */
priv->rdma.addr =
dma_alloc_coherent(priv->dev, SZ_4K,
&priv->rdma.addr_dma,
GFP_KERNEL);
if (!priv->rdma.addr)
return -ENOMEM;
}
priv->rdma.offset = 0;
writel_relaxed(RDMA_CTRL_SW_RESET,
priv->io_base + _REG(RDMA_CTRL));
writel_relaxed(RDMA_DEFAULT_CONFIG |
FIELD_PREP(RDMA_CTRL_AHB_WR_BURST, 3) |
FIELD_PREP(RDMA_CTRL_AHB_RD_BURST, 0),
priv->io_base + _REG(RDMA_CTRL));
return 0;
}
void meson_rdma_free(struct meson_drm *priv)
{
if (!priv->rdma.addr && !priv->rdma.addr_dma)
return;
meson_rdma_stop(priv);
dma_free_coherent(priv->dev, SZ_4K,
priv->rdma.addr, priv->rdma.addr_dma);
priv->rdma.addr = NULL;
priv->rdma.addr_dma = (dma_addr_t)0;
}
void meson_rdma_setup(struct meson_drm *priv)
{
/* Channel 1: Write Flag, No Address Increment */
writel_bits_relaxed(RDMA_ACCESS_RW_FLAG_CHAN1 |
RDMA_ACCESS_ADDR_INC_CHAN1,
RDMA_ACCESS_RW_FLAG_CHAN1,
priv->io_base + _REG(RDMA_ACCESS_AUTO));
}
void meson_rdma_stop(struct meson_drm *priv)
{
writel_bits_relaxed(RDMA_IRQ_CLEAR_CHAN1,
RDMA_IRQ_CLEAR_CHAN1,
priv->io_base + _REG(RDMA_CTRL));
/* Stop Channel 1 */
writel_bits_relaxed(RDMA_ACCESS_TRIGGER_CHAN1,
FIELD_PREP(RDMA_ACCESS_ADDR_INC_CHAN1,
RDMA_ACCESS_TRIGGER_STOP),
priv->io_base + _REG(RDMA_ACCESS_AUTO));
}
void meson_rdma_reset(struct meson_drm *priv)
{
meson_rdma_stop(priv);
priv->rdma.offset = 0;
}
static void meson_rdma_writel(struct meson_drm *priv, uint32_t val,
uint32_t reg)
{
if (priv->rdma.offset >= (SZ_4K / RDMA_DESC_SIZE)) {
dev_warn_once(priv->dev, "%s: overflow\n", __func__);
return;
}
priv->rdma.addr[priv->rdma.offset++] = reg;
priv->rdma.addr[priv->rdma.offset++] = val;
}
/*
* This will add the register to the RDMA buffer and write it to the
* hardware at the same time.
* When meson_rdma_flush is called, the RDMA will replay the register
* writes in order.
*/
void meson_rdma_writel_sync(struct meson_drm *priv, uint32_t val, uint32_t reg)
{
meson_rdma_writel(priv, val, reg);
writel_relaxed(val, priv->io_base + _REG(reg));
}
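/*
* Program the channel 1 start/end addresses around the queued descriptors
* and arm the channel so the hardware replays the writes on the next
* VSYNC event.
*/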
void meson_rdma_flush(struct meson_drm *priv)
{
meson_rdma_stop(priv);
/* Start of Channel 1 register writes buffer */
writel(priv->rdma.addr_dma,
priv->io_base + _REG(RDMA_AHB_START_ADDR_1));
/* Last byte on Channel 1 register writes buffer */
writel(priv->rdma.addr_dma + (priv->rdma.offset * RDMA_DESC_SIZE) - 1,
priv->io_base + _REG(RDMA_AHB_END_ADDR_1));
/* Trigger Channel 1 on VSYNC event */
writel_bits_relaxed(RDMA_ACCESS_TRIGGER_CHAN1,
FIELD_PREP(RDMA_ACCESS_TRIGGER_CHAN1,
RDMA_ACCESS_TRIGGER_VSYNC),
priv->io_base + _REG(RDMA_ACCESS_AUTO));
priv->rdma.offset = 0;
}
| linux-master | drivers/gpu/drm/meson/meson_rdma.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2019 BayLibre, SAS
* Author: Neil Armstrong <[email protected]>
*/
#include <linux/bitfield.h>
#include <drm/drm_print.h>
#include <drm/drm_fourcc.h>
#include "meson_drv.h"
#include "meson_registers.h"
#include "meson_viu.h"
#include "meson_rdma.h"
#include "meson_osd_afbcd.h"
/*
* DOC: Driver for the ARM FrameBuffer Compression Decoders
*
* The Amlogic GXM and G12A SoC families embeds an AFBC Decoder,
* to decode compressed buffers generated by the ARM Mali GPU.
*
* For the GXM Family, Amlogic designed their own Decoder, named in
* the vendor source as "MESON_AFBC", and a single decoder is available
* for the 2 OSD planes.
* This decoder is compatible with the AFBC 1.0 specifications and the
* Mali T820 GPU capabilities.
* It supports :
* - basic AFBC buffer for RGB32 only, thus YTR feature is mandatory
* - SPARSE layout and SPLIT layout
* - only 16x16 superblock
*
* The decoder reads the data from the SDRAM, decodes and sends the
* decoded pixel stream to the OSD1 Plane pixel composer.
*
* For the G12A Family, Amlogic integrated an ARM AFBC Decoder, named
* in the vendor source as "MALI_AFBC", and the decoder can decode up
* to 4 surfaces, one for each of the 4 available OSDs.
* This decoder is compatible with the AFBC 1.2 specifications for the
* Mali G31 and G52 GPUs.
* It supports:
* - basic AFBC buffer for multiple RGB and YUV pixel formats
* - SPARSE layout and SPLIT layout
* - 16x16 and 32x8 "wideblk" superblocks
* - Tiled header
*
* The ARM AFBC Decoder is independent from the VPU Pixel Pipeline, so
* the ARM AFBC Decoder reads the data from the SDRAM then decodes
* into a private internal physical address where the OSD1 Plane pixel
* composer unpacks the decoded data.
*/
/* Amlogic AFBC Decoder for GXM Family */
#define OSD1_AFBCD_RGB32 0x15
static int meson_gxm_afbcd_pixel_fmt(u64 modifier, uint32_t format)
{
switch (format) {
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
return OSD1_AFBCD_RGB32;
/* TOFIX: support more formats */
default:
DRM_DEBUG("unsupported afbc format[%08x]\n", format);
return -EINVAL;
}
}
static bool meson_gxm_afbcd_supported_fmt(u64 modifier, uint32_t format)
{
if (modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_32x8)
return false;
if (!(modifier & AFBC_FORMAT_MOD_YTR))
return false;
return meson_gxm_afbcd_pixel_fmt(modifier, format) >= 0;
}
static int meson_gxm_afbcd_reset(struct meson_drm *priv)
{
writel_relaxed(VIU_SW_RESET_OSD1_AFBCD,
priv->io_base + _REG(VIU_SW_RESET));
writel_relaxed(0, priv->io_base + _REG(VIU_SW_RESET));
return 0;
}
static int meson_gxm_afbcd_init(struct meson_drm *priv)
{
return 0;
}
static void meson_gxm_afbcd_exit(struct meson_drm *priv)
{
meson_gxm_afbcd_reset(priv);
}
static int meson_gxm_afbcd_enable(struct meson_drm *priv)
{
writel_relaxed(FIELD_PREP(OSD1_AFBCD_ID_FIFO_THRD, 0x40) |
OSD1_AFBCD_DEC_ENABLE,
priv->io_base + _REG(OSD1_AFBCD_ENABLE));
return 0;
}
static int meson_gxm_afbcd_disable(struct meson_drm *priv)
{
writel_bits_relaxed(OSD1_AFBCD_DEC_ENABLE, 0,
priv->io_base + _REG(OSD1_AFBCD_ENABLE));
return 0;
}
static int meson_gxm_afbcd_setup(struct meson_drm *priv)
{
u32 conv_lbuf_len;
u32 mode = FIELD_PREP(OSD1_AFBCD_MIF_URGENT, 3) |
FIELD_PREP(OSD1_AFBCD_HOLD_LINE_NUM, 4) |
FIELD_PREP(OSD1_AFBCD_RGBA_EXCHAN_CTRL, 0x34) |
meson_gxm_afbcd_pixel_fmt(priv->afbcd.modifier,
priv->afbcd.format);
if (priv->afbcd.modifier & AFBC_FORMAT_MOD_SPARSE)
mode |= OSD1_AFBCD_HREG_HALF_BLOCK;
if (priv->afbcd.modifier & AFBC_FORMAT_MOD_SPLIT)
mode |= OSD1_AFBCD_HREG_BLOCK_SPLIT;
writel_relaxed(mode, priv->io_base + _REG(OSD1_AFBCD_MODE));
writel_relaxed(FIELD_PREP(OSD1_AFBCD_HREG_VSIZE_IN,
priv->viu.osd1_width) |
FIELD_PREP(OSD1_AFBCD_HREG_HSIZE_IN,
priv->viu.osd1_height),
priv->io_base + _REG(OSD1_AFBCD_SIZE_IN));
writel_relaxed(priv->viu.osd1_addr >> 4,
priv->io_base + _REG(OSD1_AFBCD_HDR_PTR));
writel_relaxed(priv->viu.osd1_addr >> 4,
priv->io_base + _REG(OSD1_AFBCD_FRAME_PTR));
/* TOFIX: bits 31:24 are not documented, nor the meaning of 0xe4 */
writel_relaxed((0xe4 << 24) | (priv->viu.osd1_addr & 0xffffff),
priv->io_base + _REG(OSD1_AFBCD_CHROMA_PTR));
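/*
* The conversion line buffer length scales with the OSD1 width in
* power-of-two steps, topping out at 1024 for anything wider than
* 2048 pixels.
*/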
if (priv->viu.osd1_width <= 128)
conv_lbuf_len = 32;
else if (priv->viu.osd1_width <= 256)
conv_lbuf_len = 64;
else if (priv->viu.osd1_width <= 512)
conv_lbuf_len = 128;
else if (priv->viu.osd1_width <= 1024)
conv_lbuf_len = 256;
else if (priv->viu.osd1_width <= 2048)
conv_lbuf_len = 512;
else
conv_lbuf_len = 1024;
writel_relaxed(conv_lbuf_len,
priv->io_base + _REG(OSD1_AFBCD_CONV_CTRL));
writel_relaxed(FIELD_PREP(OSD1_AFBCD_DEC_PIXEL_BGN_H, 0) |
FIELD_PREP(OSD1_AFBCD_DEC_PIXEL_END_H,
priv->viu.osd1_width - 1),
priv->io_base + _REG(OSD1_AFBCD_PIXEL_HSCOPE));
writel_relaxed(FIELD_PREP(OSD1_AFBCD_DEC_PIXEL_BGN_V, 0) |
FIELD_PREP(OSD1_AFBCD_DEC_PIXEL_END_V,
priv->viu.osd1_height - 1),
priv->io_base + _REG(OSD1_AFBCD_PIXEL_VSCOPE));
return 0;
}
struct meson_afbcd_ops meson_afbcd_gxm_ops = {
.init = meson_gxm_afbcd_init,
.exit = meson_gxm_afbcd_exit,
.reset = meson_gxm_afbcd_reset,
.enable = meson_gxm_afbcd_enable,
.disable = meson_gxm_afbcd_disable,
.setup = meson_gxm_afbcd_setup,
.supported_fmt = meson_gxm_afbcd_supported_fmt,
};
/* ARM AFBC Decoder for G12A Family */
/* Amlogic G12A Mali AFBC Decoder supported formats */
enum {
MAFBC_FMT_RGB565 = 0,
MAFBC_FMT_RGBA5551,
MAFBC_FMT_RGBA1010102,
MAFBC_FMT_YUV420_10B,
MAFBC_FMT_RGB888,
MAFBC_FMT_RGBA8888,
MAFBC_FMT_RGBA4444,
MAFBC_FMT_R8,
MAFBC_FMT_RG88,
MAFBC_FMT_YUV420_8B,
MAFBC_FMT_YUV422_8B = 11,
MAFBC_FMT_YUV422_10B = 14,
};
static int meson_g12a_afbcd_pixel_fmt(u64 modifier, uint32_t format)
{
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
/* YTR is forbidden for non XBGR formats */
if (modifier & AFBC_FORMAT_MOD_YTR)
return -EINVAL;
fallthrough;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
return MAFBC_FMT_RGBA8888;
case DRM_FORMAT_RGB888:
/* YTR is forbidden for non XBGR formats */
if (modifier & AFBC_FORMAT_MOD_YTR)
return -EINVAL;
return MAFBC_FMT_RGB888;
case DRM_FORMAT_RGB565:
/* YTR is forbidden for non XBGR formats */
if (modifier & AFBC_FORMAT_MOD_YTR)
return -EINVAL;
return MAFBC_FMT_RGB565;
/* TOFIX: support more formats */
default:
DRM_DEBUG("unsupported afbc format[%08x]\n", format);
return -EINVAL;
}
}
static int meson_g12a_afbcd_bpp(uint32_t format)
{
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
return 32;
case DRM_FORMAT_RGB888:
return 24;
case DRM_FORMAT_RGB565:
return 16;
/* TOFIX: support more formats */
default:
DRM_ERROR("unsupported afbc format[%08x]\n", format);
return 0;
}
}
static int meson_g12a_afbcd_fmt_to_blk_mode(u64 modifier, uint32_t format)
{
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
return OSD_MALI_COLOR_MODE_RGBA8888;
case DRM_FORMAT_RGB888:
return OSD_MALI_COLOR_MODE_RGB888;
case DRM_FORMAT_RGB565:
return OSD_MALI_COLOR_MODE_RGB565;
/* TOFIX: support more formats */
default:
DRM_DEBUG("unsupported afbc format[%08x]\n", format);
return -EINVAL;
}
}
static bool meson_g12a_afbcd_supported_fmt(u64 modifier, uint32_t format)
{
return meson_g12a_afbcd_pixel_fmt(modifier, format) >= 0;
}
static int meson_g12a_afbcd_reset(struct meson_drm *priv)
{
meson_rdma_reset(priv);
meson_rdma_writel_sync(priv, VIU_SW_RESET_G12A_AFBC_ARB |
VIU_SW_RESET_G12A_OSD1_AFBCD,
VIU_SW_RESET);
meson_rdma_writel_sync(priv, 0, VIU_SW_RESET);
return 0;
}
static int meson_g12a_afbcd_init(struct meson_drm *priv)
{
int ret;
ret = meson_rdma_init(priv);
if (ret)
return ret;
meson_rdma_setup(priv);
/* Handle AFBC Decoder reset manually */
writel_bits_relaxed(MALI_AFBCD_MANUAL_RESET, MALI_AFBCD_MANUAL_RESET,
priv->io_base + _REG(MALI_AFBCD_TOP_CTRL));
return 0;
}
static void meson_g12a_afbcd_exit(struct meson_drm *priv)
{
meson_g12a_afbcd_reset(priv);
meson_rdma_free(priv);
}
static int meson_g12a_afbcd_enable(struct meson_drm *priv)
{
meson_rdma_writel_sync(priv, VPU_MAFBC_IRQ_SURFACES_COMPLETED |
VPU_MAFBC_IRQ_CONFIGURATION_SWAPPED |
VPU_MAFBC_IRQ_DECODE_ERROR |
VPU_MAFBC_IRQ_DETILING_ERROR,
VPU_MAFBC_IRQ_MASK);
meson_rdma_writel_sync(priv, VPU_MAFBC_S0_ENABLE,
VPU_MAFBC_SURFACE_CFG);
meson_rdma_writel_sync(priv, VPU_MAFBC_DIRECT_SWAP,
VPU_MAFBC_COMMAND);
/* This will enable the RDMA to replay the register writes on VSYNC */
meson_rdma_flush(priv);
return 0;
}
static int meson_g12a_afbcd_disable(struct meson_drm *priv)
{
writel_bits_relaxed(VPU_MAFBC_S0_ENABLE, 0,
priv->io_base + _REG(VPU_MAFBC_SURFACE_CFG));
return 0;
}
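/*
* Program AFBC surface 0: the format/superblock layout comes from the DRM
* modifier, the header buffer points at the OSD1 framebuffer, and the
* decoded output goes to the fixed internal address
* (MESON_G12A_AFBCD_OUT_ADDR) that the OSD1 pipeline reads from.
*/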
static int meson_g12a_afbcd_setup(struct meson_drm *priv)
{
u32 format = meson_g12a_afbcd_pixel_fmt(priv->afbcd.modifier,
priv->afbcd.format);
if (priv->afbcd.modifier & AFBC_FORMAT_MOD_YTR)
format |= VPU_MAFBC_YUV_TRANSFORM;
if (priv->afbcd.modifier & AFBC_FORMAT_MOD_SPLIT)
format |= VPU_MAFBC_BLOCK_SPLIT;
if (priv->afbcd.modifier & AFBC_FORMAT_MOD_TILED)
format |= VPU_MAFBC_TILED_HEADER_EN;
if ((priv->afbcd.modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) ==
AFBC_FORMAT_MOD_BLOCK_SIZE_32x8)
format |= FIELD_PREP(VPU_MAFBC_SUPER_BLOCK_ASPECT, 1);
meson_rdma_writel_sync(priv, format,
VPU_MAFBC_FORMAT_SPECIFIER_S0);
meson_rdma_writel_sync(priv, priv->viu.osd1_addr,
VPU_MAFBC_HEADER_BUF_ADDR_LOW_S0);
meson_rdma_writel_sync(priv, 0,
VPU_MAFBC_HEADER_BUF_ADDR_HIGH_S0);
meson_rdma_writel_sync(priv, priv->viu.osd1_width,
VPU_MAFBC_BUFFER_WIDTH_S0);
meson_rdma_writel_sync(priv, ALIGN(priv->viu.osd1_height, 32),
VPU_MAFBC_BUFFER_HEIGHT_S0);
meson_rdma_writel_sync(priv, 0,
VPU_MAFBC_BOUNDING_BOX_X_START_S0);
meson_rdma_writel_sync(priv, priv->viu.osd1_width - 1,
VPU_MAFBC_BOUNDING_BOX_X_END_S0);
meson_rdma_writel_sync(priv, 0,
VPU_MAFBC_BOUNDING_BOX_Y_START_S0);
meson_rdma_writel_sync(priv, priv->viu.osd1_height - 1,
VPU_MAFBC_BOUNDING_BOX_Y_END_S0);
meson_rdma_writel_sync(priv, MESON_G12A_AFBCD_OUT_ADDR,
VPU_MAFBC_OUTPUT_BUF_ADDR_LOW_S0);
meson_rdma_writel_sync(priv, 0,
VPU_MAFBC_OUTPUT_BUF_ADDR_HIGH_S0);
meson_rdma_writel_sync(priv, priv->viu.osd1_width *
(meson_g12a_afbcd_bpp(priv->afbcd.format) / 8),
VPU_MAFBC_OUTPUT_BUF_STRIDE_S0);
return 0;
}
struct meson_afbcd_ops meson_afbcd_g12a_ops = {
.init = meson_g12a_afbcd_init,
.exit = meson_g12a_afbcd_exit,
.reset = meson_g12a_afbcd_reset,
.enable = meson_g12a_afbcd_enable,
.disable = meson_g12a_afbcd_disable,
.setup = meson_g12a_afbcd_setup,
.fmt_to_blk_mode = meson_g12a_afbcd_fmt_to_blk_mode,
.supported_fmt = meson_g12a_afbcd_supported_fmt,
};
| linux-master | drivers/gpu/drm/meson/meson_osd_afbcd.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 BayLibre, SAS
* Author: Neil Armstrong <[email protected]>
* Copyright (C) 2015 Amlogic, Inc. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_device.h>
#include <drm/drm_probe_helper.h>
#include "meson_drv.h"
#include "meson_encoder_dsi.h"
#include "meson_registers.h"
#include "meson_venc.h"
#include "meson_vclk.h"
struct meson_encoder_dsi {
struct drm_encoder encoder;
struct drm_bridge bridge;
struct drm_bridge *next_bridge;
struct meson_drm *priv;
};
#define bridge_to_meson_encoder_dsi(x) \
container_of(x, struct meson_encoder_dsi, bridge)
static int meson_encoder_dsi_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct meson_encoder_dsi *encoder_dsi = bridge_to_meson_encoder_dsi(bridge);
return drm_bridge_attach(bridge->encoder, encoder_dsi->next_bridge,
&encoder_dsi->bridge, flags);
}
static void meson_encoder_dsi_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state)
{
struct meson_encoder_dsi *encoder_dsi = bridge_to_meson_encoder_dsi(bridge);
struct drm_atomic_state *state = bridge_state->base.state;
struct meson_drm *priv = encoder_dsi->priv;
struct drm_connector_state *conn_state;
struct drm_crtc_state *crtc_state;
struct drm_connector *connector;
connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
if (WARN_ON(!connector))
return;
conn_state = drm_atomic_get_new_connector_state(state, connector);
if (WARN_ON(!conn_state))
return;
crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
if (WARN_ON(!crtc_state))
return;
/* ENCL clock setup is handled by CCF */
meson_venc_mipi_dsi_mode_set(priv, &crtc_state->adjusted_mode);
meson_encl_load_gamma(priv);
writel_relaxed(0, priv->io_base + _REG(ENCL_VIDEO_EN));
writel_bits_relaxed(ENCL_VIDEO_MODE_ADV_VFIFO_EN, ENCL_VIDEO_MODE_ADV_VFIFO_EN,
priv->io_base + _REG(ENCL_VIDEO_MODE_ADV));
writel_relaxed(0, priv->io_base + _REG(ENCL_TST_EN));
writel_bits_relaxed(BIT(0), 0, priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_EN_CTRL));
writel_relaxed(1, priv->io_base + _REG(ENCL_VIDEO_EN));
}
static void meson_encoder_dsi_atomic_disable(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state)
{
struct meson_encoder_dsi *meson_encoder_dsi =
bridge_to_meson_encoder_dsi(bridge);
struct meson_drm *priv = meson_encoder_dsi->priv;
writel_relaxed(0, priv->io_base + _REG(ENCL_VIDEO_EN));
writel_bits_relaxed(BIT(0), BIT(0), priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_EN_CTRL));
}
static const struct drm_bridge_funcs meson_encoder_dsi_bridge_funcs = {
.attach = meson_encoder_dsi_attach,
.atomic_enable = meson_encoder_dsi_atomic_enable,
.atomic_disable = meson_encoder_dsi_atomic_disable,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
};
int meson_encoder_dsi_init(struct meson_drm *priv)
{
struct meson_encoder_dsi *meson_encoder_dsi;
struct device_node *remote;
int ret;
meson_encoder_dsi = devm_kzalloc(priv->dev, sizeof(*meson_encoder_dsi), GFP_KERNEL);
if (!meson_encoder_dsi)
return -ENOMEM;
/* DSI Transceiver Bridge */
remote = of_graph_get_remote_node(priv->dev->of_node, 2, 0);
if (!remote) {
dev_err(priv->dev, "DSI transceiver device is disabled");
return 0;
}
meson_encoder_dsi->next_bridge = of_drm_find_bridge(remote);
if (!meson_encoder_dsi->next_bridge) {
dev_dbg(priv->dev, "Failed to find DSI transceiver bridge\n");
return -EPROBE_DEFER;
}
/* DSI Encoder Bridge */
meson_encoder_dsi->bridge.funcs = &meson_encoder_dsi_bridge_funcs;
meson_encoder_dsi->bridge.of_node = priv->dev->of_node;
meson_encoder_dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
drm_bridge_add(&meson_encoder_dsi->bridge);
meson_encoder_dsi->priv = priv;
/* Encoder */
ret = drm_simple_encoder_init(priv->drm, &meson_encoder_dsi->encoder,
DRM_MODE_ENCODER_DSI);
if (ret) {
dev_err(priv->dev, "Failed to init DSI encoder: %d\n", ret);
return ret;
}
meson_encoder_dsi->encoder.possible_crtcs = BIT(0);
/* Attach DSI Encoder Bridge to Encoder */
ret = drm_bridge_attach(&meson_encoder_dsi->encoder, &meson_encoder_dsi->bridge, NULL, 0);
if (ret) {
dev_err(priv->dev, "Failed to attach bridge: %d\n", ret);
return ret;
}
/*
* We should have now in place:
* encoder->[dsi encoder bridge]->[dw-mipi-dsi bridge]->[panel bridge]->[panel]
*/
priv->encoders[MESON_ENC_DSI] = meson_encoder_dsi;
dev_dbg(priv->dev, "DSI encoder initialized\n");
return 0;
}
void meson_encoder_dsi_remove(struct meson_drm *priv)
{
struct meson_encoder_dsi *meson_encoder_dsi;
if (priv->encoders[MESON_ENC_DSI]) {
meson_encoder_dsi = priv->encoders[MESON_ENC_DSI];
drm_bridge_remove(&meson_encoder_dsi->bridge);
drm_bridge_remove(meson_encoder_dsi->next_bridge);
}
}
| linux-master | drivers/gpu/drm/meson/meson_encoder_dsi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2016 BayLibre, SAS
* Author: Neil Armstrong <[email protected]>
* Copyright (C) 2015 Amlogic, Inc. All rights reserved.
* Copyright (C) 2014 Endless Mobile
*/
#include <linux/export.h>
#include "meson_drv.h"
#include "meson_registers.h"
#include "meson_vpp.h"
/**
* DOC: Video Post Processing
*
* The VPP handles all the post-processing after the scanout from the VIU.
* We handle the following post-processing steps:
*
* - Postblend, which blends the OSD1 only
* We exclude OSD2, VS1, VS2 and the Preblend output
* - Vertical OSD Scaler for OSD1 only, we disable vertical scaler and
* use it only for interlace scanout
* - Intermediate FIFO with default Amlogic values
*
* What is missing:
*
* - Preblend for video overlay pre-scaling
* - OSD2 support for cursor framebuffer
* - Video pre-scaling before postblend
* - Full Vertical/Horizontal OSD scaling to support TV overscan
* - HDR conversion
*/
void meson_vpp_setup_mux(struct meson_drm *priv, unsigned int mux)
{
writel(mux, priv->io_base + _REG(VPU_VIU_VENC_MUX_CTRL));
}
static unsigned int vpp_filter_coefs_4point_bspline[] = {
0x15561500, 0x14561600, 0x13561700, 0x12561800,
0x11551a00, 0x11541b00, 0x10541c00, 0x0f541d00,
0x0f531e00, 0x0e531f00, 0x0d522100, 0x0c522200,
0x0b522300, 0x0b512400, 0x0a502600, 0x0a4f2700,
0x094e2900, 0x084e2a00, 0x084d2b00, 0x074c2c01,
0x074b2d01, 0x064a2f01, 0x06493001, 0x05483201,
0x05473301, 0x05463401, 0x04453601, 0x04433702,
0x04423802, 0x03413a02, 0x03403b02, 0x033f3c02,
0x033d3d03
};
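/*
* Load the 33 phase coefficients of the OSD scaler: select the horizontal
* or vertical bank via the index register, then write the entries
* sequentially.
*/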
static void meson_vpp_write_scaling_filter_coefs(struct meson_drm *priv,
const unsigned int *coefs,
bool is_horizontal)
{
int i;
writel_relaxed(is_horizontal ? VPP_SCALE_HORIZONTAL_COEF : 0,
priv->io_base + _REG(VPP_OSD_SCALE_COEF_IDX));
for (i = 0; i < 33; i++)
writel_relaxed(coefs[i],
priv->io_base + _REG(VPP_OSD_SCALE_COEF));
}
static const uint32_t vpp_filter_coefs_bicubic[] = {
0x00800000, 0x007f0100, 0xff7f0200, 0xfe7f0300,
0xfd7e0500, 0xfc7e0600, 0xfb7d0800, 0xfb7c0900,
0xfa7b0b00, 0xfa7a0dff, 0xf9790fff, 0xf97711ff,
0xf87613ff, 0xf87416fe, 0xf87218fe, 0xf8701afe,
0xf76f1dfd, 0xf76d1ffd, 0xf76b21fd, 0xf76824fd,
0xf76627fc, 0xf76429fc, 0xf7612cfc, 0xf75f2ffb,
0xf75d31fb, 0xf75a34fb, 0xf75837fa, 0xf7553afa,
0xf8523cfa, 0xf8503ff9, 0xf84d42f9, 0xf84a45f9,
0xf84848f8
};
static void meson_vpp_write_vd_scaling_filter_coefs(struct meson_drm *priv,
const unsigned int *coefs,
bool is_horizontal)
{
int i;
writel_relaxed(is_horizontal ? VPP_SCALE_HORIZONTAL_COEF : 0,
priv->io_base + _REG(VPP_SCALE_COEF_IDX));
for (i = 0; i < 33; i++)
writel_relaxed(coefs[i],
priv->io_base + _REG(VPP_SCALE_COEF));
}
void meson_vpp_init(struct meson_drm *priv)
{
/* set dummy data default YUV black */
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL))
writel_relaxed(0x108080, priv->io_base + _REG(VPP_DUMMY_DATA1));
else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM)) {
writel_bits_relaxed(0xff << 16, 0xff << 16,
priv->io_base + _REG(VIU_MISC_CTRL1));
writel_relaxed(VPP_PPS_DUMMY_DATA_MODE,
priv->io_base + _REG(VPP_DOLBY_CTRL));
writel_relaxed(0x1020080,
priv->io_base + _REG(VPP_DUMMY_DATA1));
writel_relaxed(0x42020,
priv->io_base + _REG(VPP_DUMMY_DATA));
} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
writel_relaxed(0xf, priv->io_base + _REG(DOLBY_PATH_CTRL));
/* Initialize vpu fifo control registers */
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
writel_relaxed(VPP_OFIFO_SIZE_DEFAULT,
priv->io_base + _REG(VPP_OFIFO_SIZE));
else
writel_bits_relaxed(VPP_OFIFO_SIZE_MASK, 0x77f,
priv->io_base + _REG(VPP_OFIFO_SIZE));
writel_relaxed(VPP_POSTBLEND_HOLD_LINES(4) | VPP_PREBLEND_HOLD_LINES(4),
priv->io_base + _REG(VPP_HOLD_LINES));
if (!meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
/* Turn off preblend */
writel_bits_relaxed(VPP_PREBLEND_ENABLE, 0,
priv->io_base + _REG(VPP_MISC));
/* Turn off POSTBLEND */
writel_bits_relaxed(VPP_POSTBLEND_ENABLE, 0,
priv->io_base + _REG(VPP_MISC));
/* Force all planes off */
writel_bits_relaxed(VPP_OSD1_POSTBLEND | VPP_OSD2_POSTBLEND |
VPP_VD1_POSTBLEND | VPP_VD2_POSTBLEND |
VPP_VD1_PREBLEND | VPP_VD2_PREBLEND, 0,
priv->io_base + _REG(VPP_MISC));
/* Setup default VD settings */
writel_relaxed(4096,
priv->io_base + _REG(VPP_PREBLEND_VD1_H_START_END));
writel_relaxed(4096,
priv->io_base + _REG(VPP_BLEND_VD2_H_START_END));
}
/* Disable Scalers */
writel_relaxed(0, priv->io_base + _REG(VPP_OSD_SC_CTRL0));
writel_relaxed(0, priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
writel_relaxed(0, priv->io_base + _REG(VPP_OSD_HSC_CTRL0));
/* Set horizontal/vertical bank length and enable video scale out */
writel_relaxed(VPP_VSC_BANK_LENGTH(4) | VPP_HSC_BANK_LENGTH(4) |
VPP_SC_VD_EN_ENABLE,
priv->io_base + _REG(VPP_SC_MISC));
/* Enable minus black level for vadj1 */
writel_relaxed(VPP_MINUS_BLACK_LVL_VADJ1_ENABLE,
priv->io_base + _REG(VPP_VADJ_CTRL));
/* Write in the proper filter coefficients. */
meson_vpp_write_scaling_filter_coefs(priv,
vpp_filter_coefs_4point_bspline, false);
meson_vpp_write_scaling_filter_coefs(priv,
vpp_filter_coefs_4point_bspline, true);
/* Write the VD proper filter coefficients. */
meson_vpp_write_vd_scaling_filter_coefs(priv, vpp_filter_coefs_bicubic,
false);
meson_vpp_write_vd_scaling_filter_coefs(priv, vpp_filter_coefs_bicubic,
true);
}
| linux-master | drivers/gpu/drm/meson/meson_vpp.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2018 BayLibre, SAS
* Author: Neil Armstrong <[email protected]>
* Copyright (C) 2015 Amlogic, Inc. All rights reserved.
*/
#include <linux/bitfield.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include "meson_overlay.h"
#include "meson_registers.h"
#include "meson_viu.h"
#include "meson_vpp.h"
/* VD1_IF0_GEN_REG */
#define VD_URGENT_CHROMA BIT(28)
#define VD_URGENT_LUMA BIT(27)
#define VD_HOLD_LINES(lines) FIELD_PREP(GENMASK(24, 19), lines)
#define VD_DEMUX_MODE_RGB BIT(16)
#define VD_BYTES_PER_PIXEL(val) FIELD_PREP(GENMASK(15, 14), val)
#define VD_CHRO_RPT_LASTL_CTRL BIT(6)
#define VD_LITTLE_ENDIAN BIT(4)
#define VD_SEPARATE_EN BIT(1)
#define VD_ENABLE BIT(0)
/* VD1_IF0_CANVAS0 */
#define CANVAS_ADDR2(addr) FIELD_PREP(GENMASK(23, 16), addr)
#define CANVAS_ADDR1(addr) FIELD_PREP(GENMASK(15, 8), addr)
#define CANVAS_ADDR0(addr) FIELD_PREP(GENMASK(7, 0), addr)
/* VD1_IF0_LUMA_X0 VD1_IF0_CHROMA_X0 */
#define VD_X_START(value) FIELD_PREP(GENMASK(14, 0), value)
#define VD_X_END(value) FIELD_PREP(GENMASK(30, 16), value)
/* VD1_IF0_LUMA_Y0 VD1_IF0_CHROMA_Y0 */
#define VD_Y_START(value) FIELD_PREP(GENMASK(12, 0), value)
#define VD_Y_END(value) FIELD_PREP(GENMASK(28, 16), value)
/* VD1_IF0_GEN_REG2 */
#define VD_COLOR_MAP(value) FIELD_PREP(GENMASK(1, 0), value)
/* VIU_VD1_FMT_CTRL */
#define VD_HORZ_Y_C_RATIO(value) FIELD_PREP(GENMASK(22, 21), value)
#define VD_HORZ_FMT_EN BIT(20)
#define VD_VERT_RPT_LINE0 BIT(16)
#define VD_VERT_INITIAL_PHASE(value) FIELD_PREP(GENMASK(11, 8), value)
#define VD_VERT_PHASE_STEP(value) FIELD_PREP(GENMASK(7, 1), value)
#define VD_VERT_FMT_EN BIT(0)
/* VPP_POSTBLEND_VD1_H_START_END */
#define VD_H_END(value) FIELD_PREP(GENMASK(11, 0), value)
#define VD_H_START(value) FIELD_PREP(GENMASK(27, 16), \
((value) & GENMASK(13, 0)))
/* VPP_POSTBLEND_VD1_V_START_END */
#define VD_V_END(value) FIELD_PREP(GENMASK(11, 0), value)
#define VD_V_START(value) FIELD_PREP(GENMASK(27, 16), value)
/* VPP_BLEND_VD2_V_START_END */
#define VD2_V_END(value) FIELD_PREP(GENMASK(11, 0), value)
#define VD2_V_START(value) FIELD_PREP(GENMASK(27, 16), value)
/* VIU_VD1_FMT_W */
#define VD_V_WIDTH(value) FIELD_PREP(GENMASK(11, 0), value)
#define VD_H_WIDTH(value) FIELD_PREP(GENMASK(27, 16), value)
/* VPP_HSC_REGION12_STARTP VPP_HSC_REGION34_STARTP */
#define VD_REGION24_START(value) FIELD_PREP(GENMASK(11, 0), value)
#define VD_REGION13_END(value) FIELD_PREP(GENMASK(27, 16), value)
/* AFBC_ENABLE */
#define AFBC_DEC_ENABLE BIT(8)
#define AFBC_FRM_START BIT(0)
/* AFBC_MODE */
#define AFBC_HORZ_SKIP_UV(value) FIELD_PREP(GENMASK(1, 0), value)
#define AFBC_VERT_SKIP_UV(value) FIELD_PREP(GENMASK(3, 2), value)
#define AFBC_HORZ_SKIP_Y(value) FIELD_PREP(GENMASK(5, 4), value)
#define AFBC_VERT_SKIP_Y(value) FIELD_PREP(GENMASK(7, 6), value)
#define AFBC_COMPBITS_YUV(value) FIELD_PREP(GENMASK(13, 8), value)
#define AFBC_COMPBITS_8BIT 0
#define AFBC_COMPBITS_10BIT (2 | (2 << 2) | (2 << 4))
#define AFBC_BURST_LEN(value) FIELD_PREP(GENMASK(15, 14), value)
#define AFBC_HOLD_LINE_NUM(value) FIELD_PREP(GENMASK(22, 16), value)
#define AFBC_MIF_URGENT(value) FIELD_PREP(GENMASK(25, 24), value)
#define AFBC_REV_MODE(value) FIELD_PREP(GENMASK(27, 26), value)
#define AFBC_BLK_MEM_MODE BIT(28)
#define AFBC_SCATTER_MODE BIT(29)
#define AFBC_SOFT_RESET BIT(31)
/* AFBC_SIZE_IN */
#define AFBC_HSIZE_IN(value) FIELD_PREP(GENMASK(28, 16), value)
#define AFBC_VSIZE_IN(value) FIELD_PREP(GENMASK(12, 0), value)
/* AFBC_DEC_DEF_COLOR */
#define AFBC_DEF_COLOR_Y(value) FIELD_PREP(GENMASK(29, 20), value)
#define AFBC_DEF_COLOR_U(value) FIELD_PREP(GENMASK(19, 10), value)
#define AFBC_DEF_COLOR_V(value) FIELD_PREP(GENMASK(9, 0), value)
/* AFBC_CONV_CTRL */
#define AFBC_CONV_LBUF_LEN(value) FIELD_PREP(GENMASK(11, 0), value)
/* AFBC_LBUF_DEPTH */
#define AFBC_DEC_LBUF_DEPTH(value) FIELD_PREP(GENMASK(27, 16), value)
#define AFBC_MIF_LBUF_DEPTH(value) FIELD_PREP(GENMASK(11, 0), value)
/* AFBC_OUT_XSCOPE/AFBC_SIZE_OUT */
#define AFBC_HSIZE_OUT(value) FIELD_PREP(GENMASK(28, 16), value)
#define AFBC_VSIZE_OUT(value) FIELD_PREP(GENMASK(12, 0), value)
#define AFBC_OUT_HORZ_BGN(value) FIELD_PREP(GENMASK(28, 16), value)
#define AFBC_OUT_HORZ_END(value) FIELD_PREP(GENMASK(12, 0), value)
/* AFBC_OUT_YSCOPE */
#define AFBC_OUT_VERT_BGN(value) FIELD_PREP(GENMASK(28, 16), value)
#define AFBC_OUT_VERT_END(value) FIELD_PREP(GENMASK(12, 0), value)
/* AFBC_VD_CFMT_CTRL */
#define AFBC_HORZ_RPT_PIXEL0 BIT(23)
#define AFBC_HORZ_Y_C_RATIO(value) FIELD_PREP(GENMASK(22, 21), value)
#define AFBC_HORZ_FMT_EN BIT(20)
#define AFBC_VERT_RPT_LINE0 BIT(16)
#define AFBC_VERT_INITIAL_PHASE(value) FIELD_PREP(GENMASK(11, 8), value)
#define AFBC_VERT_PHASE_STEP(value) FIELD_PREP(GENMASK(7, 1), value)
#define AFBC_VERT_FMT_EN BIT(0)
/* AFBC_VD_CFMT_W */
#define AFBC_VD_V_WIDTH(value) FIELD_PREP(GENMASK(11, 0), value)
#define AFBC_VD_H_WIDTH(value) FIELD_PREP(GENMASK(27, 16), value)
/* AFBC_MIF_HOR_SCOPE */
#define AFBC_MIF_BLK_BGN_H(value) FIELD_PREP(GENMASK(25, 16), value)
#define AFBC_MIF_BLK_END_H(value) FIELD_PREP(GENMASK(9, 0), value)
/* AFBC_MIF_VER_SCOPE */
#define AFBC_MIF_BLK_BGN_V(value) FIELD_PREP(GENMASK(27, 16), value)
#define AFBC_MIF_BLK_END_V(value) FIELD_PREP(GENMASK(11, 0), value)
/* AFBC_PIXEL_HOR_SCOPE */
#define AFBC_DEC_PIXEL_BGN_H(value) FIELD_PREP(GENMASK(28, 16), \
((value) & GENMASK(12, 0)))
#define AFBC_DEC_PIXEL_END_H(value) FIELD_PREP(GENMASK(12, 0), value)
/* AFBC_PIXEL_VER_SCOPE */
#define AFBC_DEC_PIXEL_BGN_V(value) FIELD_PREP(GENMASK(28, 16), value)
#define AFBC_DEC_PIXEL_END_V(value) FIELD_PREP(GENMASK(12, 0), value)
/* AFBC_VD_CFMT_H */
#define AFBC_VD_HEIGHT(value) FIELD_PREP(GENMASK(12, 0), value)
struct meson_overlay {
struct drm_plane base;
struct meson_drm *priv;
};
#define to_meson_overlay(x) container_of(x, struct meson_overlay, base)
#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
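/*
 * For illustration: FRAC_16_16(1, 5) = 65536 / 5 = 13107 (~0.2 in 16.16
 * fixed point) and FRAC_16_16(5, 1) = 327680 (5.0). Used below as the
 * min/max scaling factors, this allows the overlay to be scaled up or
 * down by at most a factor of 5.
 */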
static int meson_overlay_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_crtc_state *crtc_state;
if (!new_plane_state->crtc)
return 0;
crtc_state = drm_atomic_get_crtc_state(state,
new_plane_state->crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
return drm_atomic_helper_check_plane_state(new_plane_state,
crtc_state,
FRAC_16_16(1, 5),
FRAC_16_16(5, 1),
true, true);
}
/* Takes a fixed 16.16 number and converts it to integer. */
static inline int64_t fixed16_to_int(int64_t value)
{
return value >> 16;
}
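/*
 * For illustration: the plane src_* coordinates are 16.16 fixed point,
 * so a src_w of 1920 pixels arrives as 1920 << 16 = 0x07800000 and
 * fixed16_to_int() returns 1920, discarding any fractional part.
 */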
static const uint8_t skip_tab[6] = {
0x24, 0x04, 0x68, 0x48, 0x28, 0x08,
};
static void meson_overlay_get_vertical_phase(unsigned int ratio_y, int *phase,
int *repeat, bool interlace)
{
int offset_in = 0;
int offset_out = 0;
int repeat_skip = 0;
if (!interlace && ratio_y > (1 << 18))
offset_out = (1 * ratio_y) >> 10;
while ((offset_in + (4 << 8)) <= offset_out) {
repeat_skip++;
offset_in += 4 << 8;
}
*phase = (offset_out - offset_in) >> 2;
if (*phase > 0x100)
repeat_skip++;
*phase = *phase & 0xff;
if (repeat_skip > 5)
repeat_skip = 5;
*repeat = skip_tab[repeat_skip];
}
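/*
 * Worked example (hypothetical values): for a progressive 2:1 vertical
 * downscale, ratio_y = 2 << 18 = 0x80000, so offset_out = 0x200. The
 * loop never runs (0x400 > 0x200), *phase = 0x200 >> 2 = 0x80 and
 * *repeat = skip_tab[0] = 0x24.
 */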
static void meson_overlay_setup_scaler_params(struct meson_drm *priv,
struct drm_plane *plane,
bool interlace_mode)
{
struct drm_crtc_state *crtc_state = priv->crtc->state;
int video_top, video_left, video_width, video_height;
struct drm_plane_state *state = plane->state;
unsigned int vd_start_lines, vd_end_lines;
unsigned int hd_start_lines, hd_end_lines;
unsigned int crtc_height, crtc_width;
unsigned int vsc_startp, vsc_endp;
unsigned int hsc_startp, hsc_endp;
unsigned int crop_top, crop_left;
int vphase, vphase_repeat_skip;
unsigned int ratio_x, ratio_y;
int temp_height, temp_width;
unsigned int w_in, h_in;
int afbc_left, afbc_right;
int afbc_top_src, afbc_bottom_src;
int afbc_top, afbc_bottom;
int temp, start, end;
if (!crtc_state) {
DRM_ERROR("Invalid crtc_state\n");
return;
}
crtc_height = crtc_state->mode.vdisplay;
crtc_width = crtc_state->mode.hdisplay;
w_in = fixed16_to_int(state->src_w);
h_in = fixed16_to_int(state->src_h);
crop_top = fixed16_to_int(state->src_y);
crop_left = fixed16_to_int(state->src_x);
video_top = state->crtc_y;
video_left = state->crtc_x;
video_width = state->crtc_w;
video_height = state->crtc_h;
DRM_DEBUG("crtc_width %d crtc_height %d interlace %d\n",
crtc_width, crtc_height, interlace_mode);
DRM_DEBUG("w_in %d h_in %d crop_top %d crop_left %d\n",
w_in, h_in, crop_top, crop_left);
DRM_DEBUG("video top %d left %d width %d height %d\n",
video_top, video_left, video_width, video_height);
ratio_x = (w_in << 18) / video_width;
ratio_y = (h_in << 18) / video_height;
if (ratio_x * video_width < (w_in << 18))
ratio_x++;
DRM_DEBUG("ratio x 0x%x y 0x%x\n", ratio_x, ratio_y);
meson_overlay_get_vertical_phase(ratio_y, &vphase, &vphase_repeat_skip,
interlace_mode);
DRM_DEBUG("vphase 0x%x skip %d\n", vphase, vphase_repeat_skip);
/* Vertical */
start = video_top + video_height / 2 - ((h_in << 17) / ratio_y);
end = (h_in << 18) / ratio_y + start - 1;
if (video_top < 0 && start < 0)
vd_start_lines = (-(start) * ratio_y) >> 18;
else if (start < video_top)
vd_start_lines = ((video_top - start) * ratio_y) >> 18;
else
vd_start_lines = 0;
if (video_top < 0)
temp_height = min_t(unsigned int,
video_top + video_height - 1,
crtc_height - 1);
else
temp_height = min_t(unsigned int,
video_top + video_height - 1,
crtc_height - 1) - video_top + 1;
temp = vd_start_lines + (temp_height * ratio_y >> 18);
vd_end_lines = (temp <= (h_in - 1)) ? temp : (h_in - 1);
vd_start_lines += crop_left;
vd_end_lines += crop_left;
/*
 * TOFIX: Input frames are handled and scaled like progressive frames;
 * proper handling of interlaced field input frames needs to be figured
 * out using the proper framebuffer flags set by userspace.
 */
if (interlace_mode) {
start >>= 1;
end >>= 1;
}
vsc_startp = max_t(int, start,
max_t(int, 0, video_top));
vsc_endp = min_t(int, end,
min_t(int, crtc_height - 1,
video_top + video_height - 1));
DRM_DEBUG("vsc startp %d endp %d start_lines %d end_lines %d\n",
vsc_startp, vsc_endp, vd_start_lines, vd_end_lines);
afbc_top = round_down(vd_start_lines, 4);
afbc_bottom = round_up(vd_end_lines + 1, 4);
afbc_top_src = 0;
afbc_bottom_src = round_up(h_in + 1, 4);
DRM_DEBUG("afbc top %d (src %d) bottom %d (src %d)\n",
afbc_top, afbc_top_src, afbc_bottom, afbc_bottom_src);
/* Horizontal */
start = video_left + video_width / 2 - ((w_in << 17) / ratio_x);
end = (w_in << 18) / ratio_x + start - 1;
if (video_left < 0 && start < 0)
hd_start_lines = (-(start) * ratio_x) >> 18;
else if (start < video_left)
hd_start_lines = ((video_left - start) * ratio_x) >> 18;
else
hd_start_lines = 0;
if (video_left < 0)
temp_width = min_t(unsigned int,
video_left + video_width - 1,
crtc_width - 1);
else
temp_width = min_t(unsigned int,
video_left + video_width - 1,
crtc_width - 1) - video_left + 1;
temp = hd_start_lines + (temp_width * ratio_x >> 18);
hd_end_lines = (temp <= (w_in - 1)) ? temp : (w_in - 1);
priv->viu.vpp_line_in_length = hd_end_lines - hd_start_lines + 1;
hsc_startp = max_t(int, start, max_t(int, 0, video_left));
hsc_endp = min_t(int, end, min_t(int, crtc_width - 1,
video_left + video_width - 1));
hd_start_lines += crop_top;
hd_end_lines += crop_top;
DRM_DEBUG("hsc startp %d endp %d start_lines %d end_lines %d\n",
hsc_startp, hsc_endp, hd_start_lines, hd_end_lines);
if (hd_start_lines > 0 || (hd_end_lines < w_in)) {
afbc_left = 0;
afbc_right = round_up(w_in, 32);
} else {
afbc_left = round_down(hd_start_lines, 32);
afbc_right = round_up(hd_end_lines + 1, 32);
}
DRM_DEBUG("afbc left %d right %d\n", afbc_left, afbc_right);
priv->viu.vpp_vsc_start_phase_step = ratio_y << 6;
priv->viu.vpp_vsc_ini_phase = vphase << 8;
priv->viu.vpp_vsc_phase_ctrl = (1 << 13) | (4 << 8) |
vphase_repeat_skip;
priv->viu.vd1_if0_luma_x0 = VD_X_START(hd_start_lines) |
VD_X_END(hd_end_lines);
priv->viu.vd1_if0_chroma_x0 = VD_X_START(hd_start_lines >> 1) |
VD_X_END(hd_end_lines >> 1);
priv->viu.viu_vd1_fmt_w =
VD_H_WIDTH(hd_end_lines - hd_start_lines + 1) |
VD_V_WIDTH(hd_end_lines/2 - hd_start_lines/2 + 1);
priv->viu.vd1_afbc_vd_cfmt_w =
AFBC_VD_H_WIDTH(afbc_right - afbc_left) |
AFBC_VD_V_WIDTH(afbc_right / 2 - afbc_left / 2);
priv->viu.vd1_afbc_vd_cfmt_h =
AFBC_VD_HEIGHT((afbc_bottom - afbc_top) / 2);
priv->viu.vd1_afbc_mif_hor_scope = AFBC_MIF_BLK_BGN_H(afbc_left / 32) |
AFBC_MIF_BLK_END_H((afbc_right / 32) - 1);
priv->viu.vd1_afbc_mif_ver_scope = AFBC_MIF_BLK_BGN_V(afbc_top / 4) |
AFBC_MIF_BLK_END_V((afbc_bottom / 4) - 1);
priv->viu.vd1_afbc_size_out =
AFBC_HSIZE_OUT(afbc_right - afbc_left) |
AFBC_VSIZE_OUT(afbc_bottom - afbc_top);
priv->viu.vd1_afbc_pixel_hor_scope =
AFBC_DEC_PIXEL_BGN_H(hd_start_lines - afbc_left) |
AFBC_DEC_PIXEL_END_H(hd_end_lines - afbc_left);
priv->viu.vd1_afbc_pixel_ver_scope =
AFBC_DEC_PIXEL_BGN_V(vd_start_lines - afbc_top) |
AFBC_DEC_PIXEL_END_V(vd_end_lines - afbc_top);
priv->viu.vd1_afbc_size_in =
AFBC_HSIZE_IN(afbc_right - afbc_left) |
AFBC_VSIZE_IN(afbc_bottom_src - afbc_top_src);
priv->viu.vd1_if0_luma_y0 = VD_Y_START(vd_start_lines) |
VD_Y_END(vd_end_lines);
priv->viu.vd1_if0_chroma_y0 = VD_Y_START(vd_start_lines >> 1) |
VD_Y_END(vd_end_lines >> 1);
priv->viu.vpp_pic_in_height = h_in;
priv->viu.vpp_postblend_vd1_h_start_end = VD_H_START(hsc_startp) |
VD_H_END(hsc_endp);
priv->viu.vpp_blend_vd2_h_start_end = VD_H_START(hd_start_lines) |
VD_H_END(hd_end_lines);
priv->viu.vpp_hsc_region12_startp = VD_REGION13_END(0) |
VD_REGION24_START(hsc_startp);
priv->viu.vpp_hsc_region34_startp =
VD_REGION13_END(hsc_startp) |
VD_REGION24_START(hsc_endp - hsc_startp);
priv->viu.vpp_hsc_region4_endp = hsc_endp - hsc_startp;
priv->viu.vpp_hsc_start_phase_step = ratio_x << 6;
priv->viu.vpp_hsc_region1_phase_slope = 0;
priv->viu.vpp_hsc_region3_phase_slope = 0;
priv->viu.vpp_hsc_phase_ctrl = (1 << 21) | (4 << 16);
priv->viu.vpp_line_in_length = hd_end_lines - hd_start_lines + 1;
priv->viu.vpp_preblend_h_size = hd_end_lines - hd_start_lines + 1;
priv->viu.vpp_postblend_vd1_v_start_end = VD_V_START(vsc_startp) |
VD_V_END(vsc_endp);
priv->viu.vpp_blend_vd2_v_start_end =
VD2_V_START((vd_end_lines + 1) >> 1) |
VD2_V_END(vd_end_lines);
priv->viu.vpp_vsc_region12_startp = 0;
priv->viu.vpp_vsc_region34_startp =
VD_REGION13_END(vsc_endp - vsc_startp) |
VD_REGION24_START(vsc_endp - vsc_startp);
priv->viu.vpp_vsc_region4_endp = vsc_endp - vsc_startp;
priv->viu.vpp_vsc_start_phase_step = ratio_y << 6;
}
static void meson_overlay_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct meson_overlay *meson_overlay = to_meson_overlay(plane);
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_framebuffer *fb = new_state->fb;
struct meson_drm *priv = meson_overlay->priv;
struct drm_gem_dma_object *gem;
unsigned long flags;
bool interlace_mode;
DRM_DEBUG_DRIVER("\n");
interlace_mode = new_state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE;
spin_lock_irqsave(&priv->drm->event_lock, flags);
if ((fb->modifier & DRM_FORMAT_MOD_AMLOGIC_FBC(0, 0)) ==
DRM_FORMAT_MOD_AMLOGIC_FBC(0, 0)) {
priv->viu.vd1_afbc = true;
priv->viu.vd1_afbc_mode = AFBC_MIF_URGENT(3) |
AFBC_HOLD_LINE_NUM(8) |
AFBC_BURST_LEN(2);
if (fb->modifier & DRM_FORMAT_MOD_AMLOGIC_FBC(0,
AMLOGIC_FBC_OPTION_MEM_SAVING))
priv->viu.vd1_afbc_mode |= AFBC_BLK_MEM_MODE;
if ((fb->modifier & __fourcc_mod_amlogic_layout_mask) ==
AMLOGIC_FBC_LAYOUT_SCATTER)
priv->viu.vd1_afbc_mode |= AFBC_SCATTER_MODE;
priv->viu.vd1_afbc_en = 0x1600 | AFBC_DEC_ENABLE;
priv->viu.vd1_afbc_conv_ctrl = AFBC_CONV_LBUF_LEN(256);
priv->viu.vd1_afbc_dec_def_color = AFBC_DEF_COLOR_Y(1023);
/* 420: horizontal / 2, vertical / 4 */
priv->viu.vd1_afbc_vd_cfmt_ctrl = AFBC_HORZ_RPT_PIXEL0 |
AFBC_HORZ_Y_C_RATIO(1) |
AFBC_HORZ_FMT_EN |
AFBC_VERT_RPT_LINE0 |
AFBC_VERT_INITIAL_PHASE(12) |
AFBC_VERT_PHASE_STEP(8) |
AFBC_VERT_FMT_EN;
switch (fb->format->format) {
/* AFBC Only formats */
case DRM_FORMAT_YUV420_10BIT:
priv->viu.vd1_afbc_mode |=
AFBC_COMPBITS_YUV(AFBC_COMPBITS_10BIT);
priv->viu.vd1_afbc_dec_def_color |=
AFBC_DEF_COLOR_U(512) |
AFBC_DEF_COLOR_V(512);
break;
case DRM_FORMAT_YUV420_8BIT:
priv->viu.vd1_afbc_dec_def_color |=
AFBC_DEF_COLOR_U(128) |
AFBC_DEF_COLOR_V(128);
break;
}
priv->viu.vd1_if0_gen_reg = 0;
priv->viu.vd1_if0_canvas0 = 0;
priv->viu.viu_vd1_fmt_ctrl = 0;
} else {
priv->viu.vd1_afbc = false;
priv->viu.vd1_if0_gen_reg = VD_URGENT_CHROMA |
VD_URGENT_LUMA |
VD_HOLD_LINES(9) |
VD_CHRO_RPT_LASTL_CTRL |
VD_ENABLE;
}
/* Setup scaler params */
meson_overlay_setup_scaler_params(priv, plane, interlace_mode);
priv->viu.vd1_if0_repeat_loop = 0;
priv->viu.vd1_if0_luma0_rpt_pat = interlace_mode ? 8 : 0;
priv->viu.vd1_if0_chroma0_rpt_pat = interlace_mode ? 8 : 0;
priv->viu.vd1_range_map_y = 0;
priv->viu.vd1_range_map_cb = 0;
priv->viu.vd1_range_map_cr = 0;
/* Default values for RGB888/YUV444 */
priv->viu.vd1_if0_gen_reg2 = 0;
priv->viu.viu_vd1_fmt_ctrl = 0;
/* None will match for AFBC Only formats */
switch (fb->format->format) {
/* TOFIX DRM_FORMAT_RGB888 should be supported */
case DRM_FORMAT_YUYV:
priv->viu.vd1_if0_gen_reg |= VD_BYTES_PER_PIXEL(1);
priv->viu.vd1_if0_canvas0 =
CANVAS_ADDR2(priv->canvas_id_vd1_0) |
CANVAS_ADDR1(priv->canvas_id_vd1_0) |
CANVAS_ADDR0(priv->canvas_id_vd1_0);
priv->viu.viu_vd1_fmt_ctrl = VD_HORZ_Y_C_RATIO(1) | /* /2 */
VD_HORZ_FMT_EN |
VD_VERT_RPT_LINE0 |
VD_VERT_INITIAL_PHASE(12) |
VD_VERT_PHASE_STEP(16) | /* /2 */
VD_VERT_FMT_EN;
break;
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
priv->viu.vd1_if0_gen_reg |= VD_SEPARATE_EN;
priv->viu.vd1_if0_canvas0 =
CANVAS_ADDR2(priv->canvas_id_vd1_1) |
CANVAS_ADDR1(priv->canvas_id_vd1_1) |
CANVAS_ADDR0(priv->canvas_id_vd1_0);
if (fb->format->format == DRM_FORMAT_NV12)
priv->viu.vd1_if0_gen_reg2 = VD_COLOR_MAP(1);
else
priv->viu.vd1_if0_gen_reg2 = VD_COLOR_MAP(2);
priv->viu.viu_vd1_fmt_ctrl = VD_HORZ_Y_C_RATIO(1) | /* /2 */
VD_HORZ_FMT_EN |
VD_VERT_RPT_LINE0 |
VD_VERT_INITIAL_PHASE(12) |
VD_VERT_PHASE_STEP(8) | /* /4 */
VD_VERT_FMT_EN;
break;
case DRM_FORMAT_YUV444:
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YUV411:
case DRM_FORMAT_YUV410:
priv->viu.vd1_if0_gen_reg |= VD_SEPARATE_EN;
priv->viu.vd1_if0_canvas0 =
CANVAS_ADDR2(priv->canvas_id_vd1_2) |
CANVAS_ADDR1(priv->canvas_id_vd1_1) |
CANVAS_ADDR0(priv->canvas_id_vd1_0);
switch (fb->format->format) {
case DRM_FORMAT_YUV422:
priv->viu.viu_vd1_fmt_ctrl =
VD_HORZ_Y_C_RATIO(1) | /* /2 */
VD_HORZ_FMT_EN |
VD_VERT_RPT_LINE0 |
VD_VERT_INITIAL_PHASE(12) |
VD_VERT_PHASE_STEP(16) | /* /2 */
VD_VERT_FMT_EN;
break;
case DRM_FORMAT_YUV420:
priv->viu.viu_vd1_fmt_ctrl =
VD_HORZ_Y_C_RATIO(1) | /* /2 */
VD_HORZ_FMT_EN |
VD_VERT_RPT_LINE0 |
VD_VERT_INITIAL_PHASE(12) |
VD_VERT_PHASE_STEP(8) | /* /4 */
VD_VERT_FMT_EN;
break;
case DRM_FORMAT_YUV411:
priv->viu.viu_vd1_fmt_ctrl =
VD_HORZ_Y_C_RATIO(2) | /* /4 */
VD_HORZ_FMT_EN |
VD_VERT_RPT_LINE0 |
VD_VERT_INITIAL_PHASE(12) |
VD_VERT_PHASE_STEP(16) | /* /2 */
VD_VERT_FMT_EN;
break;
case DRM_FORMAT_YUV410:
priv->viu.viu_vd1_fmt_ctrl =
VD_HORZ_Y_C_RATIO(2) | /* /4 */
VD_HORZ_FMT_EN |
VD_VERT_RPT_LINE0 |
VD_VERT_INITIAL_PHASE(12) |
VD_VERT_PHASE_STEP(8) | /* /4 */
VD_VERT_FMT_EN;
break;
}
break;
}
/* Update Canvas with buffer address */
priv->viu.vd1_planes = fb->format->num_planes;
switch (priv->viu.vd1_planes) {
case 3:
gem = drm_fb_dma_get_gem_obj(fb, 2);
priv->viu.vd1_addr2 = gem->dma_addr + fb->offsets[2];
priv->viu.vd1_stride2 = fb->pitches[2];
priv->viu.vd1_height2 =
drm_format_info_plane_height(fb->format,
fb->height, 2);
DRM_DEBUG("plane 2 addr 0x%x stride %d height %d\n",
priv->viu.vd1_addr2,
priv->viu.vd1_stride2,
priv->viu.vd1_height2);
fallthrough;
case 2:
gem = drm_fb_dma_get_gem_obj(fb, 1);
priv->viu.vd1_addr1 = gem->dma_addr + fb->offsets[1];
priv->viu.vd1_stride1 = fb->pitches[1];
priv->viu.vd1_height1 =
drm_format_info_plane_height(fb->format,
fb->height, 1);
DRM_DEBUG("plane 1 addr 0x%x stride %d height %d\n",
priv->viu.vd1_addr1,
priv->viu.vd1_stride1,
priv->viu.vd1_height1);
fallthrough;
case 1:
gem = drm_fb_dma_get_gem_obj(fb, 0);
priv->viu.vd1_addr0 = gem->dma_addr + fb->offsets[0];
priv->viu.vd1_stride0 = fb->pitches[0];
priv->viu.vd1_height0 =
drm_format_info_plane_height(fb->format,
fb->height, 0);
DRM_DEBUG("plane 0 addr 0x%x stride %d height %d\n",
priv->viu.vd1_addr0,
priv->viu.vd1_stride0,
priv->viu.vd1_height0);
}
if (priv->viu.vd1_afbc) {
if (priv->viu.vd1_afbc_mode & AFBC_SCATTER_MODE) {
/*
* In Scatter mode, the header contains the physical
* body content layout, thus the body content
* size isn't needed.
*/
priv->viu.vd1_afbc_head_addr = priv->viu.vd1_addr0 >> 4;
priv->viu.vd1_afbc_body_addr = 0;
} else {
/* Default mode is 4k per superblock */
unsigned long block_size = 4096;
unsigned long body_size;
/* 8-bit mem saving mode is 3072 bytes per superblock */
if (priv->viu.vd1_afbc_mode & AFBC_BLK_MEM_MODE)
block_size = 3072;
body_size = (ALIGN(priv->viu.vd1_stride0, 64) / 64) *
(ALIGN(priv->viu.vd1_height0, 32) / 32) *
block_size;
priv->viu.vd1_afbc_body_addr = priv->viu.vd1_addr0 >> 4;
/* Header is after body content */
priv->viu.vd1_afbc_head_addr = (priv->viu.vd1_addr0 +
body_size) >> 4;
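/*
 * For illustration (hypothetical 1920x1080 buffer): with a 1920 byte
 * stride and 4K superblocks, body_size = (1920 / 64) *
 * (ALIGN(1080, 32) / 32) * 4096 = 30 * 34 * 4096 = 4177920 bytes,
 * and the header starts right after it.
 */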
}
}
priv->viu.vd1_enabled = true;
spin_unlock_irqrestore(&priv->drm->event_lock, flags);
DRM_DEBUG_DRIVER("\n");
}
static void meson_overlay_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct meson_overlay *meson_overlay = to_meson_overlay(plane);
struct meson_drm *priv = meson_overlay->priv;
DRM_DEBUG_DRIVER("\n");
priv->viu.vd1_enabled = false;
/* Disable VD1 */
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) {
writel_relaxed(0, priv->io_base + _REG(VD1_BLEND_SRC_CTRL));
writel_relaxed(0, priv->io_base + _REG(VD2_BLEND_SRC_CTRL));
writel_relaxed(0, priv->io_base + _REG(VD1_IF0_GEN_REG + 0x17b0));
writel_relaxed(0, priv->io_base + _REG(VD2_IF0_GEN_REG + 0x17b0));
} else
writel_bits_relaxed(VPP_VD1_POSTBLEND | VPP_VD1_PREBLEND, 0,
priv->io_base + _REG(VPP_MISC));
}
static const struct drm_plane_helper_funcs meson_overlay_helper_funcs = {
.atomic_check = meson_overlay_atomic_check,
.atomic_disable = meson_overlay_atomic_disable,
.atomic_update = meson_overlay_atomic_update,
};
static bool meson_overlay_format_mod_supported(struct drm_plane *plane,
u32 format, u64 modifier)
{
if (modifier == DRM_FORMAT_MOD_LINEAR &&
format != DRM_FORMAT_YUV420_8BIT &&
format != DRM_FORMAT_YUV420_10BIT)
return true;
if ((modifier & DRM_FORMAT_MOD_AMLOGIC_FBC(0, 0)) ==
DRM_FORMAT_MOD_AMLOGIC_FBC(0, 0)) {
unsigned int layout = modifier &
DRM_FORMAT_MOD_AMLOGIC_FBC(
__fourcc_mod_amlogic_layout_mask, 0);
unsigned int options =
(modifier >> __fourcc_mod_amlogic_options_shift) &
__fourcc_mod_amlogic_options_mask;
if (format != DRM_FORMAT_YUV420_8BIT &&
format != DRM_FORMAT_YUV420_10BIT) {
DRM_DEBUG_KMS("%llx invalid format 0x%08x\n",
modifier, format);
return false;
}
if (layout != AMLOGIC_FBC_LAYOUT_BASIC &&
layout != AMLOGIC_FBC_LAYOUT_SCATTER) {
DRM_DEBUG_KMS("%llx invalid layout %x\n",
modifier, layout);
return false;
}
if (options &&
options != AMLOGIC_FBC_OPTION_MEM_SAVING) {
DRM_DEBUG_KMS("%llx invalid layout %x\n",
modifier, layout);
return false;
}
return true;
}
DRM_DEBUG_KMS("invalid modifier %llx for format 0x%08x\n",
modifier, format);
return false;
}
static const struct drm_plane_funcs meson_overlay_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
.format_mod_supported = meson_overlay_format_mod_supported,
};
static const uint32_t supported_drm_formats[] = {
DRM_FORMAT_YUYV,
DRM_FORMAT_NV12,
DRM_FORMAT_NV21,
DRM_FORMAT_YUV444,
DRM_FORMAT_YUV422,
DRM_FORMAT_YUV420,
DRM_FORMAT_YUV411,
DRM_FORMAT_YUV410,
DRM_FORMAT_YUV420_8BIT, /* Amlogic FBC Only */
DRM_FORMAT_YUV420_10BIT, /* Amlogic FBC Only */
};
static const uint64_t format_modifiers[] = {
DRM_FORMAT_MOD_AMLOGIC_FBC(AMLOGIC_FBC_LAYOUT_SCATTER,
AMLOGIC_FBC_OPTION_MEM_SAVING),
DRM_FORMAT_MOD_AMLOGIC_FBC(AMLOGIC_FBC_LAYOUT_BASIC,
AMLOGIC_FBC_OPTION_MEM_SAVING),
DRM_FORMAT_MOD_AMLOGIC_FBC(AMLOGIC_FBC_LAYOUT_SCATTER, 0),
DRM_FORMAT_MOD_AMLOGIC_FBC(AMLOGIC_FBC_LAYOUT_BASIC, 0),
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID,
};
int meson_overlay_create(struct meson_drm *priv)
{
struct meson_overlay *meson_overlay;
struct drm_plane *plane;
DRM_DEBUG_DRIVER("\n");
meson_overlay = devm_kzalloc(priv->drm->dev, sizeof(*meson_overlay),
GFP_KERNEL);
if (!meson_overlay)
return -ENOMEM;
meson_overlay->priv = priv;
plane = &meson_overlay->base;
drm_universal_plane_init(priv->drm, plane, 0xFF,
&meson_overlay_funcs,
supported_drm_formats,
ARRAY_SIZE(supported_drm_formats),
format_modifiers,
DRM_PLANE_TYPE_OVERLAY, "meson_overlay_plane");
drm_plane_helper_add(plane, &meson_overlay_helper_funcs);
/* For now, the VD Overlay plane is always at the back */
drm_plane_create_zpos_immutable_property(plane, 0);
priv->overlay_plane = plane;
DRM_DEBUG_DRIVER("\n");
return 0;
}
| linux-master | drivers/gpu/drm/meson/meson_overlay.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017 Linus Walleij <[email protected]>
* Parts of this file were based on sources as follows:
*
* Copyright (C) 2006-2008 Intel Corporation
* Copyright (C) 2007 Amos Lee <[email protected]>
* Copyright (C) 2007 Dave Airlie <[email protected]>
* Copyright (C) 2011 Texas Instruments
* Copyright (C) 2017 Eric Anholt
*/
#include <linux/clk.h>
#include <linux/dma-buf.h>
#include <linux/of_graph.h>
#include <linux/delay.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_panel.h>
#include <drm/drm_vblank.h>
#include "tve200_drm.h"
irqreturn_t tve200_irq(int irq, void *data)
{
struct tve200_drm_dev_private *priv = data;
u32 stat;
u32 val;
stat = readl(priv->regs + TVE200_INT_STAT);
if (!stat)
return IRQ_NONE;
/*
 * Vblank IRQ
 *
 * The hardware is a bit tilted: the line stays high after clearing
 * the vblank IRQ, firing many more interrupts. We counter this
 * by toggling the IRQ back and forth between firing at vblank and
 * firing at the start of the active image, which works around the
 * problem since those occur strictly in sequence. We thus get two IRQs
 * per frame: one at the start of vblank (which we pass on to the CRTC)
 * and another at the start of the image (which we discard).
 */
if (stat & TVE200_INT_V_STATUS) {
val = readl(priv->regs + TVE200_CTRL);
/* We have an actual start of vsync */
if (!(val & TVE200_VSTSTYPE_BITS)) {
drm_crtc_handle_vblank(&priv->pipe.crtc);
/* Toggle trigger to start of active image */
val |= TVE200_VSTSTYPE_VAI;
} else {
/* Toggle trigger back to start of vsync */
val &= ~TVE200_VSTSTYPE_BITS;
}
writel(val, priv->regs + TVE200_CTRL);
} else
dev_err(priv->drm->dev, "stray IRQ %08x\n", stat);
/* Clear the interrupt once done */
writel(stat, priv->regs + TVE200_INT_CLR);
return IRQ_HANDLED;
}
static int tve200_display_check(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *pstate,
struct drm_crtc_state *cstate)
{
const struct drm_display_mode *mode = &cstate->mode;
struct drm_framebuffer *old_fb = pipe->plane.state->fb;
struct drm_framebuffer *fb = pstate->fb;
/*
* We support these specific resolutions and nothing else.
*/
if (!(mode->hdisplay == 352 && mode->vdisplay == 240) && /* SIF(525) */
!(mode->hdisplay == 352 && mode->vdisplay == 288) && /* CIF(625) */
!(mode->hdisplay == 640 && mode->vdisplay == 480) && /* VGA */
!(mode->hdisplay == 720 && mode->vdisplay == 480) && /* D1 */
!(mode->hdisplay == 720 && mode->vdisplay == 576)) { /* D1 */
DRM_DEBUG_KMS("unsupported display mode (%u x %u)\n",
mode->hdisplay, mode->vdisplay);
return -EINVAL;
}
if (fb) {
u32 offset = drm_fb_dma_get_gem_addr(fb, pstate, 0);
/* FB base address must be dword aligned. */
if (offset & 3) {
DRM_DEBUG_KMS("FB not 32-bit aligned\n");
return -EINVAL;
}
/*
* There's no pitch register, the mode's hdisplay
* controls this.
*/
if (fb->pitches[0] != mode->hdisplay * fb->format->cpp[0]) {
DRM_DEBUG_KMS("can't handle pitches\n");
return -EINVAL;
}
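/*
 * For illustration: a 640x480 RGB565 framebuffer must have
 * pitches[0] == 640 * 2 = 1280 bytes, an XRGB8888 one 640 * 4 = 2560.
 */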
/*
* We can't change the FB format in a flicker-free
* manner (and only update it during CRTC enable).
*/
if (old_fb && old_fb->format != fb->format)
cstate->mode_changed = true;
}
return 0;
}
static void tve200_display_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *cstate,
struct drm_plane_state *plane_state)
{
struct drm_crtc *crtc = &pipe->crtc;
struct drm_plane *plane = &pipe->plane;
struct drm_device *drm = crtc->dev;
struct tve200_drm_dev_private *priv = drm->dev_private;
const struct drm_display_mode *mode = &cstate->mode;
struct drm_framebuffer *fb = plane->state->fb;
struct drm_connector *connector = priv->connector;
u32 format = fb->format->format;
u32 ctrl1 = 0;
int retries;
clk_prepare_enable(priv->clk);
/* Reset the TVE200 and wait for it to come back online */
writel(TVE200_CTRL_4_RESET, priv->regs + TVE200_CTRL_4);
for (retries = 0; retries < 5; retries++) {
usleep_range(30000, 50000);
if (readl(priv->regs + TVE200_CTRL_4) & TVE200_CTRL_4_RESET)
continue;
else
break;
}
if (retries == 5 &&
readl(priv->regs + TVE200_CTRL_4) & TVE200_CTRL_4_RESET) {
dev_err(drm->dev, "can't get hardware out of reset\n");
return;
}
/* Function 1 */
ctrl1 |= TVE200_CTRL_CSMODE;
/* Interlace mode for CCIR656: parameterize? */
ctrl1 |= TVE200_CTRL_NONINTERLACE;
/* 32 words per burst */
ctrl1 |= TVE200_CTRL_BURST_32_WORDS;
/* 16 retries */
ctrl1 |= TVE200_CTRL_RETRYCNT_16;
/* NTSC mode: parametrize? */
ctrl1 |= TVE200_CTRL_NTSC;
/* Vsync IRQ at start of Vsync at first */
ctrl1 |= TVE200_VSTSTYPE_VSYNC;
if (connector->display_info.bus_flags &
DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
ctrl1 |= TVE200_CTRL_TVCLKP;
if ((mode->hdisplay == 352 && mode->vdisplay == 240) || /* SIF(525) */
(mode->hdisplay == 352 && mode->vdisplay == 288)) { /* CIF(625) */
ctrl1 |= TVE200_CTRL_IPRESOL_CIF;
dev_info(drm->dev, "CIF mode\n");
} else if (mode->hdisplay == 640 && mode->vdisplay == 480) {
ctrl1 |= TVE200_CTRL_IPRESOL_VGA;
dev_info(drm->dev, "VGA mode\n");
} else if ((mode->hdisplay == 720 && mode->vdisplay == 480) ||
(mode->hdisplay == 720 && mode->vdisplay == 576)) {
ctrl1 |= TVE200_CTRL_IPRESOL_D1;
dev_info(drm->dev, "D1 mode\n");
}
if (format & DRM_FORMAT_BIG_ENDIAN) {
ctrl1 |= TVE200_CTRL_BBBP;
format &= ~DRM_FORMAT_BIG_ENDIAN;
}
switch (format) {
case DRM_FORMAT_XRGB8888:
ctrl1 |= TVE200_IPDMOD_RGB888;
break;
case DRM_FORMAT_RGB565:
ctrl1 |= TVE200_IPDMOD_RGB565;
break;
case DRM_FORMAT_XRGB1555:
ctrl1 |= TVE200_IPDMOD_RGB555;
break;
case DRM_FORMAT_XBGR8888:
ctrl1 |= TVE200_IPDMOD_RGB888 | TVE200_BGR;
break;
case DRM_FORMAT_BGR565:
ctrl1 |= TVE200_IPDMOD_RGB565 | TVE200_BGR;
break;
case DRM_FORMAT_XBGR1555:
ctrl1 |= TVE200_IPDMOD_RGB555 | TVE200_BGR;
break;
case DRM_FORMAT_YUYV:
ctrl1 |= TVE200_IPDMOD_YUV422;
ctrl1 |= TVE200_CTRL_YCBCRODR_CR0Y1CB0Y0;
break;
case DRM_FORMAT_YVYU:
ctrl1 |= TVE200_IPDMOD_YUV422;
ctrl1 |= TVE200_CTRL_YCBCRODR_CB0Y1CR0Y0;
break;
case DRM_FORMAT_UYVY:
ctrl1 |= TVE200_IPDMOD_YUV422;
ctrl1 |= TVE200_CTRL_YCBCRODR_Y1CR0Y0CB0;
break;
case DRM_FORMAT_VYUY:
ctrl1 |= TVE200_IPDMOD_YUV422;
ctrl1 |= TVE200_CTRL_YCBCRODR_Y1CB0Y0CR0;
break;
case DRM_FORMAT_YUV420:
ctrl1 |= TVE200_CTRL_YUV420;
ctrl1 |= TVE200_IPDMOD_YUV420;
break;
default:
dev_err(drm->dev, "Unknown FB format 0x%08x\n",
fb->format->format);
break;
}
ctrl1 |= TVE200_TVEEN;
/* Turn it on */
writel(ctrl1, priv->regs + TVE200_CTRL);
drm_crtc_vblank_on(crtc);
}
static void tve200_display_disable(struct drm_simple_display_pipe *pipe)
{
struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *drm = crtc->dev;
struct tve200_drm_dev_private *priv = drm->dev_private;
drm_crtc_vblank_off(crtc);
/* Disable, put into reset and power down */
writel(0, priv->regs + TVE200_CTRL);
writel(TVE200_CTRL_4_RESET, priv->regs + TVE200_CTRL_4);
clk_disable_unprepare(priv->clk);
}
static void tve200_display_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_pstate)
{
struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *drm = crtc->dev;
struct tve200_drm_dev_private *priv = drm->dev_private;
struct drm_pending_vblank_event *event = crtc->state->event;
struct drm_plane *plane = &pipe->plane;
struct drm_plane_state *pstate = plane->state;
struct drm_framebuffer *fb = pstate->fb;
if (fb) {
/* For RGB, the Y component is used as base address */
writel(drm_fb_dma_get_gem_addr(fb, pstate, 0),
priv->regs + TVE200_Y_FRAME_BASE_ADDR);
/* For three plane YUV we need two more addresses */
if (fb->format->format == DRM_FORMAT_YUV420) {
writel(drm_fb_dma_get_gem_addr(fb, pstate, 1),
priv->regs + TVE200_U_FRAME_BASE_ADDR);
writel(drm_fb_dma_get_gem_addr(fb, pstate, 2),
priv->regs + TVE200_V_FRAME_BASE_ADDR);
}
}
if (event) {
crtc->state->event = NULL;
spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->active && drm_crtc_vblank_get(crtc) == 0)
drm_crtc_arm_vblank_event(crtc, event);
else
drm_crtc_send_vblank_event(crtc, event);
spin_unlock_irq(&crtc->dev->event_lock);
}
}
static int tve200_display_enable_vblank(struct drm_simple_display_pipe *pipe)
{
struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *drm = crtc->dev;
struct tve200_drm_dev_private *priv = drm->dev_private;
/* Clear any IRQs and enable */
writel(0xFF, priv->regs + TVE200_INT_CLR);
writel(TVE200_INT_V_STATUS, priv->regs + TVE200_INT_EN);
return 0;
}
static void tve200_display_disable_vblank(struct drm_simple_display_pipe *pipe)
{
struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *drm = crtc->dev;
struct tve200_drm_dev_private *priv = drm->dev_private;
writel(0, priv->regs + TVE200_INT_EN);
}
static const struct drm_simple_display_pipe_funcs tve200_display_funcs = {
.check = tve200_display_check,
.enable = tve200_display_enable,
.disable = tve200_display_disable,
.update = tve200_display_update,
.enable_vblank = tve200_display_enable_vblank,
.disable_vblank = tve200_display_disable_vblank,
};
int tve200_display_init(struct drm_device *drm)
{
struct tve200_drm_dev_private *priv = drm->dev_private;
int ret;
static const u32 formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_RGB565,
DRM_FORMAT_BGR565,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_XBGR1555,
/*
* The controller actually supports any YCbCr ordering,
* for packed YCbCr. This just lists the orderings that
* DRM supports.
*/
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
/* This uses three planes */
DRM_FORMAT_YUV420,
};
ret = drm_simple_display_pipe_init(drm, &priv->pipe,
&tve200_display_funcs,
formats, ARRAY_SIZE(formats),
NULL,
priv->connector);
if (ret)
return ret;
return 0;
}
| linux-master | drivers/gpu/drm/tve200/tve200_display.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017 Linus Walleij <[email protected]>
* Parts of this file were based on sources as follows:
*
* Copyright (C) 2006-2008 Intel Corporation
* Copyright (C) 2007 Amos Lee <[email protected]>
* Copyright (C) 2007 Dave Airlie <[email protected]>
* Copyright (C) 2011 Texas Instruments
* Copyright (C) 2017 Eric Anholt
*/
/**
* DOC: Faraday TV Encoder TVE200 DRM Driver
*
* The Faraday TV Encoder TVE200 is also known as the Gemini TV Interface
* Controller (TVC) and is found in the Gemini Chipset from Storlink
* Semiconductor (later Storm Semiconductor, later Cortina Systems)
* but also in the Grain Media GM8180 chipset. On the Gemini the module
* is connected to 8 data lines and a single clock line, comprising an
* 8-bit BT.656 interface.
*
* This is a very basic YUV display driver. The datasheet specifies that
* it supports the ITU BT.656 standard. It requires a 27 MHz clock which is
* the hallmark of any TV encoder supporting both PAL and NTSC.
*
* This driver exposes a standard KMS interface for this TV encoder.
*/
#include <linux/clk.h>
#include <linux/dma-buf.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "tve200_drm.h"
#define DRIVER_DESC "DRM module for Faraday TVE200"
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = drm_gem_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
static int tve200_modeset_init(struct drm_device *dev)
{
struct drm_mode_config *mode_config;
struct tve200_drm_dev_private *priv = dev->dev_private;
struct drm_panel *panel;
struct drm_bridge *bridge;
int ret;
drm_mode_config_init(dev);
mode_config = &dev->mode_config;
mode_config->funcs = &mode_config_funcs;
mode_config->min_width = 352;
mode_config->max_width = 720;
mode_config->min_height = 240;
mode_config->max_height = 576;
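/* These bounds bracket the modes accepted by tve200_display_check():
 * 352x240/288 (SIF/CIF), 640x480 (VGA) and 720x480/576 (D1).
 */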
ret = drm_of_find_panel_or_bridge(dev->dev->of_node,
0, 0, &panel, &bridge);
if (ret && ret != -ENODEV)
return ret;
if (panel) {
bridge = drm_panel_bridge_add_typed(panel,
DRM_MODE_CONNECTOR_Unknown);
if (IS_ERR(bridge)) {
ret = PTR_ERR(bridge);
goto out_bridge;
}
} else {
/*
* TODO: when we are using a different bridge than a panel
* (such as a dumb VGA connector) we need to devise a different
* method to get the connector out of the bridge.
*/
dev_err(dev->dev, "the bridge is not a panel\n");
ret = -EINVAL;
goto out_bridge;
}
ret = tve200_display_init(dev);
if (ret) {
dev_err(dev->dev, "failed to init display\n");
goto out_bridge;
}
ret = drm_simple_display_pipe_attach_bridge(&priv->pipe,
bridge);
if (ret) {
dev_err(dev->dev, "failed to attach bridge\n");
goto out_bridge;
}
priv->panel = panel;
priv->connector = drm_panel_bridge_connector(bridge);
priv->bridge = bridge;
dev_info(dev->dev, "attached to panel %s\n",
dev_name(panel->dev));
ret = drm_vblank_init(dev, 1);
if (ret) {
dev_err(dev->dev, "failed to init vblank\n");
goto out_bridge;
}
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
goto finish;
out_bridge:
if (panel)
drm_panel_bridge_remove(bridge);
drm_mode_config_cleanup(dev);
finish:
return ret;
}
DEFINE_DRM_GEM_DMA_FOPS(drm_fops);
static const struct drm_driver tve200_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.ioctls = NULL,
.fops = &drm_fops,
.name = "tve200",
.desc = DRIVER_DESC,
.date = "20170703",
.major = 1,
.minor = 0,
.patchlevel = 0,
DRM_GEM_DMA_DRIVER_OPS,
};
static int tve200_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct tve200_drm_dev_private *priv;
struct drm_device *drm;
int irq;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
drm = drm_dev_alloc(&tve200_drm_driver, dev);
if (IS_ERR(drm))
return PTR_ERR(drm);
platform_set_drvdata(pdev, drm);
priv->drm = drm;
drm->dev_private = priv;
/* Clock the silicon so we can access the registers */
priv->pclk = devm_clk_get(dev, "PCLK");
if (IS_ERR(priv->pclk)) {
dev_err(dev, "unable to get PCLK\n");
ret = PTR_ERR(priv->pclk);
goto dev_unref;
}
ret = clk_prepare_enable(priv->pclk);
if (ret) {
dev_err(dev, "failed to enable PCLK\n");
goto dev_unref;
}
/* This clock is for the pixels (27MHz) */
priv->clk = devm_clk_get(dev, "TVE");
if (IS_ERR(priv->clk)) {
dev_err(dev, "unable to get TVE clock\n");
ret = PTR_ERR(priv->clk);
goto clk_disable;
}
priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs)) {
dev_err(dev, "%s failed mmio\n", __func__);
ret = -EINVAL;
goto clk_disable;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
goto clk_disable;
}
/* turn off interrupts before requesting the irq */
writel(0, priv->regs + TVE200_INT_EN);
ret = devm_request_irq(dev, irq, tve200_irq, 0, "tve200", priv);
if (ret) {
dev_err(dev, "failed to request irq %d\n", ret);
goto clk_disable;
}
ret = tve200_modeset_init(drm);
if (ret)
goto clk_disable;
ret = drm_dev_register(drm, 0);
if (ret < 0)
goto clk_disable;
/*
* Passing in 16 here will make the RGB565 mode the default
* Passing in 32 will use XRGB8888 mode
*/
drm_fbdev_dma_setup(drm, 16);
return 0;
clk_disable:
clk_disable_unprepare(priv->pclk);
dev_unref:
drm_dev_put(drm);
return ret;
}
static void tve200_remove(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
struct tve200_drm_dev_private *priv = drm->dev_private;
drm_dev_unregister(drm);
if (priv->panel)
drm_panel_bridge_remove(priv->bridge);
drm_mode_config_cleanup(drm);
clk_disable_unprepare(priv->pclk);
drm_dev_put(drm);
}
static const struct of_device_id tve200_of_match[] = {
{
.compatible = "faraday,tve200",
},
{},
};
static struct platform_driver tve200_driver = {
.driver = {
.name = "tve200",
.of_match_table = tve200_of_match,
},
.probe = tve200_probe,
.remove_new = tve200_remove,
};
drm_module_platform_driver(tve200_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Linus Walleij <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/tve200/tve200_drv.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2019-2020 Intel Corporation
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "kmb_dsi.h"
#include "kmb_regs.h"
static struct mipi_dsi_host *dsi_host;
static struct mipi_dsi_device *dsi_device;
static struct drm_bridge *adv_bridge;
/* Default setting is 1080p, 4 lanes */
#define IMG_HEIGHT_LINES 1080
#define IMG_WIDTH_PX 1920
#define MIPI_TX_ACTIVE_LANES 4
static struct mipi_tx_frame_section_cfg mipi_tx_frame0_sect_cfg = {
.width_pixels = IMG_WIDTH_PX,
.height_lines = IMG_HEIGHT_LINES,
.data_type = DSI_LP_DT_PPS_RGB888_24B,
.data_mode = MIPI_DATA_MODE1,
.dma_packed = 0
};
static struct mipi_tx_frame_cfg mipitx_frame0_cfg = {
.sections[0] = &mipi_tx_frame0_sect_cfg,
.sections[1] = NULL,
.sections[2] = NULL,
.sections[3] = NULL,
.vsync_width = 5,
.v_backporch = 36,
.v_frontporch = 4,
.hsync_width = 44,
.h_backporch = 148,
.h_frontporch = 88
};
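/*
 * For illustration: these defaults give htotal = 1920 + 44 + 148 + 88 =
 * 2200 and vtotal = 1080 + 5 + 36 + 4 = 1125; assuming a 60 Hz refresh
 * that corresponds to the standard 148.5 MHz 1080p pixel clock.
 */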
static const struct mipi_tx_dsi_cfg mipitx_dsi_cfg = {
.hfp_blank_en = 0,
.eotp_en = 0,
.lpm_last_vfp_line = 0,
.lpm_first_vsa_line = 0,
.sync_pulse_eventn = DSI_VIDEO_MODE_NO_BURST_EVENT,
.hfp_blanking = SEND_BLANK_PACKET,
.hbp_blanking = SEND_BLANK_PACKET,
.hsa_blanking = SEND_BLANK_PACKET,
.v_blanking = SEND_BLANK_PACKET,
};
static struct mipi_ctrl_cfg mipi_tx_init_cfg = {
.active_lanes = MIPI_TX_ACTIVE_LANES,
.lane_rate_mbps = MIPI_TX_LANE_DATA_RATE_MBPS,
.ref_clk_khz = MIPI_TX_REF_CLK_KHZ,
.cfg_clk_khz = MIPI_TX_CFG_CLK_KHZ,
.tx_ctrl_cfg = {
.frames[0] = &mipitx_frame0_cfg,
.frames[1] = NULL,
.frames[2] = NULL,
.frames[3] = NULL,
.tx_dsi_cfg = &mipitx_dsi_cfg,
.line_sync_pkt_en = 0,
.line_counter_active = 0,
.frame_counter_active = 0,
.tx_always_use_hact = 1,
.tx_hact_wait_stop = 1,
}
};
struct mipi_hs_freq_range_cfg {
u16 default_bit_rate_mbps;
u8 hsfreqrange_code;
};
struct vco_params {
u32 freq;
u32 range;
u32 divider;
};
static const struct vco_params vco_table[] = {
{52, 0x3f, 8},
{80, 0x39, 8},
{105, 0x2f, 4},
{160, 0x29, 4},
{210, 0x1f, 2},
{320, 0x19, 2},
{420, 0x0f, 1},
{630, 0x09, 1},
{1100, 0x03, 1},
{0xffff, 0x01, 1},
};
static const struct mipi_hs_freq_range_cfg
mipi_hs_freq_range[MIPI_DPHY_DEFAULT_BIT_RATES] = {
{.default_bit_rate_mbps = 80, .hsfreqrange_code = 0x00},
{.default_bit_rate_mbps = 90, .hsfreqrange_code = 0x10},
{.default_bit_rate_mbps = 100, .hsfreqrange_code = 0x20},
{.default_bit_rate_mbps = 110, .hsfreqrange_code = 0x30},
{.default_bit_rate_mbps = 120, .hsfreqrange_code = 0x01},
{.default_bit_rate_mbps = 130, .hsfreqrange_code = 0x11},
{.default_bit_rate_mbps = 140, .hsfreqrange_code = 0x21},
{.default_bit_rate_mbps = 150, .hsfreqrange_code = 0x31},
{.default_bit_rate_mbps = 160, .hsfreqrange_code = 0x02},
{.default_bit_rate_mbps = 170, .hsfreqrange_code = 0x12},
{.default_bit_rate_mbps = 180, .hsfreqrange_code = 0x22},
{.default_bit_rate_mbps = 190, .hsfreqrange_code = 0x32},
{.default_bit_rate_mbps = 205, .hsfreqrange_code = 0x03},
{.default_bit_rate_mbps = 220, .hsfreqrange_code = 0x13},
{.default_bit_rate_mbps = 235, .hsfreqrange_code = 0x23},
{.default_bit_rate_mbps = 250, .hsfreqrange_code = 0x33},
{.default_bit_rate_mbps = 275, .hsfreqrange_code = 0x04},
{.default_bit_rate_mbps = 300, .hsfreqrange_code = 0x14},
{.default_bit_rate_mbps = 325, .hsfreqrange_code = 0x25},
{.default_bit_rate_mbps = 350, .hsfreqrange_code = 0x35},
{.default_bit_rate_mbps = 400, .hsfreqrange_code = 0x05},
{.default_bit_rate_mbps = 450, .hsfreqrange_code = 0x16},
{.default_bit_rate_mbps = 500, .hsfreqrange_code = 0x26},
{.default_bit_rate_mbps = 550, .hsfreqrange_code = 0x37},
{.default_bit_rate_mbps = 600, .hsfreqrange_code = 0x07},
{.default_bit_rate_mbps = 650, .hsfreqrange_code = 0x18},
{.default_bit_rate_mbps = 700, .hsfreqrange_code = 0x28},
{.default_bit_rate_mbps = 750, .hsfreqrange_code = 0x39},
{.default_bit_rate_mbps = 800, .hsfreqrange_code = 0x09},
{.default_bit_rate_mbps = 850, .hsfreqrange_code = 0x19},
{.default_bit_rate_mbps = 900, .hsfreqrange_code = 0x29},
{.default_bit_rate_mbps = 1000, .hsfreqrange_code = 0x0A},
{.default_bit_rate_mbps = 1050, .hsfreqrange_code = 0x1A},
{.default_bit_rate_mbps = 1100, .hsfreqrange_code = 0x2A},
{.default_bit_rate_mbps = 1150, .hsfreqrange_code = 0x3B},
{.default_bit_rate_mbps = 1200, .hsfreqrange_code = 0x0B},
{.default_bit_rate_mbps = 1250, .hsfreqrange_code = 0x1B},
{.default_bit_rate_mbps = 1300, .hsfreqrange_code = 0x2B},
{.default_bit_rate_mbps = 1350, .hsfreqrange_code = 0x3C},
{.default_bit_rate_mbps = 1400, .hsfreqrange_code = 0x0C},
{.default_bit_rate_mbps = 1450, .hsfreqrange_code = 0x1C},
{.default_bit_rate_mbps = 1500, .hsfreqrange_code = 0x2C},
{.default_bit_rate_mbps = 1550, .hsfreqrange_code = 0x3D},
{.default_bit_rate_mbps = 1600, .hsfreqrange_code = 0x0D},
{.default_bit_rate_mbps = 1650, .hsfreqrange_code = 0x1D},
{.default_bit_rate_mbps = 1700, .hsfreqrange_code = 0x2E},
{.default_bit_rate_mbps = 1750, .hsfreqrange_code = 0x3E},
{.default_bit_rate_mbps = 1800, .hsfreqrange_code = 0x0E},
{.default_bit_rate_mbps = 1850, .hsfreqrange_code = 0x1E},
{.default_bit_rate_mbps = 1900, .hsfreqrange_code = 0x2F},
{.default_bit_rate_mbps = 1950, .hsfreqrange_code = 0x3F},
{.default_bit_rate_mbps = 2000, .hsfreqrange_code = 0x0F},
{.default_bit_rate_mbps = 2050, .hsfreqrange_code = 0x40},
{.default_bit_rate_mbps = 2100, .hsfreqrange_code = 0x41},
{.default_bit_rate_mbps = 2150, .hsfreqrange_code = 0x42},
{.default_bit_rate_mbps = 2200, .hsfreqrange_code = 0x43},
{.default_bit_rate_mbps = 2250, .hsfreqrange_code = 0x44},
{.default_bit_rate_mbps = 2300, .hsfreqrange_code = 0x45},
{.default_bit_rate_mbps = 2350, .hsfreqrange_code = 0x46},
{.default_bit_rate_mbps = 2400, .hsfreqrange_code = 0x47},
{.default_bit_rate_mbps = 2450, .hsfreqrange_code = 0x48},
{.default_bit_rate_mbps = 2500, .hsfreqrange_code = 0x49}
};
static void kmb_dsi_clk_disable(struct kmb_dsi *kmb_dsi)
{
clk_disable_unprepare(kmb_dsi->clk_mipi);
clk_disable_unprepare(kmb_dsi->clk_mipi_ecfg);
clk_disable_unprepare(kmb_dsi->clk_mipi_cfg);
}
void kmb_dsi_host_unregister(struct kmb_dsi *kmb_dsi)
{
kmb_dsi_clk_disable(kmb_dsi);
mipi_dsi_host_unregister(kmb_dsi->host);
}
/*
 * This DSI can only be paired with bridges that are configured through
 * I2C, which on the KMB EVM is the ADV7535.
 */
static ssize_t kmb_dsi_host_transfer(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
return 0;
}
static int kmb_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *dev)
{
return 0;
}
static int kmb_dsi_host_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *dev)
{
return 0;
}
static const struct mipi_dsi_host_ops kmb_dsi_host_ops = {
.attach = kmb_dsi_host_attach,
.detach = kmb_dsi_host_detach,
.transfer = kmb_dsi_host_transfer,
};
int kmb_dsi_host_bridge_init(struct device *dev)
{
struct device_node *encoder_node, *dsi_out;
/* Create and register MIPI DSI host */
if (!dsi_host) {
dsi_host = kzalloc(sizeof(*dsi_host), GFP_KERNEL);
if (!dsi_host)
return -ENOMEM;
dsi_host->ops = &kmb_dsi_host_ops;
if (!dsi_device) {
dsi_device = kzalloc(sizeof(*dsi_device), GFP_KERNEL);
if (!dsi_device) {
kfree(dsi_host);
return -ENOMEM;
}
}
dsi_host->dev = dev;
mipi_dsi_host_register(dsi_host);
}
/* Find ADV7535 node and initialize it */
dsi_out = of_graph_get_endpoint_by_regs(dev->of_node, 0, 1);
if (!dsi_out) {
DRM_ERROR("Failed to get dsi_out node info from DT\n");
return -EINVAL;
}
encoder_node = of_graph_get_remote_port_parent(dsi_out);
if (!encoder_node) {
of_node_put(dsi_out);
DRM_ERROR("Failed to get bridge info from DT\n");
return -EINVAL;
}
/* Locate drm bridge from the hdmi encoder DT node */
adv_bridge = of_drm_find_bridge(encoder_node);
of_node_put(dsi_out);
of_node_put(encoder_node);
if (!adv_bridge) {
DRM_DEBUG("Wait for external bridge driver DT\n");
return -EPROBE_DEFER;
}
return 0;
}
static u32 mipi_get_datatype_params(u32 data_type, u32 data_mode,
struct mipi_data_type_params *params)
{
struct mipi_data_type_params data_type_param;
switch (data_type) {
case DSI_LP_DT_PPS_YCBCR420_12B:
data_type_param.size_constraint_pixels = 2;
data_type_param.size_constraint_bytes = 3;
switch (data_mode) {
/* Case 0 not supported according to MDK */
case 1:
case 2:
case 3:
data_type_param.pixels_per_pclk = 2;
data_type_param.bits_per_pclk = 24;
break;
default:
DRM_ERROR("DSI: Invalid data_mode %d\n", data_mode);
return -EINVAL;
}
break;
case DSI_LP_DT_PPS_YCBCR422_16B:
data_type_param.size_constraint_pixels = 2;
data_type_param.size_constraint_bytes = 4;
switch (data_mode) {
/* Cases 0 and 1 are not supported according
 * to the MDK
 */
case 2:
data_type_param.pixels_per_pclk = 1;
data_type_param.bits_per_pclk = 16;
break;
case 3:
data_type_param.pixels_per_pclk = 2;
data_type_param.bits_per_pclk = 32;
break;
default:
DRM_ERROR("DSI: Invalid data_mode %d\n", data_mode);
return -EINVAL;
}
break;
case DSI_LP_DT_LPPS_YCBCR422_20B:
case DSI_LP_DT_PPS_YCBCR422_24B:
data_type_param.size_constraint_pixels = 2;
data_type_param.size_constraint_bytes = 6;
switch (data_mode) {
/* Case 0 not supported according to MDK */
case 1:
case 2:
case 3:
data_type_param.pixels_per_pclk = 1;
data_type_param.bits_per_pclk = 24;
break;
default:
DRM_ERROR("DSI: Invalid data_mode %d\n", data_mode);
return -EINVAL;
}
break;
case DSI_LP_DT_PPS_RGB565_16B:
data_type_param.size_constraint_pixels = 1;
data_type_param.size_constraint_bytes = 2;
switch (data_mode) {
case 0:
case 1:
data_type_param.pixels_per_pclk = 1;
data_type_param.bits_per_pclk = 16;
break;
case 2:
case 3:
data_type_param.pixels_per_pclk = 2;
data_type_param.bits_per_pclk = 32;
break;
default:
DRM_ERROR("DSI: Invalid data_mode %d\n", data_mode);
return -EINVAL;
}
break;
case DSI_LP_DT_PPS_RGB666_18B:
data_type_param.size_constraint_pixels = 4;
data_type_param.size_constraint_bytes = 9;
data_type_param.bits_per_pclk = 18;
data_type_param.pixels_per_pclk = 1;
break;
case DSI_LP_DT_LPPS_RGB666_18B:
case DSI_LP_DT_PPS_RGB888_24B:
data_type_param.size_constraint_pixels = 1;
data_type_param.size_constraint_bytes = 3;
data_type_param.bits_per_pclk = 24;
data_type_param.pixels_per_pclk = 1;
break;
case DSI_LP_DT_PPS_RGB101010_30B:
data_type_param.size_constraint_pixels = 4;
data_type_param.size_constraint_bytes = 15;
data_type_param.bits_per_pclk = 30;
data_type_param.pixels_per_pclk = 1;
break;
default:
DRM_ERROR("DSI: Invalid data_type %d\n", data_type);
return -EINVAL;
}
*params = data_type_param;
return 0;
}
static u32 compute_wc(u32 width_px, u8 size_constr_p, u8 size_constr_b)
{
/* Calculate the word count for each long packet */
return (((width_px / size_constr_p) * size_constr_b) & 0xffff);
}
static u32 compute_unpacked_bytes(u32 wc, u8 bits_per_pclk)
{
/* Number of PCLK cycles needed to transfer a line;
 * with each PCLK cycle, 4 bytes are sent through the PPL module,
 * hence the final multiplication by 4 to get the unpacked byte count.
 */
return ((wc * 8) / bits_per_pclk) * 4;
}
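/*
 * Worked example: for DSI_LP_DT_PPS_RGB888_24B at 1920 pixels/line the
 * size constraints are 1 pixel / 3 bytes, so wc = 1920 * 3 = 5760 bytes
 * and the unpacked size is (5760 * 8 / 24) * 4 = 7680 bytes.
 */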
static u32 mipi_tx_fg_section_cfg_regs(struct kmb_dsi *kmb_dsi,
u8 frame_id, u8 section,
u32 height_lines, u32 unpacked_bytes,
struct mipi_tx_frame_sect_phcfg *ph_cfg)
{
u32 cfg = 0;
u32 ctrl_no = MIPI_CTRL6;
u32 reg_adr;
/* Frame section packet header */
/* Word count bits [15:0] */
cfg = (ph_cfg->wc & MIPI_TX_SECT_WC_MASK) << 0;
/* Data type (bits [21:16]) */
cfg |= ((ph_cfg->data_type & MIPI_TX_SECT_DT_MASK)
<< MIPI_TX_SECT_DT_SHIFT);
/* Virtual channel (bits [23:22]) */
cfg |= ((ph_cfg->vchannel & MIPI_TX_SECT_VC_MASK)
<< MIPI_TX_SECT_VC_SHIFT);
/* Data mode (bits [24:25]) */
cfg |= ((ph_cfg->data_mode & MIPI_TX_SECT_DM_MASK)
<< MIPI_TX_SECT_DM_SHIFT);
if (ph_cfg->dma_packed)
cfg |= MIPI_TX_SECT_DMA_PACKED;
dev_dbg(kmb_dsi->dev,
"ctrl=%d frame_id=%d section=%d cfg=%x packed=%d\n",
ctrl_no, frame_id, section, cfg, ph_cfg->dma_packed);
kmb_write_mipi(kmb_dsi,
(MIPI_TXm_HS_FGn_SECTo_PH(ctrl_no, frame_id, section)),
cfg);
/* Unpacked bytes */
/* There are 4 frame generators and each fg has 4 sections
* There are 2 registers for unpacked bytes (# bytes each
* section occupies in memory)
* REG_UNPACKED_BYTES0: [15:0]-BYTES0, [31:16]-BYTES1
* REG_UNPACKED_BYTES1: [15:0]-BYTES2, [31:16]-BYTES3
*/
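/*
 * For illustration: section 2 of a frame lands in REG_UNPACKED_BYTES1
 * (reg_adr = base + 4) bits [15:0], while section 3 uses bits [31:16]
 * of the same register.
 */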
reg_adr =
MIPI_TXm_HS_FGn_SECT_UNPACKED_BYTES0(ctrl_no,
frame_id) + (section / 2) * 4;
kmb_write_bits_mipi(kmb_dsi, reg_adr, (section % 2) * 16, 16,
unpacked_bytes);
dev_dbg(kmb_dsi->dev,
"unpacked_bytes = %d, wordcount = %d\n", unpacked_bytes,
ph_cfg->wc);
/* Line config */
reg_adr = MIPI_TXm_HS_FGn_SECTo_LINE_CFG(ctrl_no, frame_id, section);
kmb_write_mipi(kmb_dsi, reg_adr, height_lines);
return 0;
}
static u32 mipi_tx_fg_section_cfg(struct kmb_dsi *kmb_dsi,
u8 frame_id, u8 section,
struct mipi_tx_frame_section_cfg *frame_scfg,
u32 *bits_per_pclk, u32 *wc)
{
u32 ret = 0;
u32 unpacked_bytes;
struct mipi_data_type_params data_type_parameters;
struct mipi_tx_frame_sect_phcfg ph_cfg;
ret = mipi_get_datatype_params(frame_scfg->data_type,
frame_scfg->data_mode,
&data_type_parameters);
if (ret)
return ret;
/* Packet width has to be a multiple of the minimum packet width
* (in pixels) set for each data type
*/
if (frame_scfg->width_pixels %
data_type_parameters.size_constraint_pixels != 0)
return -EINVAL;
*wc = compute_wc(frame_scfg->width_pixels,
data_type_parameters.size_constraint_pixels,
data_type_parameters.size_constraint_bytes);
unpacked_bytes = compute_unpacked_bytes(*wc,
data_type_parameters.bits_per_pclk);
ph_cfg.wc = *wc;
ph_cfg.data_mode = frame_scfg->data_mode;
ph_cfg.data_type = frame_scfg->data_type;
ph_cfg.dma_packed = frame_scfg->dma_packed;
ph_cfg.vchannel = frame_id;
mipi_tx_fg_section_cfg_regs(kmb_dsi, frame_id, section,
frame_scfg->height_lines,
unpacked_bytes, &ph_cfg);
/* Caller needs bits_per_pclk for additional calculations */
*bits_per_pclk = data_type_parameters.bits_per_pclk;
return 0;
}
#define CLK_DIFF_LOW 50
#define CLK_DIFF_HI 60
#define SYSCLK_500 500
static void mipi_tx_fg_cfg_regs(struct kmb_dsi *kmb_dsi, u8 frame_gen,
struct mipi_tx_frame_timing_cfg *fg_cfg)
{
u32 sysclk;
u32 ppl_llp_ratio;
u32 ctrl_no = MIPI_CTRL6, reg_adr, val, offset;
/* 500 Mhz system clock minus 50 to account for the difference in
* MIPI clock speed in RTL tests
*/
if (kmb_dsi->sys_clk_mhz == SYSCLK_500) {
sysclk = kmb_dsi->sys_clk_mhz - CLK_DIFF_LOW;
} else {
/* 700 MHz clk */
sysclk = kmb_dsi->sys_clk_mhz - CLK_DIFF_HI;
}
/* PPL-Pixel Packing Layer, LLP-Low Level Protocol
 * Frame generator timing parameters are clocked on the system clock,
 * whereas the equivalent parameters in the LLP blocks are clocked
 * on the LLP Tx clock from the D-PHY - the BYTE clock
 */
/* Multiply by 1000 to maintain precision */
ppl_llp_ratio = ((fg_cfg->bpp / 8) * sysclk * 1000) /
((fg_cfg->lane_rate_mbps / 8) * fg_cfg->active_lanes);
dev_dbg(kmb_dsi->dev, "ppl_llp_ratio=%d\n", ppl_llp_ratio);
dev_dbg(kmb_dsi->dev, "bpp=%d sysclk=%d lane-rate=%d active-lanes=%d\n",
fg_cfg->bpp, sysclk, fg_cfg->lane_rate_mbps,
fg_cfg->active_lanes);
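/*
 * For illustration (hypothetical values): bpp = 24, sysclk = 450 (a
 * 500 MHz system clock minus CLK_DIFF_LOW), an 800 Mbps lane rate and
 * 4 active lanes give ppl_llp_ratio = (3 * 450 * 1000) / (100 * 4) =
 * 3375, i.e. ~3.375 scaled by 1000.
 */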
/* Frame generator number of lines */
reg_adr = MIPI_TXm_HS_FGn_NUM_LINES(ctrl_no, frame_gen);
kmb_write_mipi(kmb_dsi, reg_adr, fg_cfg->v_active);
/* vsync width
* There are 2 registers for vsync width (VSA in lines for
* channels 0-3)
* REG_VSYNC_WIDTH0: [15:0]-VSA for channel0, [31:16]-VSA for channel1
* REG_VSYNC_WIDTH1: [15:0]-VSA for channel2, [31:16]-VSA for channel3
*/
offset = (frame_gen % 2) * 16;
reg_adr = MIPI_TXm_HS_VSYNC_WIDTHn(ctrl_no, frame_gen / 2);
kmb_write_bits_mipi(kmb_dsi, reg_adr, offset, 16, fg_cfg->vsync_width);
/* vertical backporch (vbp) */
reg_adr = MIPI_TXm_HS_V_BACKPORCHESn(ctrl_no, frame_gen / 2);
kmb_write_bits_mipi(kmb_dsi, reg_adr, offset, 16, fg_cfg->v_backporch);
/* vertical frontporch (vfp) */
reg_adr = MIPI_TXm_HS_V_FRONTPORCHESn(ctrl_no, frame_gen / 2);
kmb_write_bits_mipi(kmb_dsi, reg_adr, offset, 16, fg_cfg->v_frontporch);
/* vertical active (vactive) */
reg_adr = MIPI_TXm_HS_V_ACTIVEn(ctrl_no, frame_gen / 2);
kmb_write_bits_mipi(kmb_dsi, reg_adr, offset, 16, fg_cfg->v_active);
/* hsync width */
reg_adr = MIPI_TXm_HS_HSYNC_WIDTHn(ctrl_no, frame_gen);
kmb_write_mipi(kmb_dsi, reg_adr,
(fg_cfg->hsync_width * ppl_llp_ratio) / 1000);
/* horizontal backporch (hbp) */
reg_adr = MIPI_TXm_HS_H_BACKPORCHn(ctrl_no, frame_gen);
kmb_write_mipi(kmb_dsi, reg_adr,
(fg_cfg->h_backporch * ppl_llp_ratio) / 1000);
/* horizontal frontporch (hfp) */
reg_adr = MIPI_TXm_HS_H_FRONTPORCHn(ctrl_no, frame_gen);
kmb_write_mipi(kmb_dsi, reg_adr,
(fg_cfg->h_frontporch * ppl_llp_ratio) / 1000);
/* horizontal active (ha) */
reg_adr = MIPI_TXm_HS_H_ACTIVEn(ctrl_no, frame_gen);
/* convert h_active which is wc in bytes to cycles */
val = (fg_cfg->h_active * sysclk * 1000) /
((fg_cfg->lane_rate_mbps / 8) * fg_cfg->active_lanes);
val /= 1000;
kmb_write_mipi(kmb_dsi, reg_adr, val);
/* llp hsync width */
reg_adr = MIPI_TXm_HS_LLP_HSYNC_WIDTHn(ctrl_no, frame_gen);
kmb_write_mipi(kmb_dsi, reg_adr, fg_cfg->hsync_width * (fg_cfg->bpp / 8));
/* llp h backporch */
reg_adr = MIPI_TXm_HS_LLP_H_BACKPORCHn(ctrl_no, frame_gen);
kmb_write_mipi(kmb_dsi, reg_adr, fg_cfg->h_backporch * (fg_cfg->bpp / 8));
/* llp h frontporch */
reg_adr = MIPI_TXm_HS_LLP_H_FRONTPORCHn(ctrl_no, frame_gen);
kmb_write_mipi(kmb_dsi, reg_adr,
fg_cfg->h_frontporch * (fg_cfg->bpp / 8));
}
static void mipi_tx_fg_cfg(struct kmb_dsi *kmb_dsi, u8 frame_gen,
u8 active_lanes, u32 bpp, u32 wc,
u32 lane_rate_mbps, struct mipi_tx_frame_cfg *fg_cfg)
{
u32 i, fg_num_lines = 0;
struct mipi_tx_frame_timing_cfg fg_t_cfg;
/* Calculate the total frame generator number of
 * lines based on its active sections
 */
for (i = 0; i < MIPI_TX_FRAME_GEN_SECTIONS; i++) {
if (fg_cfg->sections[i])
fg_num_lines += fg_cfg->sections[i]->height_lines;
}
fg_t_cfg.bpp = bpp;
fg_t_cfg.lane_rate_mbps = lane_rate_mbps;
fg_t_cfg.hsync_width = fg_cfg->hsync_width;
fg_t_cfg.h_backporch = fg_cfg->h_backporch;
fg_t_cfg.h_frontporch = fg_cfg->h_frontporch;
fg_t_cfg.h_active = wc;
fg_t_cfg.vsync_width = fg_cfg->vsync_width;
fg_t_cfg.v_backporch = fg_cfg->v_backporch;
fg_t_cfg.v_frontporch = fg_cfg->v_frontporch;
fg_t_cfg.v_active = fg_num_lines;
fg_t_cfg.active_lanes = active_lanes;
/* Apply frame generator timing setting */
mipi_tx_fg_cfg_regs(kmb_dsi, frame_gen, &fg_t_cfg);
}
static void mipi_tx_multichannel_fifo_cfg(struct kmb_dsi *kmb_dsi,
u8 active_lanes, u8 vchannel_id)
{
u32 fifo_size, fifo_rthreshold;
u32 ctrl_no = MIPI_CTRL6;
/* Clear all mc fifo channel sizes and thresholds */
kmb_write_mipi(kmb_dsi, MIPI_TX_HS_MC_FIFO_CTRL_EN, 0);
kmb_write_mipi(kmb_dsi, MIPI_TX_HS_MC_FIFO_CHAN_ALLOC0, 0);
kmb_write_mipi(kmb_dsi, MIPI_TX_HS_MC_FIFO_CHAN_ALLOC1, 0);
kmb_write_mipi(kmb_dsi, MIPI_TX_HS_MC_FIFO_RTHRESHOLD0, 0);
kmb_write_mipi(kmb_dsi, MIPI_TX_HS_MC_FIFO_RTHRESHOLD1, 0);
fifo_size = ((active_lanes > MIPI_D_LANES_PER_DPHY) ?
MIPI_CTRL_4LANE_MAX_MC_FIFO_LOC :
MIPI_CTRL_2LANE_MAX_MC_FIFO_LOC) - 1;
/* MC fifo size for virtual channels 0-3
* REG_MC_FIFO_CHAN_ALLOC0: [8:0]-channel0, [24:16]-channel1
 * REG_MC_FIFO_CHAN_ALLOC1: [8:0]-channel2, [24:16]-channel3
*/
SET_MC_FIFO_CHAN_ALLOC(kmb_dsi, ctrl_no, vchannel_id, fifo_size);
/* Set threshold to half the fifo size, actual size=size*16 */
fifo_rthreshold = ((fifo_size) * 8) & BIT_MASK_16;
SET_MC_FIFO_RTHRESHOLD(kmb_dsi, ctrl_no, vchannel_id, fifo_rthreshold);
/* Enable the MC FIFO channel corresponding to the Virtual Channel */
kmb_set_bit_mipi(kmb_dsi, MIPI_TXm_HS_MC_FIFO_CTRL_EN(ctrl_no),
vchannel_id);
}
static void mipi_tx_ctrl_cfg(struct kmb_dsi *kmb_dsi, u8 fg_id,
struct mipi_ctrl_cfg *ctrl_cfg)
{
u32 sync_cfg = 0, ctrl = 0, fg_en;
u32 ctrl_no = MIPI_CTRL6;
/* MIPI_TX_HS_SYNC_CFG */
if (ctrl_cfg->tx_ctrl_cfg.line_sync_pkt_en)
sync_cfg |= LINE_SYNC_PKT_ENABLE;
if (ctrl_cfg->tx_ctrl_cfg.frame_counter_active)
sync_cfg |= FRAME_COUNTER_ACTIVE;
if (ctrl_cfg->tx_ctrl_cfg.line_counter_active)
sync_cfg |= LINE_COUNTER_ACTIVE;
if (ctrl_cfg->tx_ctrl_cfg.tx_dsi_cfg->v_blanking)
sync_cfg |= DSI_V_BLANKING;
if (ctrl_cfg->tx_ctrl_cfg.tx_dsi_cfg->hsa_blanking)
sync_cfg |= DSI_HSA_BLANKING;
if (ctrl_cfg->tx_ctrl_cfg.tx_dsi_cfg->hbp_blanking)
sync_cfg |= DSI_HBP_BLANKING;
if (ctrl_cfg->tx_ctrl_cfg.tx_dsi_cfg->hfp_blanking)
sync_cfg |= DSI_HFP_BLANKING;
if (ctrl_cfg->tx_ctrl_cfg.tx_dsi_cfg->sync_pulse_eventn)
sync_cfg |= DSI_SYNC_PULSE_EVENTN;
if (ctrl_cfg->tx_ctrl_cfg.tx_dsi_cfg->lpm_first_vsa_line)
sync_cfg |= DSI_LPM_FIRST_VSA_LINE;
if (ctrl_cfg->tx_ctrl_cfg.tx_dsi_cfg->lpm_last_vfp_line)
sync_cfg |= DSI_LPM_LAST_VFP_LINE;
/* Enable frame generator */
fg_en = 1 << fg_id;
sync_cfg |= FRAME_GEN_EN(fg_en);
if (ctrl_cfg->tx_ctrl_cfg.tx_always_use_hact)
sync_cfg |= ALWAYS_USE_HACT(fg_en);
if (ctrl_cfg->tx_ctrl_cfg.tx_hact_wait_stop)
sync_cfg |= HACT_WAIT_STOP(fg_en);
dev_dbg(kmb_dsi->dev, "sync_cfg=%d fg_en=%d\n", sync_cfg, fg_en);
/* MIPI_TX_HS_CTRL */
/* type:DSI, source:LCD */
ctrl = HS_CTRL_EN | TX_SOURCE;
ctrl |= LCD_VC(fg_id);
ctrl |= ACTIVE_LANES(ctrl_cfg->active_lanes - 1);
if (ctrl_cfg->tx_ctrl_cfg.tx_dsi_cfg->eotp_en)
ctrl |= DSI_EOTP_EN;
if (ctrl_cfg->tx_ctrl_cfg.tx_dsi_cfg->hfp_blank_en)
ctrl |= DSI_CMD_HFP_EN;
/* 67 ns stop time */
ctrl |= HSEXIT_CNT(0x43);
kmb_write_mipi(kmb_dsi, MIPI_TXm_HS_SYNC_CFG(ctrl_no), sync_cfg);
kmb_write_mipi(kmb_dsi, MIPI_TXm_HS_CTRL(ctrl_no), ctrl);
}
static u32 mipi_tx_init_cntrl(struct kmb_dsi *kmb_dsi,
struct mipi_ctrl_cfg *ctrl_cfg)
{
u32 ret = 0;
u8 active_vchannels = 0;
u8 frame_id, sect;
u32 bits_per_pclk = 0;
u32 word_count = 0;
struct mipi_tx_frame_cfg *frame;
/* This is the order to initialize MIPI TX:
* 1. set frame section parameters
* 2. set frame specific parameters
* 3. connect lcd to mipi
* 4. multi channel fifo cfg
* 5. set mipitxcctrlcfg
*/
for (frame_id = 0; frame_id < 4; frame_id++) {
frame = ctrl_cfg->tx_ctrl_cfg.frames[frame_id];
/* Find valid frame, assume only one valid frame */
if (!frame)
continue;
/* Frame Section configuration */
/* TODO - assume there is only one valid section in a frame,
* so bits_per_pclk and word_count are only set once
*/
for (sect = 0; sect < MIPI_CTRL_VIRTUAL_CHANNELS; sect++) {
if (!frame->sections[sect])
continue;
ret = mipi_tx_fg_section_cfg(kmb_dsi, frame_id, sect,
frame->sections[sect],
&bits_per_pclk,
&word_count);
if (ret)
return ret;
}
/* Set frame specific parameters */
mipi_tx_fg_cfg(kmb_dsi, frame_id, ctrl_cfg->active_lanes,
bits_per_pclk, word_count,
ctrl_cfg->lane_rate_mbps, frame);
active_vchannels++;
/* Stop iterating as only one virtual channel
* shall be used for LCD connection
*/
break;
}
if (active_vchannels == 0)
return -EINVAL;
/* Multi-Channel FIFO Configuration */
mipi_tx_multichannel_fifo_cfg(kmb_dsi, ctrl_cfg->active_lanes, frame_id);
/* Frame Generator Enable */
mipi_tx_ctrl_cfg(kmb_dsi, frame_id, ctrl_cfg);
return ret;
}
static void test_mode_send(struct kmb_dsi *kmb_dsi, u32 dphy_no,
u32 test_code, u32 test_data)
{
/* Steps to send test code:
* - set testclk HIGH
* - set testdin with test code
* - set testen HIGH
* - set testclk LOW
* - set testen LOW
*/
/* Set testclk high */
SET_DPHY_TEST_CTRL1_CLK(kmb_dsi, dphy_no);
/* Set testdin */
SET_TEST_DIN0_3(kmb_dsi, dphy_no, test_code);
/* Set testen high */
SET_DPHY_TEST_CTRL1_EN(kmb_dsi, dphy_no);
/* Set testclk low */
CLR_DPHY_TEST_CTRL1_CLK(kmb_dsi, dphy_no);
/* Set testen low */
CLR_DPHY_TEST_CTRL1_EN(kmb_dsi, dphy_no);
if (test_code) {
/* Steps to send test data:
* - set testen LOW
* - set testclk LOW
* - set testdin with data
* - set testclk HIGH
*/
/* Set testen low */
CLR_DPHY_TEST_CTRL1_EN(kmb_dsi, dphy_no);
/* Set testclk low */
CLR_DPHY_TEST_CTRL1_CLK(kmb_dsi, dphy_no);
/* Set data in testdin */
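/* Each 32-bit TEST_DIN register serves four D-PHYs, one byte per PHY,
 * hence the dphy_no / 4 register offset and dphy_no % 4 byte shift.
 */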
kmb_write_mipi(kmb_dsi,
DPHY_TEST_DIN0_3 + ((dphy_no / 0x4) * 0x4),
test_data << ((dphy_no % 4) * 8));
/* Set testclk high */
SET_DPHY_TEST_CTRL1_CLK(kmb_dsi, dphy_no);
}
}
static inline void
set_test_mode_src_osc_freq_target_low_bits(struct kmb_dsi *kmb_dsi,
u32 dphy_no,
u32 freq)
{
/* Typical rise/fall time=166, refer Table 1207 databook,
* sr_osc_freq_target[7:0]
*/
test_mode_send(kmb_dsi, dphy_no, TEST_CODE_SLEW_RATE_DDL_CYCLES,
(freq & 0x7f));
}
static inline void
set_test_mode_src_osc_freq_target_hi_bits(struct kmb_dsi *kmb_dsi,
u32 dphy_no,
u32 freq)
{
u32 data;
/* Flag this as high nibble */
data = ((freq >> 6) & 0x1f) | (1 << 7);
/* Typical rise/fall time=166, refer Table 1207 databook,
* sr_osc_freq_target[11:7]
*/
test_mode_send(kmb_dsi, dphy_no, TEST_CODE_SLEW_RATE_DDL_CYCLES, data);
}
static void mipi_tx_get_vco_params(struct vco_params *vco)
{
int i;
for (i = 0; i < ARRAY_SIZE(vco_table); i++) {
if (vco->freq < vco_table[i].freq) {
*vco = vco_table[i];
return;
}
}
WARN_ONCE(1, "Invalid vco freq = %u for PLL setup\n", vco->freq);
}
static void mipi_tx_pll_setup(struct kmb_dsi *kmb_dsi, u32 dphy_no,
u32 ref_clk_mhz, u32 target_freq_mhz)
{
u32 best_n = 0, best_m = 0;
u32 n = 0, m = 0, div = 0, delta, freq = 0, t_freq;
u32 best_freq_delta = 3000;
/* pll_ref_clk: - valid range: 2~64 MHz; Typically 24 MHz
* Fvco: - valid range: 320~1250 MHz (Gen3 D-PHY)
* Fout: - valid range: 40~1250 MHz (Gen3 D-PHY)
* n: - valid range [0 15]
* N: - N = n + 1
* -valid range: [1 16]
* -conditions: - (pll_ref_clk / N) >= 2 MHz
* -(pll_ref_clk / N) <= 8 MHz
* m: valid range [62 623]
* M: - M = m + 2
* -valid range [64 625]
* -Fvco = (M/N) * pll_ref_clk
*/
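/* Worked example (illustrative only): with pll_ref_clk = 24 MHz and a
 * target Fout of about 445 MHz, n = 3 (N = 4) gives
 * pll_ref_clk / N = 6 MHz (inside the 2..8 MHz window) and m = 72
 * (M = 74) gives Fvco = (74 / 4) * 24 = 444 MHz - one valid candidate
 * the search below would consider.
 */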
struct vco_params vco_p = {
.range = 0,
.divider = 1,
};
vco_p.freq = target_freq_mhz;
mipi_tx_get_vco_params(&vco_p);
/* Search pll n parameter */
for (n = PLL_N_MIN; n <= PLL_N_MAX; n++) {
/* Calculate the pll input frequency division ratio
* multiply by 1000 for precision -
* no floating point, add n for rounding
*/
div = ((ref_clk_mhz * 1000) + n) / (n + 1);
/* Found a valid n parameter */
if ((div < 2000 || div > 8000))
continue;
/* Search pll m parameter */
for (m = PLL_M_MIN; m <= PLL_M_MAX; m++) {
/* Calculate the Fvco(DPHY PLL output frequency)
* using the current n,m params
*/
freq = div * (m + 2);
freq /= 1000;
/* Trim the potential pll freq to max supported */
if (freq > PLL_FVCO_MAX)
continue;
delta = abs(freq - target_freq_mhz);
/* Select the best (closest to target pll freq)
* n,m parameters so far
*/
if (delta < best_freq_delta) {
best_n = n;
best_m = m;
best_freq_delta = delta;
}
}
}
/* Program vco_cntrl parameter
* PLL_VCO_Control[5:0] = pll_vco_cntrl_ovr,
* PLL_VCO_Control[6] = pll_vco_cntrl_ovr_en
*/
test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_VCO_CTRL, (vco_p.range
| (1 << 6)));
/* Program m, n pll parameters */
dev_dbg(kmb_dsi->dev, "m = %d n = %d\n", best_m, best_n);
/* PLL_Input_Divider_Ratio[3:0] = pll_n_ovr */
test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_INPUT_DIVIDER,
(best_n & 0x0f));
/* m - low nibble PLL_Loop_Divider_Ratio[4:0]
* pll_m_ovr[4:0]
*/
test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_FEEDBACK_DIVIDER,
(best_m & 0x1f));
/* m - high nibble PLL_Loop_Divider_Ratio[4:0]
* pll_m_ovr[9:5]
*/
test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_FEEDBACK_DIVIDER,
((best_m >> 5) & 0x1f) | PLL_FEEDBACK_DIVIDER_HIGH);
/* Enable overwrite of n,m parameters :pll_n_ovr_en, pll_m_ovr_en */
test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_OUTPUT_CLK_SEL,
(PLL_N_OVR_EN | PLL_M_OVR_EN));
/* Program Charge-Pump parameters */
/* pll_prop_cntrl-fixed values for prop_cntrl from DPHY doc */
t_freq = target_freq_mhz * vco_p.divider;
test_mode_send(kmb_dsi, dphy_no,
TEST_CODE_PLL_PROPORTIONAL_CHARGE_PUMP_CTRL,
((t_freq > 1150) ? 0x0C : 0x0B));
/* pll_int_cntrl-fixed value for int_cntrl from DPHY doc */
test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_INTEGRAL_CHARGE_PUMP_CTRL,
0x00);
/* pll_gmp_cntrl-fixed value for gmp_cntrl from DPHY doc */
test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_GMP_CTRL, 0x10);
/* pll_cpbias_cntrl-fixed value for cpbias_cntrl from DPHY doc */
test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_CHARGE_PUMP_BIAS, 0x10);
/* pll_th1 -Lock Detector Phase error threshold,
* document gives fixed value
*/
test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_PHASE_ERR_CTRL, 0x02);
/* PLL Lock Configuration */
/* pll_th2 - Lock Filter length, document gives fixed value */
test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_LOCK_FILTER, 0x60);
/* pll_th3- PLL Unlocking filter, document gives fixed value */
test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_UNLOCK_FILTER, 0x03);
/* pll_lock_sel-PLL Lock Detector Selection,
* document gives fixed value
*/
test_mode_send(kmb_dsi, dphy_no, TEST_CODE_PLL_LOCK_DETECTOR, 0x02);
}
static void set_slewrate_gt_1500(struct kmb_dsi *kmb_dsi, u32 dphy_no)
{
u32 test_code = 0, test_data = 0;
/* Bypass slew rate calibration algorithm
 * bits[1:0] = srcal_en_ovr_en, srcal_en_ovr
*/
test_code = TEST_CODE_SLEW_RATE_OVERRIDE_CTRL;
test_data = 0x02;
test_mode_send(kmb_dsi, dphy_no, test_code, test_data);
/* Disable slew rate calibration */
test_code = TEST_CODE_SLEW_RATE_DDL_LOOP_CTRL;
test_data = 0x00;
test_mode_send(kmb_dsi, dphy_no, test_code, test_data);
}
static void set_slewrate_gt_1000(struct kmb_dsi *kmb_dsi, u32 dphy_no)
{
u32 test_code = 0, test_data = 0;
/* BitRate: > 1 Gbps && <= 1.5 Gbps: - slew rate control ON
* typical rise/fall times: 166 ps
*/
/* Do not bypass slew rate calibration algorithm
 * bits[1:0] = srcal_en_ovr_en, srcal_en_ovr, bit[6] = sr_range
*/
test_code = TEST_CODE_SLEW_RATE_OVERRIDE_CTRL;
test_data = (0x03 | (1 << 6));
test_mode_send(kmb_dsi, dphy_no, test_code, test_data);
/* Enable slew rate calibration */
test_code = TEST_CODE_SLEW_RATE_DDL_LOOP_CTRL;
test_data = 0x01;
test_mode_send(kmb_dsi, dphy_no, test_code, test_data);
/* Set sr_osc_freq_target[6:0] low nibble
* typical rise/fall time=166, refer Table 1207 databook
*/
test_code = TEST_CODE_SLEW_RATE_DDL_CYCLES;
test_data = (0x72f & 0x7f);
test_mode_send(kmb_dsi, dphy_no, test_code, test_data);
/* Set sr_osc_freq_target[11:7] high nibble
* Typical rise/fall time=166, refer Table 1207 databook
*/
test_code = TEST_CODE_SLEW_RATE_DDL_CYCLES;
test_data = ((0x72f >> 6) & 0x1f) | (1 << 7);
test_mode_send(kmb_dsi, dphy_no, test_code, test_data);
}
static void set_slewrate_lt_1000(struct kmb_dsi *kmb_dsi, u32 dphy_no)
{
u32 test_code = 0, test_data = 0;
/* lane_rate_mbps <= 1000 Mbps
* BitRate: <= 1 Gbps:
* - slew rate control ON
* - typical rise/fall times: 225 ps
*/
/* Do not bypass slew rate calibration algorithm */
test_code = TEST_CODE_SLEW_RATE_OVERRIDE_CTRL;
test_data = (0x03 | (1 << 6));
test_mode_send(kmb_dsi, dphy_no, test_code, test_data);
/* Enable slew rate calibration */
test_code = TEST_CODE_SLEW_RATE_DDL_LOOP_CTRL;
test_data = 0x01;
test_mode_send(kmb_dsi, dphy_no, test_code, test_data);
/* Typical rise/fall time=225, refer Table 1207 databook */
test_code = TEST_CODE_SLEW_RATE_DDL_CYCLES;
test_data = (0x523 & 0x7f);
test_mode_send(kmb_dsi, dphy_no, test_code, test_data);
/* Set sr_osc_freq_target[11:7] high nibble */
test_code = TEST_CODE_SLEW_RATE_DDL_CYCLES;
test_data = ((0x523 >> 6) & 0x1f) | (1 << 7);
test_mode_send(kmb_dsi, dphy_no, test_code, test_data);
}
static void setup_pll(struct kmb_dsi *kmb_dsi, u32 dphy_no,
struct mipi_ctrl_cfg *cfg)
{
u32 test_code = 0, test_data = 0;
/* Set PLL regulator in bypass */
test_code = TEST_CODE_PLL_ANALOG_PROG;
test_data = 0x01;
test_mode_send(kmb_dsi, dphy_no, test_code, test_data);
/* PLL Parameters Setup */
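/* The lane_rate_mbps / 2 target below presumably reflects the DDR
 * nature of the D-PHY HS clock (data on both edges), so the PLL runs
 * at half the per-lane bit rate.
 */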
mipi_tx_pll_setup(kmb_dsi, dphy_no, cfg->ref_clk_khz / 1000,
cfg->lane_rate_mbps / 2);
/* Set clksel */
kmb_write_bits_mipi(kmb_dsi, DPHY_INIT_CTRL1, PLL_CLKSEL_0, 2, 0x01);
/* Set pll_shadow_control */
kmb_set_bit_mipi(kmb_dsi, DPHY_INIT_CTRL1, PLL_SHADOW_CTRL);
}
static void set_lane_data_rate(struct kmb_dsi *kmb_dsi, u32 dphy_no,
struct mipi_ctrl_cfg *cfg)
{
u32 i, test_code = 0, test_data = 0;
for (i = 0; i < MIPI_DPHY_DEFAULT_BIT_RATES; i++) {
if (mipi_hs_freq_range[i].default_bit_rate_mbps <
cfg->lane_rate_mbps)
continue;
/* Send the test code and data */
/* bit[6:0] = hsfreqrange_ovr bit[7] = hsfreqrange_ovr_en */
test_code = TEST_CODE_HS_FREQ_RANGE_CFG;
test_data = (mipi_hs_freq_range[i].hsfreqrange_code & 0x7f) |
(1 << 7);
test_mode_send(kmb_dsi, dphy_no, test_code, test_data);
break;
}
}
static void dphy_init_sequence(struct kmb_dsi *kmb_dsi,
struct mipi_ctrl_cfg *cfg, u32 dphy_no,
int active_lanes, enum dphy_mode mode)
{
u32 test_code = 0, test_data = 0, val;
/* Set D-PHY in shutdown mode */
/* Assert RSTZ signal */
CLR_DPHY_INIT_CTRL0(kmb_dsi, dphy_no, RESETZ);
/* Assert SHUTDOWNZ signal */
CLR_DPHY_INIT_CTRL0(kmb_dsi, dphy_no, SHUTDOWNZ);
val = kmb_read_mipi(kmb_dsi, DPHY_INIT_CTRL0);
/* Init D-PHY_n
* Pulse testclear signal to make sure the d-phy configuration
* starts from a clean base
*/
CLR_DPHY_TEST_CTRL0(kmb_dsi, dphy_no);
ndelay(15);
SET_DPHY_TEST_CTRL0(kmb_dsi, dphy_no);
ndelay(15);
CLR_DPHY_TEST_CTRL0(kmb_dsi, dphy_no);
ndelay(15);
/* Set mastermacro bit - Master or slave mode */
test_code = TEST_CODE_MULTIPLE_PHY_CTRL;
/* DPHY has its own clock lane enabled (master) */
if (mode == MIPI_DPHY_MASTER)
test_data = 0x01;
else
test_data = 0x00;
/* Send the test code and data */
test_mode_send(kmb_dsi, dphy_no, test_code, test_data);
/* Set the lane data rate */
set_lane_data_rate(kmb_dsi, dphy_no, cfg);
/* High-Speed Tx Slew Rate Calibration
* BitRate: > 1.5 Gbps && <= 2.5 Gbps: slew rate control OFF
*/
if (cfg->lane_rate_mbps > 1500)
set_slewrate_gt_1500(kmb_dsi, dphy_no);
else if (cfg->lane_rate_mbps > 1000)
set_slewrate_gt_1000(kmb_dsi, dphy_no);
else
set_slewrate_lt_1000(kmb_dsi, dphy_no);
/* Set cfgclkfreqrange */
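/* e.g. assuming a 24 MHz cfg clock, ((24 - 17) * 4) = 28 = 0x1c */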
val = (((cfg->cfg_clk_khz / 1000) - 17) * 4) & 0x3f;
SET_DPHY_FREQ_CTRL0_3(kmb_dsi, dphy_no, val);
/* Enable config clk for the corresponding d-phy */
kmb_set_bit_mipi(kmb_dsi, DPHY_CFG_CLK_EN, dphy_no);
/* PLL setup */
if (mode == MIPI_DPHY_MASTER)
setup_pll(kmb_dsi, dphy_no, cfg);
/* Send NORMAL OPERATION test code */
test_code = 0x0;
test_data = 0x0;
test_mode_send(kmb_dsi, dphy_no, test_code, test_data);
/* Configure BASEDIR for data lanes
* NOTE: basedir only applies to LANE_0 of each D-PHY.
* The other lanes keep their direction based on the D-PHY type,
* either Rx or Tx.
* bits[5:0] - BaseDir: 1 = Rx
* bits[9:6] - BaseDir: 0 = Tx
*/
kmb_write_bits_mipi(kmb_dsi, DPHY_INIT_CTRL2, 0, 9, 0x03f);
ndelay(15);
/* Enable CLOCK LANE
* Clock lane should be enabled regardless of the direction
* set for the D-PHY (Rx/Tx)
*/
kmb_set_bit_mipi(kmb_dsi, DPHY_INIT_CTRL2, 12 + dphy_no);
/* Enable DATA LANES */
kmb_write_bits_mipi(kmb_dsi, DPHY_ENABLE, dphy_no * 2, 2,
((1 << active_lanes) - 1));
ndelay(15);
/* Take D-PHY out of shutdown mode */
/* Deassert SHUTDOWNZ signal */
SET_DPHY_INIT_CTRL0(kmb_dsi, dphy_no, SHUTDOWNZ);
ndelay(15);
/* Deassert RSTZ signal */
SET_DPHY_INIT_CTRL0(kmb_dsi, dphy_no, RESETZ);
}
static void dphy_wait_fsm(struct kmb_dsi *kmb_dsi, u32 dphy_no,
enum dphy_tx_fsm fsm_state)
{
enum dphy_tx_fsm val = DPHY_TX_POWERDWN;
int i = 0;
int status = 1;
do {
test_mode_send(kmb_dsi, dphy_no, TEST_CODE_FSM_CONTROL, 0x80);
val = GET_TEST_DOUT4_7(kmb_dsi, dphy_no);
i++;
if (i > TIMEOUT) {
status = 0;
break;
}
} while (val != fsm_state);
dev_dbg(kmb_dsi->dev, "%s: dphy %d val = %x", __func__, dphy_no, val);
dev_dbg(kmb_dsi->dev, "* DPHY %d WAIT_FSM %s *",
dphy_no, status ? "SUCCESS" : "FAILED");
}
static void wait_init_done(struct kmb_dsi *kmb_dsi, u32 dphy_no,
u32 active_lanes)
{
u32 stopstatedata = 0;
u32 data_lanes = (1 << active_lanes) - 1;
int i = 0;
int status = 1;
do {
stopstatedata = GET_STOPSTATE_DATA(kmb_dsi, dphy_no)
& data_lanes;
/* TODO-need to add a time out and return failure */
i++;
if (i > TIMEOUT) {
status = 0;
dev_dbg(kmb_dsi->dev,
"! WAIT_INIT_DONE: TIMING OUT!(err_stat=%d)",
kmb_read_mipi(kmb_dsi, MIPI_DPHY_ERR_STAT6_7));
break;
}
} while (stopstatedata != data_lanes);
dev_dbg(kmb_dsi->dev, "* DPHY %d INIT - %s *",
dphy_no, status ? "SUCCESS" : "FAILED");
}
static void wait_pll_lock(struct kmb_dsi *kmb_dsi, u32 dphy_no)
{
int i = 0;
int status = 1;
do {
/* TODO-need to add a time out and return failure */
i++;
if (i > TIMEOUT) {
status = 0;
dev_dbg(kmb_dsi->dev, "%s: timing out", __func__);
break;
}
} while (!GET_PLL_LOCK(kmb_dsi, dphy_no));
dev_dbg(kmb_dsi->dev, "* PLL Locked for DPHY %d - %s *",
dphy_no, status ? "SUCCESS" : "FAILED");
}
static u32 mipi_tx_init_dphy(struct kmb_dsi *kmb_dsi,
struct mipi_ctrl_cfg *cfg)
{
u32 dphy_no = MIPI_DPHY6;
/* Multiple D-PHYs needed */
if (cfg->active_lanes > MIPI_DPHY_D_LANES) {
/*
 * Initialization for Tx aggregation mode is done as follows:
 * a. start init PHY1
 * b. poll for PHY1 FSM state LOCK
 *    b1. reg addr 0x03[3:0] - state_main[3:0] == 5 (LOCK)
 * c. poll for PHY1 calibrations done:
 *    c1. termination calibration lower section: addr 0x22[5]
 *        - rescal_done
 *    c2. slew rate calibration (if data rate <= 1500 Mbps):
 *        addr 0xA7[3:2] - srcal_done, sr_finished
 * d. start init PHY0
 * e. poll for PHY0 stopstate
 * f. poll for PHY1 stopstate
 */
/* PHY #N+1 ('slave') */
dphy_init_sequence(kmb_dsi, cfg, dphy_no + 1,
(cfg->active_lanes - MIPI_DPHY_D_LANES),
MIPI_DPHY_SLAVE);
dphy_wait_fsm(kmb_dsi, dphy_no + 1, DPHY_TX_LOCK);
/* PHY #N master */
dphy_init_sequence(kmb_dsi, cfg, dphy_no, MIPI_DPHY_D_LANES,
MIPI_DPHY_MASTER);
/* Wait for DPHY init to complete */
wait_init_done(kmb_dsi, dphy_no, MIPI_DPHY_D_LANES);
wait_init_done(kmb_dsi, dphy_no + 1,
cfg->active_lanes - MIPI_DPHY_D_LANES);
wait_pll_lock(kmb_dsi, dphy_no);
wait_pll_lock(kmb_dsi, dphy_no + 1);
dphy_wait_fsm(kmb_dsi, dphy_no, DPHY_TX_IDLE);
} else { /* Single DPHY */
dphy_init_sequence(kmb_dsi, cfg, dphy_no, cfg->active_lanes,
MIPI_DPHY_MASTER);
dphy_wait_fsm(kmb_dsi, dphy_no, DPHY_TX_IDLE);
wait_init_done(kmb_dsi, dphy_no, cfg->active_lanes);
wait_pll_lock(kmb_dsi, dphy_no);
}
return 0;
}
static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi,
struct drm_atomic_state *old_state)
{
struct regmap *msscam;
msscam = syscon_regmap_lookup_by_compatible("intel,keembay-msscam");
if (IS_ERR(msscam)) {
dev_dbg(kmb_dsi->dev, "failed to get msscam syscon");
return;
}
drm_atomic_bridge_chain_enable(adv_bridge, old_state);
/* DISABLE MIPI->CIF CONNECTION */
regmap_write(msscam, MSS_MIPI_CIF_CFG, 0);
/* ENABLE LCD->MIPI CONNECTION */
regmap_write(msscam, MSS_LCD_MIPI_CFG, 1);
/* DISABLE LCD->CIF LOOPBACK */
regmap_write(msscam, MSS_LOOPBACK_CFG, 1);
}
int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode,
int sys_clk_mhz, struct drm_atomic_state *old_state)
{
u64 data_rate;
kmb_dsi->sys_clk_mhz = sys_clk_mhz;
mipi_tx_init_cfg.active_lanes = MIPI_TX_ACTIVE_LANES;
mipi_tx_frame0_sect_cfg.width_pixels = mode->crtc_hdisplay;
mipi_tx_frame0_sect_cfg.height_lines = mode->crtc_vdisplay;
mipitx_frame0_cfg.vsync_width =
mode->crtc_vsync_end - mode->crtc_vsync_start;
mipitx_frame0_cfg.v_backporch =
mode->crtc_vtotal - mode->crtc_vsync_end;
mipitx_frame0_cfg.v_frontporch =
mode->crtc_vsync_start - mode->crtc_vdisplay;
mipitx_frame0_cfg.hsync_width =
mode->crtc_hsync_end - mode->crtc_hsync_start;
mipitx_frame0_cfg.h_backporch =
mode->crtc_htotal - mode->crtc_hsync_end;
mipitx_frame0_cfg.h_frontporch =
mode->crtc_hsync_start - mode->crtc_hdisplay;
/* Lane rate = (vtotal * htotal * fps * bpp) / active_lanes / 1000000
 * to convert to Mbps
 */
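/* For example, a 1920x1080@60 mode with htotal 2200 and vtotal 1125
 * (illustrative CEA timing) works out to
 * (1125 * 2200 * 60 * 24) / 4 / 1000000 = 891 Mbps per lane on 4 lanes.
 */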
data_rate = ((((u32)mode->crtc_vtotal * (u32)mode->crtc_htotal) *
(u32)(drm_mode_vrefresh(mode)) *
MIPI_TX_BPP) / mipi_tx_init_cfg.active_lanes) / 1000000;
dev_dbg(kmb_dsi->dev, "data_rate=%u active_lanes=%d\n",
(u32)data_rate, mipi_tx_init_cfg.active_lanes);
/* When lane rate < 800, modeset fails with 4 lanes,
* so switch to 2 lanes
*/
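/* Illustrative example: a 1280x720@60 CEA mode (1650x750 total) yields
 * roughly 445 Mbps per lane on 4 lanes, so the driver configures
 * 2 lanes at roughly 890 Mbps instead.
 */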
if (data_rate < 800) {
mipi_tx_init_cfg.active_lanes = 2;
mipi_tx_init_cfg.lane_rate_mbps = data_rate * 2;
} else {
mipi_tx_init_cfg.lane_rate_mbps = data_rate;
}
/* Initialize mipi controller */
mipi_tx_init_cntrl(kmb_dsi, &mipi_tx_init_cfg);
/* Dphy initialization */
mipi_tx_init_dphy(kmb_dsi, &mipi_tx_init_cfg);
connect_lcd_to_mipi(kmb_dsi, old_state);
dev_info(kmb_dsi->dev, "mipi hw initialized");
return 0;
}
struct kmb_dsi *kmb_dsi_init(struct platform_device *pdev)
{
struct kmb_dsi *kmb_dsi;
struct device *dev = get_device(&pdev->dev);
kmb_dsi = devm_kzalloc(dev, sizeof(*kmb_dsi), GFP_KERNEL);
if (!kmb_dsi) {
dev_err(dev, "failed to allocate kmb_dsi\n");
return ERR_PTR(-ENOMEM);
}
kmb_dsi->host = dsi_host;
kmb_dsi->host->ops = &kmb_dsi_host_ops;
dsi_device->host = kmb_dsi->host;
kmb_dsi->device = dsi_device;
return kmb_dsi;
}
int kmb_dsi_encoder_init(struct drm_device *dev, struct kmb_dsi *kmb_dsi)
{
struct drm_encoder *encoder;
struct drm_connector *connector;
int ret = 0;
encoder = &kmb_dsi->base;
encoder->possible_crtcs = 1;
encoder->possible_clones = 0;
ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DSI);
if (ret) {
dev_err(kmb_dsi->dev, "Failed to init encoder %d\n", ret);
return ret;
}
/* Link drm_bridge to encoder */
ret = drm_bridge_attach(encoder, adv_bridge, NULL,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret) {
drm_encoder_cleanup(encoder);
return ret;
}
drm_info(dev, "Bridge attached : SUCCESS");
connector = drm_bridge_connector_init(dev, encoder);
if (IS_ERR(connector)) {
DRM_ERROR("Unable to create bridge connector");
drm_encoder_cleanup(encoder);
return PTR_ERR(connector);
}
drm_connector_attach_encoder(connector, encoder);
return 0;
}
int kmb_dsi_map_mmio(struct kmb_dsi *kmb_dsi)
{
struct resource *res;
struct device *dev = kmb_dsi->dev;
res = platform_get_resource_byname(kmb_dsi->pdev, IORESOURCE_MEM,
"mipi");
if (!res) {
dev_err(dev, "failed to get resource for mipi");
return -ENOMEM;
}
kmb_dsi->mipi_mmio = devm_ioremap_resource(dev, res);
if (IS_ERR(kmb_dsi->mipi_mmio)) {
dev_err(dev, "failed to ioremap mipi registers");
return PTR_ERR(kmb_dsi->mipi_mmio);
}
return 0;
}
static int kmb_dsi_clk_enable(struct kmb_dsi *kmb_dsi)
{
int ret;
struct device *dev = kmb_dsi->dev;
ret = clk_prepare_enable(kmb_dsi->clk_mipi);
if (ret) {
dev_err(dev, "Failed to enable MIPI clock: %d\n", ret);
return ret;
}
ret = clk_prepare_enable(kmb_dsi->clk_mipi_ecfg);
if (ret) {
dev_err(dev, "Failed to enable MIPI_ECFG clock: %d\n", ret);
return ret;
}
ret = clk_prepare_enable(kmb_dsi->clk_mipi_cfg);
if (ret) {
dev_err(dev, "Failed to enable MIPI_CFG clock: %d\n", ret);
return ret;
}
dev_info(dev, "SUCCESS : enabled MIPI clocks\n");
return 0;
}
int kmb_dsi_clk_init(struct kmb_dsi *kmb_dsi)
{
struct device *dev = kmb_dsi->dev;
unsigned long clk;
kmb_dsi->clk_mipi = devm_clk_get(dev, "clk_mipi");
if (IS_ERR(kmb_dsi->clk_mipi)) {
dev_err(dev, "devm_clk_get() failed clk_mipi\n");
return PTR_ERR(kmb_dsi->clk_mipi);
}
kmb_dsi->clk_mipi_ecfg = devm_clk_get(dev, "clk_mipi_ecfg");
if (IS_ERR(kmb_dsi->clk_mipi_ecfg)) {
dev_err(dev, "devm_clk_get() failed clk_mipi_ecfg\n");
return PTR_ERR(kmb_dsi->clk_mipi_ecfg);
}
kmb_dsi->clk_mipi_cfg = devm_clk_get(dev, "clk_mipi_cfg");
if (IS_ERR(kmb_dsi->clk_mipi_cfg)) {
dev_err(dev, "devm_clk_get() failed clk_mipi_cfg\n");
return PTR_ERR(kmb_dsi->clk_mipi_cfg);
}
/* Set MIPI clock to 24 MHz */
clk_set_rate(kmb_dsi->clk_mipi, KMB_MIPI_DEFAULT_CLK);
if (clk_get_rate(kmb_dsi->clk_mipi) != KMB_MIPI_DEFAULT_CLK) {
dev_err(dev, "failed to set clk_mipi to %d\n",
KMB_MIPI_DEFAULT_CLK);
return -1;
}
dev_dbg(dev, "clk_mipi = %ld\n", clk_get_rate(kmb_dsi->clk_mipi));
clk = clk_get_rate(kmb_dsi->clk_mipi_ecfg);
if (clk != KMB_MIPI_DEFAULT_CFG_CLK) {
/* Set MIPI_ECFG clock to 24 MHz */
clk_set_rate(kmb_dsi->clk_mipi_ecfg, KMB_MIPI_DEFAULT_CFG_CLK);
clk = clk_get_rate(kmb_dsi->clk_mipi_ecfg);
if (clk != KMB_MIPI_DEFAULT_CFG_CLK) {
dev_err(dev, "failed to set clk_mipi_ecfg to %d\n",
KMB_MIPI_DEFAULT_CFG_CLK);
return -1;
}
}
clk = clk_get_rate(kmb_dsi->clk_mipi_cfg);
if (clk != KMB_MIPI_DEFAULT_CFG_CLK) {
/* Set MIPI_CFG clock to 24 MHz */
clk_set_rate(kmb_dsi->clk_mipi_cfg, KMB_MIPI_DEFAULT_CFG_CLK);
clk = clk_get_rate(kmb_dsi->clk_mipi_cfg);
if (clk != KMB_MIPI_DEFAULT_CFG_CLK) {
dev_err(dev, "failed to set clk_mipi_cfg to %d\n",
KMB_MIPI_DEFAULT_CFG_CLK);
return -1;
}
}
return kmb_dsi_clk_enable(kmb_dsi);
}
| linux-master | drivers/gpu/drm/kmb/kmb_dsi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2018-2020 Intel Corporation
*/
#include <linux/clk.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <drm/drm_modeset_helper_vtables.h>
#include "kmb_drv.h"
#include "kmb_dsi.h"
#include "kmb_plane.h"
#include "kmb_regs.h"
struct kmb_crtc_timing {
u32 vfront_porch;
u32 vback_porch;
u32 vsync_len;
u32 hfront_porch;
u32 hback_porch;
u32 hsync_len;
};
static int kmb_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct kmb_drm_private *kmb = to_kmb(dev);
/* Clear interrupt */
kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_VERT_COMP);
/* Set which interval to generate vertical interrupt */
kmb_write_lcd(kmb, LCD_VSTATUS_COMPARE,
LCD_VSTATUS_COMPARE_VSYNC);
/* Enable vertical interrupt */
kmb_set_bitmask_lcd(kmb, LCD_INT_ENABLE,
LCD_INT_VERT_COMP);
return 0;
}
static void kmb_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct kmb_drm_private *kmb = to_kmb(dev);
/* Clear interrupt */
kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_VERT_COMP);
/* Disable vertical interrupt */
kmb_clr_bitmask_lcd(kmb, LCD_INT_ENABLE,
LCD_INT_VERT_COMP);
}
static const struct drm_crtc_funcs kmb_crtc_funcs = {
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = kmb_crtc_enable_vblank,
.disable_vblank = kmb_crtc_disable_vblank,
};
static void kmb_crtc_set_mode(struct drm_crtc *crtc,
struct drm_atomic_state *old_state)
{
struct drm_device *dev = crtc->dev;
struct drm_display_mode *m = &crtc->state->adjusted_mode;
struct kmb_crtc_timing vm;
struct kmb_drm_private *kmb = to_kmb(dev);
unsigned int val = 0;
/* Initialize mipi */
kmb_dsi_mode_set(kmb->kmb_dsi, m, kmb->sys_clk_mhz, old_state);
drm_info(dev,
"vfp= %d vbp= %d vsync_len=%d hfp=%d hbp=%d hsync_len=%d\n",
m->crtc_vsync_start - m->crtc_vdisplay,
m->crtc_vtotal - m->crtc_vsync_end,
m->crtc_vsync_end - m->crtc_vsync_start,
m->crtc_hsync_start - m->crtc_hdisplay,
m->crtc_htotal - m->crtc_hsync_end,
m->crtc_hsync_end - m->crtc_hsync_start);
val = kmb_read_lcd(kmb, LCD_INT_ENABLE);
kmb_clr_bitmask_lcd(kmb, LCD_INT_ENABLE, val);
kmb_set_bitmask_lcd(kmb, LCD_INT_CLEAR, ~0x0);
vm.vfront_porch = 2;
vm.vback_porch = 2;
vm.vsync_len = 8;
vm.hfront_porch = 0;
vm.hback_porch = 0;
vm.hsync_len = 28;
drm_dbg(dev, "%s : %d active height=%d vbp=%d vfp=%d vsync-w=%d h-active=%d h-bp=%d h-fp=%d hsync-l=%d",
__func__, __LINE__,
m->crtc_vdisplay, vm.vback_porch, vm.vfront_porch,
vm.vsync_len, m->crtc_hdisplay, vm.hback_porch,
vm.hfront_porch, vm.hsync_len);
kmb_write_lcd(kmb, LCD_V_ACTIVEHEIGHT,
m->crtc_vdisplay - 1);
kmb_write_lcd(kmb, LCD_V_BACKPORCH, vm.vback_porch);
kmb_write_lcd(kmb, LCD_V_FRONTPORCH, vm.vfront_porch);
kmb_write_lcd(kmb, LCD_VSYNC_WIDTH, vm.vsync_len - 1);
kmb_write_lcd(kmb, LCD_H_ACTIVEWIDTH,
m->crtc_hdisplay - 1);
kmb_write_lcd(kmb, LCD_H_BACKPORCH, vm.hback_porch);
kmb_write_lcd(kmb, LCD_H_FRONTPORCH, vm.hfront_porch);
kmb_write_lcd(kmb, LCD_HSYNC_WIDTH, vm.hsync_len - 1);
/* This is hardcoded as 0 in the Myriadx code */
kmb_write_lcd(kmb, LCD_VSYNC_START, 0);
kmb_write_lcd(kmb, LCD_VSYNC_END, 0);
/* Back ground color */
kmb_write_lcd(kmb, LCD_BG_COLOUR_LS, 0x4);
if (m->flags & DRM_MODE_FLAG_INTERLACE) {
kmb_write_lcd(kmb,
LCD_VSYNC_WIDTH_EVEN, vm.vsync_len - 1);
kmb_write_lcd(kmb,
LCD_V_BACKPORCH_EVEN, vm.vback_porch);
kmb_write_lcd(kmb,
LCD_V_FRONTPORCH_EVEN, vm.vfront_porch);
kmb_write_lcd(kmb, LCD_V_ACTIVEHEIGHT_EVEN,
m->crtc_vdisplay - 1);
/* This is hardcoded as 10 in the Myriadx code */
kmb_write_lcd(kmb, LCD_VSYNC_START_EVEN, 10);
kmb_write_lcd(kmb, LCD_VSYNC_END_EVEN, 10);
}
kmb_write_lcd(kmb, LCD_TIMING_GEN_TRIG, 1);
kmb_set_bitmask_lcd(kmb, LCD_CONTROL, LCD_CTRL_ENABLE);
kmb_set_bitmask_lcd(kmb, LCD_INT_ENABLE, val);
}
static void kmb_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct kmb_drm_private *kmb = crtc_to_kmb_priv(crtc);
clk_prepare_enable(kmb->kmb_clk.clk_lcd);
kmb_crtc_set_mode(crtc, state);
drm_crtc_vblank_on(crtc);
}
static void kmb_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct kmb_drm_private *kmb = crtc_to_kmb_priv(crtc);
struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, crtc);
/* due to hw limitations, planes need to be off when crtc is off */
drm_atomic_helper_disable_planes_on_crtc(old_state, false);
drm_crtc_vblank_off(crtc);
clk_disable_unprepare(kmb->kmb_clk.clk_lcd);
}
static void kmb_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
struct kmb_drm_private *kmb = to_kmb(dev);
kmb_clr_bitmask_lcd(kmb, LCD_INT_ENABLE,
LCD_INT_VERT_COMP);
}
static void kmb_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
struct kmb_drm_private *kmb = to_kmb(dev);
kmb_set_bitmask_lcd(kmb, LCD_INT_ENABLE,
LCD_INT_VERT_COMP);
spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event) {
if (drm_crtc_vblank_get(crtc) == 0)
drm_crtc_arm_vblank_event(crtc, crtc->state->event);
else
drm_crtc_send_vblank_event(crtc, crtc->state->event);
}
crtc->state->event = NULL;
spin_unlock_irq(&crtc->dev->event_lock);
}
static enum drm_mode_status
kmb_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
int refresh;
struct drm_device *dev = crtc->dev;
int vfp = mode->vsync_start - mode->vdisplay;
if (mode->vdisplay < KMB_CRTC_MAX_HEIGHT) {
drm_dbg(dev, "height = %d less than %d",
mode->vdisplay, KMB_CRTC_MAX_HEIGHT);
return MODE_BAD_VVALUE;
}
if (mode->hdisplay < KMB_CRTC_MAX_WIDTH) {
drm_dbg(dev, "width = %d less than %d",
mode->hdisplay, KMB_CRTC_MAX_WIDTH);
return MODE_BAD_HVALUE;
}
refresh = drm_mode_vrefresh(mode);
if (refresh < KMB_MIN_VREFRESH || refresh > KMB_MAX_VREFRESH) {
drm_dbg(dev, "refresh = %d less than %d or greater than %d",
refresh, KMB_MIN_VREFRESH, KMB_MAX_VREFRESH);
return MODE_BAD;
}
if (vfp < KMB_CRTC_MIN_VFP) {
drm_dbg(dev, "vfp = %d less than %d", vfp, KMB_CRTC_MIN_VFP);
return MODE_BAD;
}
return MODE_OK;
}
static const struct drm_crtc_helper_funcs kmb_crtc_helper_funcs = {
.atomic_begin = kmb_crtc_atomic_begin,
.atomic_enable = kmb_crtc_atomic_enable,
.atomic_disable = kmb_crtc_atomic_disable,
.atomic_flush = kmb_crtc_atomic_flush,
.mode_valid = kmb_crtc_mode_valid,
};
int kmb_setup_crtc(struct drm_device *drm)
{
struct kmb_drm_private *kmb = to_kmb(drm);
struct kmb_plane *primary;
int ret;
primary = kmb_plane_init(drm);
if (IS_ERR(primary))
return PTR_ERR(primary);
ret = drm_crtc_init_with_planes(drm, &kmb->crtc, &primary->base_plane,
NULL, &kmb_crtc_funcs, NULL);
if (ret) {
kmb_plane_destroy(&primary->base_plane);
return ret;
}
drm_crtc_helper_add(&kmb->crtc, &kmb_crtc_helper_funcs);
return 0;
}
| linux-master | drivers/gpu/drm/kmb/kmb_crtc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2018-2020 Intel Corporation
*/
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "kmb_drv.h"
#include "kmb_dsi.h"
#include "kmb_regs.h"
static int kmb_display_clk_enable(struct kmb_drm_private *kmb)
{
int ret = 0;
ret = clk_prepare_enable(kmb->kmb_clk.clk_lcd);
if (ret) {
drm_err(&kmb->drm, "Failed to enable LCD clock: %d\n", ret);
return ret;
}
DRM_INFO("SUCCESS : enabled LCD clocks\n");
return 0;
}
static int kmb_initialize_clocks(struct kmb_drm_private *kmb, struct device *dev)
{
int ret = 0;
struct regmap *msscam;
kmb->kmb_clk.clk_lcd = devm_clk_get(dev, "clk_lcd");
if (IS_ERR(kmb->kmb_clk.clk_lcd)) {
drm_err(&kmb->drm, "clk_get() failed clk_lcd\n");
return PTR_ERR(kmb->kmb_clk.clk_lcd);
}
kmb->kmb_clk.clk_pll0 = devm_clk_get(dev, "clk_pll0");
if (IS_ERR(kmb->kmb_clk.clk_pll0)) {
drm_err(&kmb->drm, "clk_get() failed clk_pll0\n");
return PTR_ERR(kmb->kmb_clk.clk_pll0);
}
kmb->sys_clk_mhz = clk_get_rate(kmb->kmb_clk.clk_pll0) / 1000000;
drm_info(&kmb->drm, "system clk = %d MHz", kmb->sys_clk_mhz);
ret = kmb_dsi_clk_init(kmb->kmb_dsi);
/* Set LCD clock to 200 MHz */
clk_set_rate(kmb->kmb_clk.clk_lcd, KMB_LCD_DEFAULT_CLK);
if (clk_get_rate(kmb->kmb_clk.clk_lcd) != KMB_LCD_DEFAULT_CLK) {
drm_err(&kmb->drm, "failed to set clk_lcd to %d\n",
KMB_LCD_DEFAULT_CLK);
return -1;
}
drm_dbg(&kmb->drm, "clk_lcd = %ld\n", clk_get_rate(kmb->kmb_clk.clk_lcd));
ret = kmb_display_clk_enable(kmb);
if (ret)
return ret;
msscam = syscon_regmap_lookup_by_compatible("intel,keembay-msscam");
if (IS_ERR(msscam)) {
drm_err(&kmb->drm, "failed to get msscam syscon");
return -1;
}
/* Enable MSS_CAM_CLK_CTRL for MIPI TX and LCD */
regmap_update_bits(msscam, MSS_CAM_CLK_CTRL, 0x1fff, 0x1fff);
regmap_update_bits(msscam, MSS_CAM_RSTN_CTRL, 0xffffffff, 0xffffffff);
return 0;
}
static void kmb_display_clk_disable(struct kmb_drm_private *kmb)
{
clk_disable_unprepare(kmb->kmb_clk.clk_lcd);
}
static void __iomem *kmb_map_mmio(struct drm_device *drm,
struct platform_device *pdev,
char *name)
{
struct resource *res;
void __iomem *mem;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
if (!res) {
drm_err(drm, "failed to get resource for %s", name);
return ERR_PTR(-ENOMEM);
}
mem = devm_ioremap_resource(drm->dev, res);
if (IS_ERR(mem))
drm_err(drm, "failed to ioremap %s registers", name);
return mem;
}
static int kmb_hw_init(struct drm_device *drm, unsigned long flags)
{
struct kmb_drm_private *kmb = to_kmb(drm);
struct platform_device *pdev = to_platform_device(drm->dev);
int irq_lcd;
int ret = 0;
/* Map LCD MMIO registers */
kmb->lcd_mmio = kmb_map_mmio(drm, pdev, "lcd");
if (IS_ERR(kmb->lcd_mmio)) {
drm_err(&kmb->drm, "failed to map LCD registers\n");
return -ENOMEM;
}
/* Map MIPI MMIO registers */
ret = kmb_dsi_map_mmio(kmb->kmb_dsi);
if (ret)
return ret;
/* Enable display clocks */
kmb_initialize_clocks(kmb, &pdev->dev);
/* Register irqs here - section 17.3 in databook
* lists LCD at 79 and 82 for MIPI under MSS CPU -
* firmware has redirected 79 to A53 IRQ 33
*/
/* Allocate LCD interrupt resources */
irq_lcd = platform_get_irq(pdev, 0);
if (irq_lcd < 0) {
ret = irq_lcd;
drm_err(&kmb->drm, "irq_lcd not found");
goto setup_fail;
}
/* Get the optional framebuffer memory resource */
ret = of_reserved_mem_device_init(drm->dev);
if (ret && ret != -ENODEV)
return ret;
spin_lock_init(&kmb->irq_lock);
kmb->irq_lcd = irq_lcd;
return 0;
setup_fail:
of_reserved_mem_device_release(drm->dev);
return ret;
}
static const struct drm_mode_config_funcs kmb_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
static int kmb_setup_mode_config(struct drm_device *drm)
{
int ret;
struct kmb_drm_private *kmb = to_kmb(drm);
ret = drmm_mode_config_init(drm);
if (ret)
return ret;
drm->mode_config.min_width = KMB_FB_MIN_WIDTH;
drm->mode_config.min_height = KMB_FB_MIN_HEIGHT;
drm->mode_config.max_width = KMB_FB_MAX_WIDTH;
drm->mode_config.max_height = KMB_FB_MAX_HEIGHT;
drm->mode_config.preferred_depth = 24;
drm->mode_config.funcs = &kmb_mode_config_funcs;
ret = kmb_setup_crtc(drm);
if (ret < 0) {
drm_err(drm, "failed to create crtc\n");
return ret;
}
ret = kmb_dsi_encoder_init(drm, kmb->kmb_dsi);
/* Set the CRTC's port so that the encoder component can find it */
kmb->crtc.port = of_graph_get_port_by_id(drm->dev->of_node, 0);
ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
if (ret < 0) {
drm_err(drm, "failed to initialize vblank\n");
pm_runtime_disable(drm->dev);
return ret;
}
drm_mode_config_reset(drm);
return 0;
}
static irqreturn_t handle_lcd_irq(struct drm_device *dev)
{
unsigned long status, val, val1;
int plane_id, dma0_state, dma1_state;
struct kmb_drm_private *kmb = to_kmb(dev);
u32 ctrl = 0;
status = kmb_read_lcd(kmb, LCD_INT_STATUS);
spin_lock(&kmb->irq_lock);
if (status & LCD_INT_EOF) {
kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_EOF);
/* When disabling/enabling LCD layers, the change takes effect
* immediately and does not wait for EOF (end of frame).
* When kmb_plane_atomic_disable is called, mark the plane as
* disabled but actually disable the plane when EOF irq is
* being handled.
*/
for (plane_id = LAYER_0;
plane_id < KMB_MAX_PLANES; plane_id++) {
if (kmb->plane_status[plane_id].disable) {
kmb_clr_bitmask_lcd(kmb,
LCD_LAYERn_DMA_CFG
(plane_id),
LCD_DMA_LAYER_ENABLE);
kmb_clr_bitmask_lcd(kmb, LCD_CONTROL,
kmb->plane_status[plane_id].ctrl);
ctrl = kmb_read_lcd(kmb, LCD_CONTROL);
if (!(ctrl & (LCD_CTRL_VL1_ENABLE |
LCD_CTRL_VL2_ENABLE |
LCD_CTRL_GL1_ENABLE |
LCD_CTRL_GL2_ENABLE))) {
/* If no LCD layers are using DMA,
* then disable DMA pipelined AXI read
* transactions.
*/
kmb_clr_bitmask_lcd(kmb, LCD_CONTROL,
LCD_CTRL_PIPELINE_DMA);
}
kmb->plane_status[plane_id].disable = false;
}
}
if (kmb->kmb_under_flow) {
/* DMA Recovery after underflow */
dma0_state = (kmb->layer_no == 0) ?
LCD_VIDEO0_DMA0_STATE : LCD_VIDEO1_DMA0_STATE;
dma1_state = (kmb->layer_no == 0) ?
LCD_VIDEO0_DMA1_STATE : LCD_VIDEO1_DMA1_STATE;
do {
kmb_write_lcd(kmb, LCD_FIFO_FLUSH, 1);
val = kmb_read_lcd(kmb, dma0_state)
& LCD_DMA_STATE_ACTIVE;
val1 = kmb_read_lcd(kmb, dma1_state)
& LCD_DMA_STATE_ACTIVE;
} while ((val || val1));
/* disable dma */
kmb_clr_bitmask_lcd(kmb,
LCD_LAYERn_DMA_CFG(kmb->layer_no),
LCD_DMA_LAYER_ENABLE);
kmb_write_lcd(kmb, LCD_FIFO_FLUSH, 1);
kmb->kmb_flush_done = 1;
kmb->kmb_under_flow = 0;
}
}
if (status & LCD_INT_LINE_CMP) {
/* clear line compare interrupt */
kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_LINE_CMP);
}
if (status & LCD_INT_VERT_COMP) {
/* Read VSTATUS */
val = kmb_read_lcd(kmb, LCD_VSTATUS);
val = (val & LCD_VSTATUS_VERTICAL_STATUS_MASK);
switch (val) {
case LCD_VSTATUS_COMPARE_VSYNC:
/* Clear vertical compare interrupt */
kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_VERT_COMP);
if (kmb->kmb_flush_done) {
kmb_set_bitmask_lcd(kmb,
LCD_LAYERn_DMA_CFG
(kmb->layer_no),
LCD_DMA_LAYER_ENABLE);
kmb->kmb_flush_done = 0;
}
drm_crtc_handle_vblank(&kmb->crtc);
break;
case LCD_VSTATUS_COMPARE_BACKPORCH:
case LCD_VSTATUS_COMPARE_ACTIVE:
case LCD_VSTATUS_COMPARE_FRONT_PORCH:
kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_VERT_COMP);
break;
}
}
if (status & LCD_INT_DMA_ERR) {
val =
(status & LCD_INT_DMA_ERR &
kmb_read_lcd(kmb, LCD_INT_ENABLE));
/* LAYER0 - VL0 */
if (val & (LAYER0_DMA_FIFO_UNDERFLOW |
LAYER0_DMA_CB_FIFO_UNDERFLOW |
LAYER0_DMA_CR_FIFO_UNDERFLOW)) {
kmb->kmb_under_flow++;
drm_info(&kmb->drm,
"!LAYER0:VL0 DMA UNDERFLOW val = 0x%lx,under_flow=%d",
val, kmb->kmb_under_flow);
/* disable underflow interrupt */
kmb_clr_bitmask_lcd(kmb, LCD_INT_ENABLE,
LAYER0_DMA_FIFO_UNDERFLOW |
LAYER0_DMA_CB_FIFO_UNDERFLOW |
LAYER0_DMA_CR_FIFO_UNDERFLOW);
kmb_set_bitmask_lcd(kmb, LCD_INT_CLEAR,
LAYER0_DMA_CB_FIFO_UNDERFLOW |
LAYER0_DMA_FIFO_UNDERFLOW |
LAYER0_DMA_CR_FIFO_UNDERFLOW);
/* disable auto restart mode */
kmb_clr_bitmask_lcd(kmb, LCD_LAYERn_DMA_CFG(0),
LCD_DMA_LAYER_CONT_PING_PONG_UPDATE);
kmb->layer_no = 0;
}
if (val & LAYER0_DMA_FIFO_OVERFLOW)
drm_dbg(&kmb->drm,
"LAYER0:VL0 DMA OVERFLOW val = 0x%lx", val);
if (val & LAYER0_DMA_CB_FIFO_OVERFLOW)
drm_dbg(&kmb->drm,
"LAYER0:VL0 DMA CB OVERFLOW val = 0x%lx", val);
if (val & LAYER0_DMA_CR_FIFO_OVERFLOW)
drm_dbg(&kmb->drm,
"LAYER0:VL0 DMA CR OVERFLOW val = 0x%lx", val);
/* LAYER1 - VL1 */
if (val & (LAYER1_DMA_FIFO_UNDERFLOW |
LAYER1_DMA_CB_FIFO_UNDERFLOW |
LAYER1_DMA_CR_FIFO_UNDERFLOW)) {
kmb->kmb_under_flow++;
drm_info(&kmb->drm,
"!LAYER1:VL1 DMA UNDERFLOW val = 0x%lx, under_flow=%d",
val, kmb->kmb_under_flow);
/* disable underflow interrupt */
kmb_clr_bitmask_lcd(kmb, LCD_INT_ENABLE,
LAYER1_DMA_FIFO_UNDERFLOW |
LAYER1_DMA_CB_FIFO_UNDERFLOW |
LAYER1_DMA_CR_FIFO_UNDERFLOW);
kmb_set_bitmask_lcd(kmb, LCD_INT_CLEAR,
LAYER1_DMA_CB_FIFO_UNDERFLOW |
LAYER1_DMA_FIFO_UNDERFLOW |
LAYER1_DMA_CR_FIFO_UNDERFLOW);
/* disable auto restart mode */
kmb_clr_bitmask_lcd(kmb, LCD_LAYERn_DMA_CFG(1),
LCD_DMA_LAYER_CONT_PING_PONG_UPDATE);
kmb->layer_no = 1;
}
/* LAYER1 - VL1 */
if (val & LAYER1_DMA_FIFO_OVERFLOW)
drm_dbg(&kmb->drm,
"LAYER1:VL1 DMA OVERFLOW val = 0x%lx", val);
if (val & LAYER1_DMA_CB_FIFO_OVERFLOW)
drm_dbg(&kmb->drm,
"LAYER1:VL1 DMA CB OVERFLOW val = 0x%lx", val);
if (val & LAYER1_DMA_CR_FIFO_OVERFLOW)
drm_dbg(&kmb->drm,
"LAYER1:VL1 DMA CR OVERFLOW val = 0x%lx", val);
/* LAYER2 - GL0 */
if (val & LAYER2_DMA_FIFO_UNDERFLOW)
drm_dbg(&kmb->drm,
"LAYER2:GL0 DMA UNDERFLOW val = 0x%lx", val);
if (val & LAYER2_DMA_FIFO_OVERFLOW)
drm_dbg(&kmb->drm,
"LAYER2:GL0 DMA OVERFLOW val = 0x%lx", val);
/* LAYER3 - GL1 */
if (val & LAYER3_DMA_FIFO_UNDERFLOW)
drm_dbg(&kmb->drm,
"LAYER3:GL1 DMA UNDERFLOW val = 0x%lx", val);
if (val & LAYER3_DMA_FIFO_OVERFLOW)
drm_dbg(&kmb->drm,
"LAYER3:GL1 DMA OVERFLOW val = 0x%lx", val);
}
spin_unlock(&kmb->irq_lock);
if (status & LCD_INT_LAYER) {
/* Clear layer interrupts */
kmb_write_lcd(kmb, LCD_INT_CLEAR, LCD_INT_LAYER);
}
/* Clear all interrupts */
kmb_set_bitmask_lcd(kmb, LCD_INT_CLEAR, 1);
return IRQ_HANDLED;
}
/* IRQ handler */
static irqreturn_t kmb_isr(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *)arg;
handle_lcd_irq(dev);
return IRQ_HANDLED;
}
static void kmb_irq_reset(struct drm_device *drm)
{
kmb_write_lcd(to_kmb(drm), LCD_INT_CLEAR, 0xFFFF);
kmb_write_lcd(to_kmb(drm), LCD_INT_ENABLE, 0);
}
static int kmb_irq_install(struct drm_device *drm, unsigned int irq)
{
if (irq == IRQ_NOTCONNECTED)
return -ENOTCONN;
kmb_irq_reset(drm);
return request_irq(irq, kmb_isr, 0, drm->driver->name, drm);
}
static void kmb_irq_uninstall(struct drm_device *drm)
{
struct kmb_drm_private *kmb = to_kmb(drm);
kmb_irq_reset(drm);
free_irq(kmb->irq_lcd, drm);
}
DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver kmb_driver = {
.driver_features = DRIVER_GEM |
DRIVER_MODESET | DRIVER_ATOMIC,
/* GEM Operations */
.fops = &fops,
DRM_GEM_DMA_DRIVER_OPS_VMAP,
.name = "kmb-drm",
.desc = "KEEMBAY DISPLAY DRIVER",
.date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
};
static int kmb_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct drm_device *drm = dev_get_drvdata(dev);
struct kmb_drm_private *kmb = to_kmb(drm);
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
of_node_put(kmb->crtc.port);
kmb->crtc.port = NULL;
pm_runtime_get_sync(drm->dev);
kmb_irq_uninstall(drm);
pm_runtime_put_sync(drm->dev);
pm_runtime_disable(drm->dev);
of_reserved_mem_device_release(drm->dev);
/* Release clks */
kmb_display_clk_disable(kmb);
dev_set_drvdata(dev, NULL);
/* Unregister DSI host */
kmb_dsi_host_unregister(kmb->kmb_dsi);
drm_atomic_helper_shutdown(drm);
return 0;
}
static int kmb_probe(struct platform_device *pdev)
{
struct device *dev = get_device(&pdev->dev);
struct kmb_drm_private *kmb;
int ret = 0;
struct device_node *dsi_in;
struct device_node *dsi_node;
struct platform_device *dsi_pdev;
/* The bridge (ADV 7535) will return -EPROBE_DEFER until it
* has a mipi_dsi_host to register its device to. So, we
* first register the DSI host during probe time, and then return
* -EPROBE_DEFER until the bridge is loaded. Probe will be called again
* and then the rest of the driver initialization can proceed
* afterwards and the bridge can be successfully attached.
*/
dsi_in = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0);
if (!dsi_in) {
DRM_ERROR("Failed to get dsi_in node info from DT");
return -EINVAL;
}
dsi_node = of_graph_get_remote_port_parent(dsi_in);
if (!dsi_node) {
of_node_put(dsi_in);
DRM_ERROR("Failed to get dsi node from DT\n");
return -EINVAL;
}
dsi_pdev = of_find_device_by_node(dsi_node);
if (!dsi_pdev) {
of_node_put(dsi_in);
of_node_put(dsi_node);
DRM_ERROR("Failed to get dsi platform device\n");
return -EINVAL;
}
of_node_put(dsi_in);
of_node_put(dsi_node);
ret = kmb_dsi_host_bridge_init(get_device(&dsi_pdev->dev));
if (ret == -EPROBE_DEFER) {
return -EPROBE_DEFER;
} else if (ret) {
DRM_ERROR("probe failed to initialize DSI host bridge\n");
return ret;
}
/* Create DRM device */
kmb = devm_drm_dev_alloc(dev, &kmb_driver,
struct kmb_drm_private, drm);
if (IS_ERR(kmb))
return PTR_ERR(kmb);
dev_set_drvdata(dev, &kmb->drm);
/* Initialize MIPI DSI */
kmb->kmb_dsi = kmb_dsi_init(dsi_pdev);
if (IS_ERR(kmb->kmb_dsi)) {
drm_err(&kmb->drm, "failed to initialize DSI\n");
ret = PTR_ERR(kmb->kmb_dsi);
goto err_free1;
}
kmb->kmb_dsi->dev = &dsi_pdev->dev;
kmb->kmb_dsi->pdev = dsi_pdev;
ret = kmb_hw_init(&kmb->drm, 0);
if (ret)
goto err_free1;
ret = kmb_setup_mode_config(&kmb->drm);
if (ret)
goto err_free;
ret = kmb_irq_install(&kmb->drm, kmb->irq_lcd);
if (ret < 0) {
drm_err(&kmb->drm, "failed to install IRQ handler\n");
goto err_irq;
}
drm_kms_helper_poll_init(&kmb->drm);
/* Register graphics device with the kernel */
ret = drm_dev_register(&kmb->drm, 0);
if (ret)
goto err_register;
drm_fbdev_dma_setup(&kmb->drm, 0);
return 0;
err_register:
drm_kms_helper_poll_fini(&kmb->drm);
err_irq:
pm_runtime_disable(kmb->drm.dev);
err_free:
drm_crtc_cleanup(&kmb->crtc);
drm_mode_config_cleanup(&kmb->drm);
err_free1:
dev_set_drvdata(dev, NULL);
kmb_dsi_host_unregister(kmb->kmb_dsi);
return ret;
}
static const struct of_device_id kmb_of_match[] = {
{.compatible = "intel,keembay-display"},
{},
};
MODULE_DEVICE_TABLE(of, kmb_of_match);
static int __maybe_unused kmb_pm_suspend(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct kmb_drm_private *kmb = to_kmb(drm);
drm_kms_helper_poll_disable(drm);
kmb->state = drm_atomic_helper_suspend(drm);
if (IS_ERR(kmb->state)) {
drm_kms_helper_poll_enable(drm);
return PTR_ERR(kmb->state);
}
return 0;
}
static int __maybe_unused kmb_pm_resume(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct kmb_drm_private *kmb = drm ? to_kmb(drm) : NULL;
if (!kmb)
return 0;
drm_atomic_helper_resume(drm, kmb->state);
drm_kms_helper_poll_enable(drm);
return 0;
}
static SIMPLE_DEV_PM_OPS(kmb_pm_ops, kmb_pm_suspend, kmb_pm_resume);
static struct platform_driver kmb_platform_driver = {
.probe = kmb_probe,
.remove = kmb_remove,
.driver = {
.name = "kmb-drm",
.pm = &kmb_pm_ops,
.of_match_table = kmb_of_match,
},
};
drm_module_platform_driver(kmb_platform_driver);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Keembay Display driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/kmb/kmb_drv.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2018-2020 Intel Corporation
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include "kmb_drv.h"
#include "kmb_plane.h"
#include "kmb_regs.h"
const u32 layer_irqs[] = {
LCD_INT_VL0,
LCD_INT_VL1,
LCD_INT_GL0,
LCD_INT_GL1
};
/* Conversion (yuv->rgb) matrix from myriadx */
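/* The coefficients appear to be Q10 fixed point (1024 == 1.0): rows
 * 1-3 match the BT.601 YCbCr->RGB matrix (e.g. 1436/1024 ~= 1.402 for
 * the Cr->R term) and the final row holds the offsets.
 */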
static const u32 csc_coef_lcd[] = {
1024, 0, 1436,
1024, -352, -731,
1024, 1814, 0,
-179, 125, -226
};
/* Graphics layer (layers 2 & 3) formats, only packed formats are supported */
static const u32 kmb_formats_g[] = {
DRM_FORMAT_RGB332,
DRM_FORMAT_XRGB4444, DRM_FORMAT_XBGR4444,
DRM_FORMAT_ARGB4444, DRM_FORMAT_ABGR4444,
DRM_FORMAT_XRGB1555, DRM_FORMAT_XBGR1555,
DRM_FORMAT_ARGB1555, DRM_FORMAT_ABGR1555,
DRM_FORMAT_RGB565, DRM_FORMAT_BGR565,
DRM_FORMAT_RGB888, DRM_FORMAT_BGR888,
DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888,
DRM_FORMAT_ARGB8888, DRM_FORMAT_ABGR8888,
};
/* Video layer ( 0 & 1) formats, packed and planar formats are supported */
static const u32 kmb_formats_v[] = {
/* packed formats */
DRM_FORMAT_RGB332,
DRM_FORMAT_XRGB4444, DRM_FORMAT_XBGR4444,
DRM_FORMAT_ARGB4444, DRM_FORMAT_ABGR4444,
DRM_FORMAT_XRGB1555, DRM_FORMAT_XBGR1555,
DRM_FORMAT_ARGB1555, DRM_FORMAT_ABGR1555,
DRM_FORMAT_RGB565, DRM_FORMAT_BGR565,
DRM_FORMAT_RGB888, DRM_FORMAT_BGR888,
DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888,
DRM_FORMAT_ARGB8888, DRM_FORMAT_ABGR8888,
/*planar formats */
DRM_FORMAT_YUV420, DRM_FORMAT_YVU420,
DRM_FORMAT_YUV422, DRM_FORMAT_YVU422,
DRM_FORMAT_YUV444, DRM_FORMAT_YVU444,
DRM_FORMAT_NV12, DRM_FORMAT_NV21,
};
static unsigned int check_pixel_format(struct drm_plane *plane, u32 format)
{
struct kmb_drm_private *kmb;
struct kmb_plane *kmb_plane = to_kmb_plane(plane);
int i;
int plane_id = kmb_plane->id;
struct disp_cfg init_disp_cfg;
kmb = to_kmb(plane->dev);
init_disp_cfg = kmb->init_disp_cfg[plane_id];
/* Due to HW limitations, changing pixel format after initial
* plane configuration is not supported.
*/
if (init_disp_cfg.format && init_disp_cfg.format != format) {
drm_dbg(&kmb->drm, "Cannot change format after initial plane configuration");
return -EINVAL;
}
for (i = 0; i < plane->format_count; i++) {
if (plane->format_types[i] == format)
return 0;
}
return -EINVAL;
}
static int kmb_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct kmb_drm_private *kmb;
struct kmb_plane *kmb_plane = to_kmb_plane(plane);
int plane_id = kmb_plane->id;
struct disp_cfg init_disp_cfg;
struct drm_framebuffer *fb;
int ret;
struct drm_crtc_state *crtc_state;
bool can_position;
kmb = to_kmb(plane->dev);
init_disp_cfg = kmb->init_disp_cfg[plane_id];
fb = new_plane_state->fb;
if (!fb || !new_plane_state->crtc)
return 0;
ret = check_pixel_format(plane, fb->format->format);
if (ret)
return ret;
if (new_plane_state->crtc_w > KMB_FB_MAX_WIDTH ||
new_plane_state->crtc_h > KMB_FB_MAX_HEIGHT ||
new_plane_state->crtc_w < KMB_FB_MIN_WIDTH ||
new_plane_state->crtc_h < KMB_FB_MIN_HEIGHT)
return -EINVAL;
/* Due to HW limitations, changing plane height or width after
* initial plane configuration is not supported.
*/
if ((init_disp_cfg.width && init_disp_cfg.height) &&
(init_disp_cfg.width != fb->width ||
init_disp_cfg.height != fb->height)) {
drm_dbg(&kmb->drm, "Cannot change plane height or width after initial configuration");
return -EINVAL;
}
can_position = (plane->type == DRM_PLANE_TYPE_OVERLAY);
crtc_state =
drm_atomic_get_existing_crtc_state(state,
new_plane_state->crtc);
return drm_atomic_helper_check_plane_state(new_plane_state,
crtc_state,
DRM_PLANE_NO_SCALING,
DRM_PLANE_NO_SCALING,
can_position, true);
}
static void kmb_plane_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct kmb_plane *kmb_plane = to_kmb_plane(plane);
int plane_id = kmb_plane->id;
struct kmb_drm_private *kmb;
kmb = to_kmb(plane->dev);
if (WARN_ON(plane_id >= KMB_MAX_PLANES))
return;
switch (plane_id) {
case LAYER_0:
kmb->plane_status[plane_id].ctrl = LCD_CTRL_VL1_ENABLE;
break;
case LAYER_1:
kmb->plane_status[plane_id].ctrl = LCD_CTRL_VL2_ENABLE;
break;
}
kmb->plane_status[plane_id].disable = true;
}
static unsigned int get_pixel_format(u32 format)
{
unsigned int val = 0;
switch (format) {
/* planar formats */
case DRM_FORMAT_YUV444:
val = LCD_LAYER_FORMAT_YCBCR444PLAN | LCD_LAYER_PLANAR_STORAGE;
break;
case DRM_FORMAT_YVU444:
val = LCD_LAYER_FORMAT_YCBCR444PLAN | LCD_LAYER_PLANAR_STORAGE
| LCD_LAYER_CRCB_ORDER;
break;
case DRM_FORMAT_YUV422:
val = LCD_LAYER_FORMAT_YCBCR422PLAN | LCD_LAYER_PLANAR_STORAGE;
break;
case DRM_FORMAT_YVU422:
val = LCD_LAYER_FORMAT_YCBCR422PLAN | LCD_LAYER_PLANAR_STORAGE
| LCD_LAYER_CRCB_ORDER;
break;
case DRM_FORMAT_YUV420:
val = LCD_LAYER_FORMAT_YCBCR420PLAN | LCD_LAYER_PLANAR_STORAGE;
break;
case DRM_FORMAT_YVU420:
val = LCD_LAYER_FORMAT_YCBCR420PLAN | LCD_LAYER_PLANAR_STORAGE
| LCD_LAYER_CRCB_ORDER;
break;
case DRM_FORMAT_NV12:
val = LCD_LAYER_FORMAT_NV12 | LCD_LAYER_PLANAR_STORAGE;
break;
case DRM_FORMAT_NV21:
val = LCD_LAYER_FORMAT_NV12 | LCD_LAYER_PLANAR_STORAGE
| LCD_LAYER_CRCB_ORDER;
break;
/* packed formats */
/* Looks like the hw requires B & G to be swapped when RGB */
case DRM_FORMAT_RGB332:
val = LCD_LAYER_FORMAT_RGB332 | LCD_LAYER_BGR_ORDER;
break;
case DRM_FORMAT_XBGR4444:
val = LCD_LAYER_FORMAT_RGBX4444;
break;
case DRM_FORMAT_ARGB4444:
val = LCD_LAYER_FORMAT_RGBA4444 | LCD_LAYER_BGR_ORDER;
break;
case DRM_FORMAT_ABGR4444:
val = LCD_LAYER_FORMAT_RGBA4444;
break;
case DRM_FORMAT_XRGB1555:
val = LCD_LAYER_FORMAT_XRGB1555 | LCD_LAYER_BGR_ORDER;
break;
case DRM_FORMAT_XBGR1555:
val = LCD_LAYER_FORMAT_XRGB1555;
break;
case DRM_FORMAT_ARGB1555:
val = LCD_LAYER_FORMAT_RGBA1555 | LCD_LAYER_BGR_ORDER;
break;
case DRM_FORMAT_ABGR1555:
val = LCD_LAYER_FORMAT_RGBA1555;
break;
case DRM_FORMAT_RGB565:
val = LCD_LAYER_FORMAT_RGB565 | LCD_LAYER_BGR_ORDER;
break;
case DRM_FORMAT_BGR565:
val = LCD_LAYER_FORMAT_RGB565;
break;
case DRM_FORMAT_RGB888:
val = LCD_LAYER_FORMAT_RGB888 | LCD_LAYER_BGR_ORDER;
break;
case DRM_FORMAT_BGR888:
val = LCD_LAYER_FORMAT_RGB888;
break;
case DRM_FORMAT_XRGB8888:
val = LCD_LAYER_FORMAT_RGBX8888 | LCD_LAYER_BGR_ORDER;
break;
case DRM_FORMAT_XBGR8888:
val = LCD_LAYER_FORMAT_RGBX8888;
break;
case DRM_FORMAT_ARGB8888:
val = LCD_LAYER_FORMAT_RGBA8888 | LCD_LAYER_BGR_ORDER;
break;
case DRM_FORMAT_ABGR8888:
val = LCD_LAYER_FORMAT_RGBA8888;
break;
}
DRM_INFO_ONCE("%s : %d format=0x%x val=0x%x\n",
__func__, __LINE__, format, val);
return val;
}
static unsigned int get_bits_per_pixel(const struct drm_format_info *format)
{
u32 bpp = 0;
unsigned int val = 0;
if (format->num_planes > 1) {
val = LCD_LAYER_8BPP;
return val;
}
bpp += 8 * format->cpp[0];
switch (bpp) {
case 8:
val = LCD_LAYER_8BPP;
break;
case 16:
val = LCD_LAYER_16BPP;
break;
case 24:
val = LCD_LAYER_24BPP;
break;
case 32:
val = LCD_LAYER_32BPP;
break;
}
DRM_DEBUG("bpp=%d val=0x%x\n", bpp, val);
return val;
}
static void config_csc(struct kmb_drm_private *kmb, int plane_id)
{
/* YUV to RGB conversion using the fixed matrix csc_coef_lcd */
kmb_write_lcd(kmb, LCD_LAYERn_CSC_COEFF11(plane_id), csc_coef_lcd[0]);
kmb_write_lcd(kmb, LCD_LAYERn_CSC_COEFF12(plane_id), csc_coef_lcd[1]);
kmb_write_lcd(kmb, LCD_LAYERn_CSC_COEFF13(plane_id), csc_coef_lcd[2]);
kmb_write_lcd(kmb, LCD_LAYERn_CSC_COEFF21(plane_id), csc_coef_lcd[3]);
kmb_write_lcd(kmb, LCD_LAYERn_CSC_COEFF22(plane_id), csc_coef_lcd[4]);
kmb_write_lcd(kmb, LCD_LAYERn_CSC_COEFF23(plane_id), csc_coef_lcd[5]);
kmb_write_lcd(kmb, LCD_LAYERn_CSC_COEFF31(plane_id), csc_coef_lcd[6]);
kmb_write_lcd(kmb, LCD_LAYERn_CSC_COEFF32(plane_id), csc_coef_lcd[7]);
kmb_write_lcd(kmb, LCD_LAYERn_CSC_COEFF33(plane_id), csc_coef_lcd[8]);
kmb_write_lcd(kmb, LCD_LAYERn_CSC_OFF1(plane_id), csc_coef_lcd[9]);
kmb_write_lcd(kmb, LCD_LAYERn_CSC_OFF2(plane_id), csc_coef_lcd[10]);
kmb_write_lcd(kmb, LCD_LAYERn_CSC_OFF3(plane_id), csc_coef_lcd[11]);
}
static void kmb_plane_set_alpha(struct kmb_drm_private *kmb,
const struct drm_plane_state *state,
unsigned char plane_id,
unsigned int *val)
{
u16 plane_alpha = state->alpha;
u16 pixel_blend_mode = state->pixel_blend_mode;
int has_alpha = state->fb->format->has_alpha;
if (plane_alpha != DRM_BLEND_ALPHA_OPAQUE)
*val |= LCD_LAYER_ALPHA_STATIC;
if (has_alpha) {
switch (pixel_blend_mode) {
case DRM_MODE_BLEND_PIXEL_NONE:
break;
case DRM_MODE_BLEND_PREMULTI:
*val |= LCD_LAYER_ALPHA_EMBED | LCD_LAYER_ALPHA_PREMULT;
break;
case DRM_MODE_BLEND_COVERAGE:
*val |= LCD_LAYER_ALPHA_EMBED;
break;
default:
DRM_DEBUG("Missing pixel blend mode case (%s == %ld)\n",
__stringify(pixel_blend_mode),
(long)pixel_blend_mode);
break;
}
}
if (plane_alpha == DRM_BLEND_ALPHA_OPAQUE && !has_alpha) {
*val &= LCD_LAYER_ALPHA_DISABLED;
return;
}
kmb_write_lcd(kmb, LCD_LAYERn_ALPHA(plane_id), plane_alpha);
}
static void kmb_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
plane);
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_framebuffer *fb;
struct kmb_drm_private *kmb;
unsigned int width;
unsigned int height;
unsigned int dma_len;
struct kmb_plane *kmb_plane;
unsigned int dma_cfg;
unsigned int ctrl = 0, val = 0, out_format = 0;
unsigned int src_w, src_h, crtc_x, crtc_y;
unsigned char plane_id;
int num_planes;
static dma_addr_t addr[MAX_SUB_PLANES];
struct disp_cfg *init_disp_cfg;
if (!plane || !new_plane_state || !old_plane_state)
return;
fb = new_plane_state->fb;
if (!fb)
return;
num_planes = fb->format->num_planes;
kmb_plane = to_kmb_plane(plane);
kmb = to_kmb(plane->dev);
plane_id = kmb_plane->id;
spin_lock_irq(&kmb->irq_lock);
if (kmb->kmb_under_flow || kmb->kmb_flush_done) {
spin_unlock_irq(&kmb->irq_lock);
drm_dbg(&kmb->drm, "plane_update:underflow!!!! returning");
return;
}
spin_unlock_irq(&kmb->irq_lock);
init_disp_cfg = &kmb->init_disp_cfg[plane_id];
src_w = new_plane_state->src_w >> 16;
src_h = new_plane_state->src_h >> 16;
crtc_x = new_plane_state->crtc_x;
crtc_y = new_plane_state->crtc_y;
drm_dbg(&kmb->drm,
"src_w=%d src_h=%d, fb->format->format=0x%x fb->flags=0x%x\n",
src_w, src_h, fb->format->format, fb->flags);
width = fb->width;
height = fb->height;
dma_len = (width * height * fb->format->cpp[0]);
drm_dbg(&kmb->drm, "dma_len=%d ", dma_len);
kmb_write_lcd(kmb, LCD_LAYERn_DMA_LEN(plane_id), dma_len);
kmb_write_lcd(kmb, LCD_LAYERn_DMA_LEN_SHADOW(plane_id), dma_len);
kmb_write_lcd(kmb, LCD_LAYERn_DMA_LINE_VSTRIDE(plane_id),
fb->pitches[0]);
kmb_write_lcd(kmb, LCD_LAYERn_DMA_LINE_WIDTH(plane_id),
(width * fb->format->cpp[0]));
addr[Y_PLANE] = drm_fb_dma_get_gem_addr(fb, new_plane_state, 0);
kmb_write_lcd(kmb, LCD_LAYERn_DMA_START_ADDR(plane_id),
addr[Y_PLANE] + fb->offsets[0]);
val = get_pixel_format(fb->format->format);
val |= get_bits_per_pixel(fb->format);
/* Program Cb/Cr for planar formats */
if (num_planes > 1) {
kmb_write_lcd(kmb, LCD_LAYERn_DMA_CB_LINE_VSTRIDE(plane_id),
width * fb->format->cpp[0]);
kmb_write_lcd(kmb, LCD_LAYERn_DMA_CB_LINE_WIDTH(plane_id),
(width * fb->format->cpp[0]));
addr[U_PLANE] = drm_fb_dma_get_gem_addr(fb, new_plane_state,
U_PLANE);
		/* check if Cb/Cr is swapped */
if (num_planes == 3 && (val & LCD_LAYER_CRCB_ORDER))
kmb_write_lcd(kmb,
LCD_LAYERn_DMA_START_CR_ADR(plane_id),
addr[U_PLANE]);
else
kmb_write_lcd(kmb,
LCD_LAYERn_DMA_START_CB_ADR(plane_id),
addr[U_PLANE]);
if (num_planes == 3) {
kmb_write_lcd(kmb,
LCD_LAYERn_DMA_CR_LINE_VSTRIDE(plane_id),
((width) * fb->format->cpp[0]));
kmb_write_lcd(kmb,
LCD_LAYERn_DMA_CR_LINE_WIDTH(plane_id),
((width) * fb->format->cpp[0]));
addr[V_PLANE] = drm_fb_dma_get_gem_addr(fb,
new_plane_state,
V_PLANE);
			/* check if Cb/Cr is swapped */
if (val & LCD_LAYER_CRCB_ORDER)
kmb_write_lcd(kmb,
LCD_LAYERn_DMA_START_CB_ADR(plane_id),
addr[V_PLANE]);
else
kmb_write_lcd(kmb,
LCD_LAYERn_DMA_START_CR_ADR(plane_id),
addr[V_PLANE]);
}
}
kmb_write_lcd(kmb, LCD_LAYERn_WIDTH(plane_id), src_w - 1);
kmb_write_lcd(kmb, LCD_LAYERn_HEIGHT(plane_id), src_h - 1);
kmb_write_lcd(kmb, LCD_LAYERn_COL_START(plane_id), crtc_x);
kmb_write_lcd(kmb, LCD_LAYERn_ROW_START(plane_id), crtc_y);
val |= LCD_LAYER_FIFO_100;
if (val & LCD_LAYER_PLANAR_STORAGE) {
val |= LCD_LAYER_CSC_EN;
/* Enable CSC if input is planar and output is RGB */
config_csc(kmb, plane_id);
}
kmb_plane_set_alpha(kmb, plane->state, plane_id, &val);
kmb_write_lcd(kmb, LCD_LAYERn_CFG(plane_id), val);
/* Configure LCD_CONTROL */
ctrl = kmb_read_lcd(kmb, LCD_CONTROL);
/* Set layer blending config */
ctrl &= ~LCD_CTRL_ALPHA_ALL;
ctrl |= LCD_CTRL_ALPHA_BOTTOM_VL1 |
LCD_CTRL_ALPHA_BLEND_VL2;
ctrl &= ~LCD_CTRL_ALPHA_BLEND_BKGND_DISABLE;
switch (plane_id) {
case LAYER_0:
ctrl |= LCD_CTRL_VL1_ENABLE;
break;
case LAYER_1:
ctrl |= LCD_CTRL_VL2_ENABLE;
break;
case LAYER_2:
ctrl |= LCD_CTRL_GL1_ENABLE;
break;
case LAYER_3:
ctrl |= LCD_CTRL_GL2_ENABLE;
break;
}
ctrl |= LCD_CTRL_PROGRESSIVE | LCD_CTRL_TIM_GEN_ENABLE
| LCD_CTRL_CONTINUOUS | LCD_CTRL_OUTPUT_ENABLED;
/* LCD is connected to MIPI on kmb
* Therefore this bit is required for DSI Tx
*/
ctrl |= LCD_CTRL_VHSYNC_IDLE_LVL;
kmb_write_lcd(kmb, LCD_CONTROL, ctrl);
/* Enable pipeline AXI read transactions for the DMA
* after setting graphics layers. This must be done
* in a separate write cycle.
*/
kmb_set_bitmask_lcd(kmb, LCD_CONTROL, LCD_CTRL_PIPELINE_DMA);
/* FIXME no doc on how to set output format, these values are taken
* from the Myriadx tests
*/
out_format |= LCD_OUTF_FORMAT_RGB888;
	/* Leave RGB order, conversion mode and clip mode to default */
/* do not interleave RGB channels for mipi Tx compatibility */
out_format |= LCD_OUTF_MIPI_RGB_MODE;
kmb_write_lcd(kmb, LCD_OUT_FORMAT_CFG, out_format);
dma_cfg = LCD_DMA_LAYER_ENABLE | LCD_DMA_LAYER_VSTRIDE_EN |
LCD_DMA_LAYER_CONT_UPDATE | LCD_DMA_LAYER_AXI_BURST_16;
/* Enable DMA */
kmb_write_lcd(kmb, LCD_LAYERn_DMA_CFG(plane_id), dma_cfg);
/* Save initial display config */
if (!init_disp_cfg->width ||
!init_disp_cfg->height ||
!init_disp_cfg->format) {
init_disp_cfg->width = width;
init_disp_cfg->height = height;
init_disp_cfg->format = fb->format->format;
}
drm_dbg(&kmb->drm, "dma_cfg=0x%x LCD_DMA_CFG=0x%x\n", dma_cfg,
kmb_read_lcd(kmb, LCD_LAYERn_DMA_CFG(plane_id)));
kmb_set_bitmask_lcd(kmb, LCD_INT_CLEAR, LCD_INT_EOF |
LCD_INT_DMA_ERR);
kmb_set_bitmask_lcd(kmb, LCD_INT_ENABLE, LCD_INT_EOF |
LCD_INT_DMA_ERR);
}
static const struct drm_plane_helper_funcs kmb_plane_helper_funcs = {
.atomic_check = kmb_plane_atomic_check,
.atomic_update = kmb_plane_atomic_update,
.atomic_disable = kmb_plane_atomic_disable
};
void kmb_plane_destroy(struct drm_plane *plane)
{
struct kmb_plane *kmb_plane = to_kmb_plane(plane);
drm_plane_cleanup(plane);
kfree(kmb_plane);
}
static const struct drm_plane_funcs kmb_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = kmb_plane_destroy,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
struct kmb_plane *kmb_plane_init(struct drm_device *drm)
{
struct kmb_drm_private *kmb = to_kmb(drm);
struct kmb_plane *plane = NULL;
struct kmb_plane *primary = NULL;
int i = 0;
int ret = 0;
enum drm_plane_type plane_type;
const u32 *plane_formats;
int num_plane_formats;
unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE);
for (i = 0; i < KMB_MAX_PLANES; i++) {
plane = drmm_kzalloc(drm, sizeof(*plane), GFP_KERNEL);
if (!plane) {
drm_err(drm, "Failed to allocate plane\n");
return ERR_PTR(-ENOMEM);
}
plane_type = (i == 0) ? DRM_PLANE_TYPE_PRIMARY :
DRM_PLANE_TYPE_OVERLAY;
if (i < 2) {
plane_formats = kmb_formats_v;
num_plane_formats = ARRAY_SIZE(kmb_formats_v);
} else {
plane_formats = kmb_formats_g;
num_plane_formats = ARRAY_SIZE(kmb_formats_g);
}
ret = drm_universal_plane_init(drm, &plane->base_plane,
POSSIBLE_CRTCS, &kmb_plane_funcs,
plane_formats, num_plane_formats,
NULL, plane_type, "plane %d", i);
if (ret < 0) {
drm_err(drm, "drm_universal_plane_init failed (ret=%d)",
ret);
goto cleanup;
}
drm_dbg(drm, "%s : %d i=%d type=%d",
__func__, __LINE__,
i, plane_type);
drm_plane_create_alpha_property(&plane->base_plane);
drm_plane_create_blend_mode_property(&plane->base_plane,
blend_caps);
drm_plane_create_zpos_immutable_property(&plane->base_plane, i);
drm_plane_helper_add(&plane->base_plane,
&kmb_plane_helper_funcs);
if (plane_type == DRM_PLANE_TYPE_PRIMARY) {
primary = plane;
kmb->plane = plane;
}
drm_dbg(drm, "%s : %d primary=%p\n", __func__, __LINE__,
&primary->base_plane);
plane->id = i;
}
/* Disable pipeline AXI read transactions for the DMA
* prior to setting graphics layers
*/
kmb_clr_bitmask_lcd(kmb, LCD_CONTROL, LCD_CTRL_PIPELINE_DMA);
return primary;
cleanup:
drmm_kfree(drm, plane);
return ERR_PTR(ret);
}
| linux-master | drivers/gpu/drm/kmb/kmb_plane.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* DRM driver for Sitronix ST7586 panels
*
* Copyright 2017 David Lechner <[email protected]>
*/
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_rect.h>
/* controller-specific commands */
#define ST7586_DISP_MODE_GRAY 0x38
#define ST7586_DISP_MODE_MONO 0x39
#define ST7586_ENABLE_DDRAM 0x3a
#define ST7586_SET_DISP_DUTY 0xb0
#define ST7586_SET_PART_DISP 0xb4
#define ST7586_SET_NLINE_INV 0xb5
#define ST7586_SET_VOP 0xc0
#define ST7586_SET_BIAS_SYSTEM 0xc3
#define ST7586_SET_BOOST_LEVEL 0xc4
#define ST7586_SET_VOP_OFFSET 0xc7
#define ST7586_ENABLE_ANALOG 0xd0
#define ST7586_AUTO_READ_CTRL 0xd7
#define ST7586_OTP_RW_CTRL 0xe0
#define ST7586_OTP_CTRL_OUT 0xe1
#define ST7586_OTP_READ 0xe3
#define ST7586_DISP_CTRL_MX BIT(6)
#define ST7586_DISP_CTRL_MY BIT(7)
/*
* The ST7586 controller has an unusual pixel format where 2bpp grayscale is
* packed 3 pixels per byte with the first two pixels using 3 bits and the 3rd
* pixel using only 2 bits.
*
 * |  D7  |  D6  |  D5  ||      |      || 2bpp |
 * | (D4) | (D3) | (D2) ||  D1  |  D0  || GRAY |
 * +------+------+------++------+------++------+
 * |  1   |  1   |  1   ||  1   |  1   || 0 0  | black
 * |  1   |  0   |  0   ||  1   |  0   || 0 1  | dark gray
 * |  0   |  1   |  0   ||  0   |  1   || 1 0  | light gray
 * |  0   |  0   |  0   ||  0   |  0   || 1 1  | white
*/
static const u8 st7586_lookup[] = { 0x7, 0x4, 0x2, 0x0 };
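/*
 * Worked example (illustrative, not from the original driver): packing
 * the three pixels black, dark gray, white into one byte with the
 * lookup table above:
 *
 *   black     -> gray8 0x00  -> index 0 -> 0x7, shifted into D7..D5
 *   dark gray -> gray8 ~0x55 -> index 1 -> 0x4, shifted into D4..D2
 *   white     -> gray8 0xff  -> index 3 -> 0x0, shifted into D1..D0
 *
 *   byte = (0x7 << 5) | (0x4 << 2) | (0x0 >> 1) = 0xf0
 */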
static void st7586_xrgb8888_to_gray332(u8 *dst, void *vaddr,
struct drm_framebuffer *fb,
struct drm_rect *clip)
{
size_t len = (clip->x2 - clip->x1) * (clip->y2 - clip->y1);
unsigned int x, y;
u8 *src, *buf, val;
struct iosys_map dst_map, vmap;
buf = kmalloc(len, GFP_KERNEL);
if (!buf)
return;
iosys_map_set_vaddr(&dst_map, buf);
iosys_map_set_vaddr(&vmap, vaddr);
drm_fb_xrgb8888_to_gray8(&dst_map, NULL, &vmap, fb, clip);
src = buf;
for (y = clip->y1; y < clip->y2; y++) {
for (x = clip->x1; x < clip->x2; x += 3) {
val = st7586_lookup[*src++ >> 6] << 5;
val |= st7586_lookup[*src++ >> 6] << 2;
val |= st7586_lookup[*src++ >> 6] >> 1;
*dst++ = val;
}
}
kfree(buf);
}
static int st7586_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *fb,
struct drm_rect *clip)
{
int ret;
ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
if (ret)
return ret;
st7586_xrgb8888_to_gray332(dst, src->vaddr, fb, clip);
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
return 0;
}
static void st7586_fb_dirty(struct iosys_map *src, struct drm_framebuffer *fb,
struct drm_rect *rect)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev);
struct mipi_dbi *dbi = &dbidev->dbi;
int start, end, ret = 0;
/* 3 pixels per byte, so grow clip to nearest multiple of 3 */
rect->x1 = rounddown(rect->x1, 3);
rect->x2 = roundup(rect->x2, 3);
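	/*
	 * Example (illustrative): a damage clip with x1=4, x2=7 becomes
	 * x1=3, x2=9 here, so it starts and ends on a 3-pixel byte
	 * boundary.
	 */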
DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
ret = st7586_buf_copy(dbidev->tx_buf, src, fb, rect);
if (ret)
goto err_msg;
/* Pixels are packed 3 per byte */
start = rect->x1 / 3;
end = rect->x2 / 3;
mipi_dbi_command(dbi, MIPI_DCS_SET_COLUMN_ADDRESS,
(start >> 8) & 0xFF, start & 0xFF,
(end >> 8) & 0xFF, (end - 1) & 0xFF);
mipi_dbi_command(dbi, MIPI_DCS_SET_PAGE_ADDRESS,
(rect->y1 >> 8) & 0xFF, rect->y1 & 0xFF,
(rect->y2 >> 8) & 0xFF, (rect->y2 - 1) & 0xFF);
ret = mipi_dbi_command_buf(dbi, MIPI_DCS_WRITE_MEMORY_START,
(u8 *)dbidev->tx_buf,
(end - start) * (rect->y2 - rect->y1));
err_msg:
if (ret)
dev_err_once(fb->dev->dev, "Failed to update display %d\n", ret);
}
static void st7586_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
struct drm_framebuffer *fb = state->fb;
struct drm_rect rect;
int idx;
if (!pipe->crtc.state->active)
return;
if (!drm_dev_enter(fb->dev, &idx))
return;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
st7586_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
drm_dev_exit(idx);
}
static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
struct mipi_dbi *dbi = &dbidev->dbi;
struct drm_rect rect = {
.x1 = 0,
.x2 = fb->width,
.y1 = 0,
.y2 = fb->height,
};
int idx, ret;
u8 addr_mode;
if (!drm_dev_enter(pipe->crtc.dev, &idx))
return;
DRM_DEBUG_KMS("\n");
ret = mipi_dbi_poweron_reset(dbidev);
if (ret)
goto out_exit;
mipi_dbi_command(dbi, ST7586_AUTO_READ_CTRL, 0x9f);
mipi_dbi_command(dbi, ST7586_OTP_RW_CTRL, 0x00);
msleep(10);
mipi_dbi_command(dbi, ST7586_OTP_READ);
msleep(20);
mipi_dbi_command(dbi, ST7586_OTP_CTRL_OUT);
mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF);
msleep(50);
mipi_dbi_command(dbi, ST7586_SET_VOP_OFFSET, 0x00);
mipi_dbi_command(dbi, ST7586_SET_VOP, 0xe3, 0x00);
mipi_dbi_command(dbi, ST7586_SET_BIAS_SYSTEM, 0x02);
mipi_dbi_command(dbi, ST7586_SET_BOOST_LEVEL, 0x04);
mipi_dbi_command(dbi, ST7586_ENABLE_ANALOG, 0x1d);
mipi_dbi_command(dbi, ST7586_SET_NLINE_INV, 0x00);
mipi_dbi_command(dbi, ST7586_DISP_MODE_GRAY);
mipi_dbi_command(dbi, ST7586_ENABLE_DDRAM, 0x02);
switch (dbidev->rotation) {
default:
addr_mode = 0x00;
break;
case 90:
addr_mode = ST7586_DISP_CTRL_MY;
break;
case 180:
addr_mode = ST7586_DISP_CTRL_MX | ST7586_DISP_CTRL_MY;
break;
case 270:
addr_mode = ST7586_DISP_CTRL_MX;
break;
}
mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
mipi_dbi_command(dbi, ST7586_SET_DISP_DUTY, 0x7f);
mipi_dbi_command(dbi, ST7586_SET_PART_DISP, 0xa0);
mipi_dbi_command(dbi, MIPI_DCS_SET_PARTIAL_ROWS, 0x00, 0x00, 0x00, 0x77);
mipi_dbi_command(dbi, MIPI_DCS_EXIT_INVERT_MODE);
msleep(100);
st7586_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
out_exit:
drm_dev_exit(idx);
}
static void st7586_pipe_disable(struct drm_simple_display_pipe *pipe)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
/*
* This callback is not protected by drm_dev_enter/exit since we want to
* turn off the display on regular driver unload. It's highly unlikely
* that the underlying SPI controller is gone should this be called after
* unplug.
*/
DRM_DEBUG_KMS("\n");
mipi_dbi_command(&dbidev->dbi, MIPI_DCS_SET_DISPLAY_OFF);
}
static const u32 st7586_formats[] = {
DRM_FORMAT_XRGB8888,
};
static const struct drm_simple_display_pipe_funcs st7586_pipe_funcs = {
.mode_valid = mipi_dbi_pipe_mode_valid,
.enable = st7586_pipe_enable,
.disable = st7586_pipe_disable,
.update = st7586_pipe_update,
.begin_fb_access = mipi_dbi_pipe_begin_fb_access,
.end_fb_access = mipi_dbi_pipe_end_fb_access,
.reset_plane = mipi_dbi_pipe_reset_plane,
.duplicate_plane_state = mipi_dbi_pipe_duplicate_plane_state,
.destroy_plane_state = mipi_dbi_pipe_destroy_plane_state,
};
static const struct drm_display_mode st7586_mode = {
DRM_SIMPLE_MODE(178, 128, 37, 27),
};
DEFINE_DRM_GEM_DMA_FOPS(st7586_fops);
static const struct drm_driver st7586_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &st7586_fops,
DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7586",
.desc = "Sitronix ST7586",
.date = "20170801",
.major = 1,
.minor = 0,
};
static const struct of_device_id st7586_of_match[] = {
{ .compatible = "lego,ev3-lcd" },
{},
};
MODULE_DEVICE_TABLE(of, st7586_of_match);
static const struct spi_device_id st7586_id[] = {
{ "ev3-lcd", 0 },
{ },
};
MODULE_DEVICE_TABLE(spi, st7586_id);
static int st7586_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
struct mipi_dbi_dev *dbidev;
struct drm_device *drm;
struct mipi_dbi *dbi;
struct gpio_desc *a0;
u32 rotation = 0;
size_t bufsize;
int ret;
dbidev = devm_drm_dev_alloc(dev, &st7586_driver,
struct mipi_dbi_dev, drm);
if (IS_ERR(dbidev))
return PTR_ERR(dbidev);
dbi = &dbidev->dbi;
drm = &dbidev->drm;
bufsize = (st7586_mode.vdisplay + 2) / 3 * st7586_mode.hdisplay;
dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(dbi->reset))
return dev_err_probe(dev, PTR_ERR(dbi->reset), "Failed to get GPIO 'reset'\n");
a0 = devm_gpiod_get(dev, "a0", GPIOD_OUT_LOW);
if (IS_ERR(a0))
return dev_err_probe(dev, PTR_ERR(a0), "Failed to get GPIO 'a0'\n");
device_property_read_u32(dev, "rotation", &rotation);
ret = mipi_dbi_spi_init(spi, dbi, a0);
if (ret)
return ret;
/* Cannot read from this controller via SPI */
dbi->read_commands = NULL;
ret = mipi_dbi_dev_init_with_formats(dbidev, &st7586_pipe_funcs,
st7586_formats, ARRAY_SIZE(st7586_formats),
&st7586_mode, rotation, bufsize);
if (ret)
return ret;
/*
* we are using 8-bit data, so we are not actually swapping anything,
	 * but setting dbi->swap_bytes makes mipi_dbi_typec3_command() do the
* right thing and not use 16-bit transfers (which results in swapped
* bytes on little-endian systems and causes out of order data to be
* sent to the display).
*/
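	/*
	 * Illustrative note (added for clarity): with a 16-bit SPI transfer
	 * on a little-endian CPU, the buffer bytes {0xAB, 0xCD} are read as
	 * the word 0xCDAB and go out as 0xCD, 0xAB, i.e. in swapped order;
	 * swap_bytes avoids that by keeping the transfers at 8 bits.
	 */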
dbi->swap_bytes = true;
drm_mode_config_reset(drm);
ret = drm_dev_register(drm, 0);
if (ret)
return ret;
spi_set_drvdata(spi, drm);
drm_fbdev_generic_setup(drm, 0);
return 0;
}
static void st7586_remove(struct spi_device *spi)
{
struct drm_device *drm = spi_get_drvdata(spi);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
}
static void st7586_shutdown(struct spi_device *spi)
{
drm_atomic_helper_shutdown(spi_get_drvdata(spi));
}
static struct spi_driver st7586_spi_driver = {
.driver = {
.name = "st7586",
.owner = THIS_MODULE,
.of_match_table = st7586_of_match,
},
.id_table = st7586_id,
.probe = st7586_probe,
.remove = st7586_remove,
.shutdown = st7586_shutdown,
};
module_spi_driver(st7586_spi_driver);
MODULE_DESCRIPTION("Sitronix ST7586 DRM driver");
MODULE_AUTHOR("David Lechner <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/tiny/st7586.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* DRM driver for Ilitek ILI9341 panels
*
* Copyright 2018 David Lechner <[email protected]>
*
* Based on mi0283qt.c:
* Copyright 2016 Noralf Trønnes
*/
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/spi/spi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
#include <video/mipi_display.h>
#define ILI9341_FRMCTR1 0xb1
#define ILI9341_DISCTRL 0xb6
#define ILI9341_ETMOD 0xb7
#define ILI9341_PWCTRL1 0xc0
#define ILI9341_PWCTRL2 0xc1
#define ILI9341_VMCTRL1 0xc5
#define ILI9341_VMCTRL2 0xc7
#define ILI9341_PWCTRLA 0xcb
#define ILI9341_PWCTRLB 0xcf
#define ILI9341_PGAMCTRL 0xe0
#define ILI9341_NGAMCTRL 0xe1
#define ILI9341_DTCTRLA 0xe8
#define ILI9341_DTCTRLB 0xea
#define ILI9341_PWRSEQ 0xed
#define ILI9341_EN3GAM 0xf2
#define ILI9341_PUMPCTRL 0xf7
#define ILI9341_MADCTL_BGR BIT(3)
#define ILI9341_MADCTL_MV BIT(5)
#define ILI9341_MADCTL_MX BIT(6)
#define ILI9341_MADCTL_MY BIT(7)
static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
struct mipi_dbi *dbi = &dbidev->dbi;
u8 addr_mode;
int ret, idx;
if (!drm_dev_enter(pipe->crtc.dev, &idx))
return;
DRM_DEBUG_KMS("\n");
ret = mipi_dbi_poweron_conditional_reset(dbidev);
if (ret < 0)
goto out_exit;
if (ret == 1)
goto out_enable;
mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF);
mipi_dbi_command(dbi, ILI9341_PWCTRLB, 0x00, 0xc1, 0x30);
mipi_dbi_command(dbi, ILI9341_PWRSEQ, 0x64, 0x03, 0x12, 0x81);
mipi_dbi_command(dbi, ILI9341_DTCTRLA, 0x85, 0x00, 0x78);
mipi_dbi_command(dbi, ILI9341_PWCTRLA, 0x39, 0x2c, 0x00, 0x34, 0x02);
mipi_dbi_command(dbi, ILI9341_PUMPCTRL, 0x20);
mipi_dbi_command(dbi, ILI9341_DTCTRLB, 0x00, 0x00);
/* Power Control */
mipi_dbi_command(dbi, ILI9341_PWCTRL1, 0x23);
mipi_dbi_command(dbi, ILI9341_PWCTRL2, 0x10);
/* VCOM */
mipi_dbi_command(dbi, ILI9341_VMCTRL1, 0x3e, 0x28);
mipi_dbi_command(dbi, ILI9341_VMCTRL2, 0x86);
/* Memory Access Control */
mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT);
/* Frame Rate */
mipi_dbi_command(dbi, ILI9341_FRMCTR1, 0x00, 0x1b);
/* Gamma */
mipi_dbi_command(dbi, ILI9341_EN3GAM, 0x00);
mipi_dbi_command(dbi, MIPI_DCS_SET_GAMMA_CURVE, 0x01);
mipi_dbi_command(dbi, ILI9341_PGAMCTRL,
0x0f, 0x31, 0x2b, 0x0c, 0x0e, 0x08, 0x4e, 0xf1,
0x37, 0x07, 0x10, 0x03, 0x0e, 0x09, 0x00);
mipi_dbi_command(dbi, ILI9341_NGAMCTRL,
0x00, 0x0e, 0x14, 0x03, 0x11, 0x07, 0x31, 0xc1,
0x48, 0x08, 0x0f, 0x0c, 0x31, 0x36, 0x0f);
/* DDRAM */
mipi_dbi_command(dbi, ILI9341_ETMOD, 0x07);
/* Display */
mipi_dbi_command(dbi, ILI9341_DISCTRL, 0x08, 0x82, 0x27, 0x00);
mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
msleep(100);
mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
msleep(100);
out_enable:
switch (dbidev->rotation) {
default:
addr_mode = ILI9341_MADCTL_MX;
break;
case 90:
addr_mode = ILI9341_MADCTL_MV;
break;
case 180:
addr_mode = ILI9341_MADCTL_MY;
break;
case 270:
addr_mode = ILI9341_MADCTL_MV | ILI9341_MADCTL_MY |
ILI9341_MADCTL_MX;
break;
}
addr_mode |= ILI9341_MADCTL_BGR;
mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
mipi_dbi_enable_flush(dbidev, crtc_state, plane_state);
out_exit:
drm_dev_exit(idx);
}
static const struct drm_simple_display_pipe_funcs ili9341_pipe_funcs = {
DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(yx240qv29_enable),
};
static const struct drm_display_mode yx240qv29_mode = {
DRM_SIMPLE_MODE(240, 320, 37, 49),
};
DEFINE_DRM_GEM_DMA_FOPS(ili9341_fops);
static const struct drm_driver ili9341_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9341_fops,
DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9341",
.desc = "Ilitek ILI9341",
.date = "20180514",
.major = 1,
.minor = 0,
};
static const struct of_device_id ili9341_of_match[] = {
{ .compatible = "adafruit,yx240qv29" },
{ }
};
MODULE_DEVICE_TABLE(of, ili9341_of_match);
static const struct spi_device_id ili9341_id[] = {
{ "yx240qv29", 0 },
{ }
};
MODULE_DEVICE_TABLE(spi, ili9341_id);
static int ili9341_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
struct mipi_dbi_dev *dbidev;
struct drm_device *drm;
struct mipi_dbi *dbi;
struct gpio_desc *dc;
u32 rotation = 0;
int ret;
dbidev = devm_drm_dev_alloc(dev, &ili9341_driver,
struct mipi_dbi_dev, drm);
if (IS_ERR(dbidev))
return PTR_ERR(dbidev);
dbi = &dbidev->dbi;
drm = &dbidev->drm;
dbi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(dbi->reset))
return dev_err_probe(dev, PTR_ERR(dbi->reset), "Failed to get GPIO 'reset'\n");
dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW);
if (IS_ERR(dc))
return dev_err_probe(dev, PTR_ERR(dc), "Failed to get GPIO 'dc'\n");
dbidev->backlight = devm_of_find_backlight(dev);
if (IS_ERR(dbidev->backlight))
return PTR_ERR(dbidev->backlight);
device_property_read_u32(dev, "rotation", &rotation);
ret = mipi_dbi_spi_init(spi, dbi, dc);
if (ret)
return ret;
ret = mipi_dbi_dev_init(dbidev, &ili9341_pipe_funcs, &yx240qv29_mode, rotation);
if (ret)
return ret;
drm_mode_config_reset(drm);
ret = drm_dev_register(drm, 0);
if (ret)
return ret;
spi_set_drvdata(spi, drm);
drm_fbdev_generic_setup(drm, 0);
return 0;
}
static void ili9341_remove(struct spi_device *spi)
{
struct drm_device *drm = spi_get_drvdata(spi);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
}
static void ili9341_shutdown(struct spi_device *spi)
{
drm_atomic_helper_shutdown(spi_get_drvdata(spi));
}
static struct spi_driver ili9341_spi_driver = {
.driver = {
.name = "ili9341",
.of_match_table = ili9341_of_match,
},
.id_table = ili9341_id,
.probe = ili9341_probe,
.remove = ili9341_remove,
.shutdown = ili9341_shutdown,
};
module_spi_driver(ili9341_spi_driver);
MODULE_DESCRIPTION("Ilitek ILI9341 DRM driver");
MODULE_AUTHOR("David Lechner <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/tiny/ili9341.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/clk.h>
#include <linux/of_clk.h>
#include <linux/minmax.h>
#include <linux/of_address.h>
#include <linux/platform_data/simplefb.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <drm/drm_aperture.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#define DRIVER_NAME "simpledrm"
#define DRIVER_DESC "DRM driver for simple-framebuffer platform devices"
#define DRIVER_DATE "20200625"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
/*
* Helpers for simplefb
*/
static int
simplefb_get_validated_int(struct drm_device *dev, const char *name,
uint32_t value)
{
if (value > INT_MAX) {
drm_err(dev, "simplefb: invalid framebuffer %s of %u\n",
name, value);
return -EINVAL;
}
return (int)value;
}
static int
simplefb_get_validated_int0(struct drm_device *dev, const char *name,
uint32_t value)
{
if (!value) {
drm_err(dev, "simplefb: invalid framebuffer %s of %u\n",
name, value);
return -EINVAL;
}
return simplefb_get_validated_int(dev, name, value);
}
static const struct drm_format_info *
simplefb_get_validated_format(struct drm_device *dev, const char *format_name)
{
static const struct simplefb_format formats[] = SIMPLEFB_FORMATS;
const struct simplefb_format *fmt = formats;
const struct simplefb_format *end = fmt + ARRAY_SIZE(formats);
const struct drm_format_info *info;
if (!format_name) {
drm_err(dev, "simplefb: missing framebuffer format\n");
return ERR_PTR(-EINVAL);
}
while (fmt < end) {
if (!strcmp(format_name, fmt->name)) {
info = drm_format_info(fmt->fourcc);
if (!info)
return ERR_PTR(-EINVAL);
return info;
}
++fmt;
}
drm_err(dev, "simplefb: unknown framebuffer format %s\n",
format_name);
return ERR_PTR(-EINVAL);
}
static int
simplefb_get_width_pd(struct drm_device *dev,
const struct simplefb_platform_data *pd)
{
return simplefb_get_validated_int0(dev, "width", pd->width);
}
static int
simplefb_get_height_pd(struct drm_device *dev,
const struct simplefb_platform_data *pd)
{
return simplefb_get_validated_int0(dev, "height", pd->height);
}
static int
simplefb_get_stride_pd(struct drm_device *dev,
const struct simplefb_platform_data *pd)
{
return simplefb_get_validated_int(dev, "stride", pd->stride);
}
static const struct drm_format_info *
simplefb_get_format_pd(struct drm_device *dev,
const struct simplefb_platform_data *pd)
{
return simplefb_get_validated_format(dev, pd->format);
}
static int
simplefb_read_u32_of(struct drm_device *dev, struct device_node *of_node,
const char *name, u32 *value)
{
int ret = of_property_read_u32(of_node, name, value);
if (ret)
drm_err(dev, "simplefb: cannot parse framebuffer %s: error %d\n",
name, ret);
return ret;
}
static int
simplefb_read_string_of(struct drm_device *dev, struct device_node *of_node,
const char *name, const char **value)
{
int ret = of_property_read_string(of_node, name, value);
if (ret)
drm_err(dev, "simplefb: cannot parse framebuffer %s: error %d\n",
name, ret);
return ret;
}
static int
simplefb_get_width_of(struct drm_device *dev, struct device_node *of_node)
{
u32 width;
int ret = simplefb_read_u32_of(dev, of_node, "width", &width);
if (ret)
return ret;
return simplefb_get_validated_int0(dev, "width", width);
}
static int
simplefb_get_height_of(struct drm_device *dev, struct device_node *of_node)
{
u32 height;
int ret = simplefb_read_u32_of(dev, of_node, "height", &height);
if (ret)
return ret;
return simplefb_get_validated_int0(dev, "height", height);
}
static int
simplefb_get_stride_of(struct drm_device *dev, struct device_node *of_node)
{
u32 stride;
int ret = simplefb_read_u32_of(dev, of_node, "stride", &stride);
if (ret)
return ret;
return simplefb_get_validated_int(dev, "stride", stride);
}
static const struct drm_format_info *
simplefb_get_format_of(struct drm_device *dev, struct device_node *of_node)
{
const char *format;
int ret = simplefb_read_string_of(dev, of_node, "format", &format);
if (ret)
return ERR_PTR(ret);
return simplefb_get_validated_format(dev, format);
}
static struct resource *
simplefb_get_memory_of(struct drm_device *dev, struct device_node *of_node)
{
struct device_node *np;
struct resource *res;
int err;
np = of_parse_phandle(of_node, "memory-region", 0);
if (!np)
return NULL;
res = devm_kzalloc(dev->dev, sizeof(*res), GFP_KERNEL);
if (!res)
return ERR_PTR(-ENOMEM);
err = of_address_to_resource(np, 0, res);
if (err)
return ERR_PTR(err);
if (of_property_present(of_node, "reg"))
drm_warn(dev, "preferring \"memory-region\" over \"reg\" property\n");
return res;
}
/*
* Simple Framebuffer device
*/
struct simpledrm_device {
struct drm_device dev;
/* clocks */
#if defined CONFIG_OF && defined CONFIG_COMMON_CLK
unsigned int clk_count;
struct clk **clks;
#endif
/* regulators */
#if defined CONFIG_OF && defined CONFIG_REGULATOR
unsigned int regulator_count;
struct regulator **regulators;
#endif
/* simplefb settings */
struct drm_display_mode mode;
const struct drm_format_info *format;
unsigned int pitch;
/* memory management */
struct iosys_map screen_base;
/* modesetting */
uint32_t formats[8];
size_t nformats;
struct drm_plane primary_plane;
struct drm_crtc crtc;
struct drm_encoder encoder;
struct drm_connector connector;
};
static struct simpledrm_device *simpledrm_device_of_dev(struct drm_device *dev)
{
return container_of(dev, struct simpledrm_device, dev);
}
/*
* Hardware
*/
#if defined CONFIG_OF && defined CONFIG_COMMON_CLK
/*
* Clock handling code.
*
* Here we handle the clocks property of our "simple-framebuffer" dt node.
* This is necessary so that we can make sure that any clocks needed by
* the display engine that the bootloader set up for us (and for which it
* provided a simplefb dt node), stay up, for the life of the simplefb
* driver.
*
* When the driver unloads, we cleanly disable, and then release the clocks.
*
* We only complain about errors here, no action is taken as the most likely
* error can only happen due to a mismatch between the bootloader which set
* up simplefb, and the clock definitions in the device tree. Chances are
* that there are no adverse effects, and if there are, a clean teardown of
* the fb probe will not help us much either. So just complain and carry on,
* and hope that the user actually gets a working fb at the end of things.
*/
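/*
 * Illustrative device-tree fragment (hypothetical labels and addresses,
 * added for documentation only):
 *
 *	framebuffer@1d385000 {
 *		compatible = "simple-framebuffer";
 *		reg = <0x1d385000 0x80000>;
 *		clocks = <&clk_display>, <&clk_bus>;
 *	};
 *
 * Each clock listed in "clocks" is acquired and enabled below and kept
 * running for as long as this driver is bound.
 */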
static void simpledrm_device_release_clocks(void *res)
{
struct simpledrm_device *sdev = simpledrm_device_of_dev(res);
unsigned int i;
for (i = 0; i < sdev->clk_count; ++i) {
if (sdev->clks[i]) {
clk_disable_unprepare(sdev->clks[i]);
clk_put(sdev->clks[i]);
}
}
}
static int simpledrm_device_init_clocks(struct simpledrm_device *sdev)
{
struct drm_device *dev = &sdev->dev;
struct platform_device *pdev = to_platform_device(dev->dev);
struct device_node *of_node = pdev->dev.of_node;
struct clk *clock;
unsigned int i;
int ret;
if (dev_get_platdata(&pdev->dev) || !of_node)
return 0;
sdev->clk_count = of_clk_get_parent_count(of_node);
if (!sdev->clk_count)
return 0;
sdev->clks = drmm_kzalloc(dev, sdev->clk_count * sizeof(sdev->clks[0]),
GFP_KERNEL);
if (!sdev->clks)
return -ENOMEM;
for (i = 0; i < sdev->clk_count; ++i) {
clock = of_clk_get(of_node, i);
if (IS_ERR(clock)) {
ret = PTR_ERR(clock);
if (ret == -EPROBE_DEFER)
goto err;
drm_err(dev, "clock %u not found: %d\n", i, ret);
continue;
}
ret = clk_prepare_enable(clock);
if (ret) {
drm_err(dev, "failed to enable clock %u: %d\n",
i, ret);
clk_put(clock);
continue;
}
sdev->clks[i] = clock;
}
return devm_add_action_or_reset(&pdev->dev,
simpledrm_device_release_clocks,
sdev);
err:
while (i) {
--i;
if (sdev->clks[i]) {
clk_disable_unprepare(sdev->clks[i]);
clk_put(sdev->clks[i]);
}
}
return ret;
}
#else
static int simpledrm_device_init_clocks(struct simpledrm_device *sdev)
{
return 0;
}
#endif
#if defined CONFIG_OF && defined CONFIG_REGULATOR
#define SUPPLY_SUFFIX "-supply"
/*
* Regulator handling code.
*
* Here we handle the num-supplies and vin*-supply properties of our
* "simple-framebuffer" dt node. This is necessary so that we can make sure
* that any regulators needed by the display hardware that the bootloader
* set up for us (and for which it provided a simplefb dt node), stay up,
* for the life of the simplefb driver.
*
* When the driver unloads, we cleanly disable, and then release the
* regulators.
*
* We only complain about errors here, no action is taken as the most likely
* error can only happen due to a mismatch between the bootloader which set
* up simplefb, and the regulator definitions in the device tree. Chances are
* that there are no adverse effects, and if there are, a clean teardown of
* the fb probe will not help us much either. So just complain and carry on,
* and hope that the user actually gets a working fb at the end of things.
*/
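/*
 * Illustrative device-tree fragment (hypothetical labels, added for
 * documentation only):
 *
 *	framebuffer@0 {
 *		compatible = "simple-framebuffer";
 *		vin1-supply = <&reg_display>;
 *		vin2-supply = <&reg_backlight>;
 *	};
 *
 * Every property whose name ends in "-supply" is picked up by the loop
 * below and the referenced regulator stays enabled while the driver is
 * bound.
 */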
static void simpledrm_device_release_regulators(void *res)
{
struct simpledrm_device *sdev = simpledrm_device_of_dev(res);
unsigned int i;
for (i = 0; i < sdev->regulator_count; ++i) {
if (sdev->regulators[i]) {
regulator_disable(sdev->regulators[i]);
regulator_put(sdev->regulators[i]);
}
}
}
static int simpledrm_device_init_regulators(struct simpledrm_device *sdev)
{
struct drm_device *dev = &sdev->dev;
struct platform_device *pdev = to_platform_device(dev->dev);
struct device_node *of_node = pdev->dev.of_node;
struct property *prop;
struct regulator *regulator;
const char *p;
unsigned int count = 0, i = 0;
int ret;
if (dev_get_platdata(&pdev->dev) || !of_node)
return 0;
/* Count the number of regulator supplies */
for_each_property_of_node(of_node, prop) {
p = strstr(prop->name, SUPPLY_SUFFIX);
if (p && p != prop->name)
++count;
}
if (!count)
return 0;
sdev->regulators = drmm_kzalloc(dev,
count * sizeof(sdev->regulators[0]),
GFP_KERNEL);
if (!sdev->regulators)
return -ENOMEM;
for_each_property_of_node(of_node, prop) {
char name[32]; /* 32 is max size of property name */
size_t len;
p = strstr(prop->name, SUPPLY_SUFFIX);
if (!p || p == prop->name)
continue;
len = strlen(prop->name) - strlen(SUPPLY_SUFFIX) + 1;
strscpy(name, prop->name, min(sizeof(name), len));
regulator = regulator_get_optional(&pdev->dev, name);
if (IS_ERR(regulator)) {
ret = PTR_ERR(regulator);
if (ret == -EPROBE_DEFER)
goto err;
drm_err(dev, "regulator %s not found: %d\n",
name, ret);
continue;
}
ret = regulator_enable(regulator);
if (ret) {
drm_err(dev, "failed to enable regulator %u: %d\n",
i, ret);
regulator_put(regulator);
continue;
}
sdev->regulators[i++] = regulator;
}
sdev->regulator_count = i;
return devm_add_action_or_reset(&pdev->dev,
simpledrm_device_release_regulators,
sdev);
err:
while (i) {
--i;
if (sdev->regulators[i]) {
regulator_disable(sdev->regulators[i]);
regulator_put(sdev->regulators[i]);
}
}
return ret;
}
#else
static int simpledrm_device_init_regulators(struct simpledrm_device *sdev)
{
return 0;
}
#endif
/*
* Modesetting
*/
static const uint64_t simpledrm_primary_plane_format_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
static void simpledrm_primary_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
struct drm_device *dev = plane->dev;
struct simpledrm_device *sdev = simpledrm_device_of_dev(dev);
struct drm_atomic_helper_damage_iter iter;
struct drm_rect damage;
int ret, idx;
ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
if (ret)
return;
if (!drm_dev_enter(dev, &idx))
goto out_drm_gem_fb_end_cpu_access;
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drm_atomic_for_each_plane_damage(&iter, &damage) {
struct drm_rect dst_clip = plane_state->dst;
struct iosys_map dst = sdev->screen_base;
if (!drm_rect_intersect(&dst_clip, &damage))
continue;
iosys_map_incr(&dst, drm_fb_clip_offset(sdev->pitch, sdev->format, &dst_clip));
drm_fb_blit(&dst, &sdev->pitch, sdev->format->format, shadow_plane_state->data,
fb, &damage);
}
drm_dev_exit(idx);
out_drm_gem_fb_end_cpu_access:
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
}
static void simpledrm_primary_plane_helper_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_device *dev = plane->dev;
struct simpledrm_device *sdev = simpledrm_device_of_dev(dev);
int idx;
if (!drm_dev_enter(dev, &idx))
return;
/* Clear screen to black if disabled */
iosys_map_memset(&sdev->screen_base, 0, 0, sdev->pitch * sdev->mode.vdisplay);
drm_dev_exit(idx);
}
static const struct drm_plane_helper_funcs simpledrm_primary_plane_helper_funcs = {
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
.atomic_check = drm_plane_helper_atomic_check,
.atomic_update = simpledrm_primary_plane_helper_atomic_update,
.atomic_disable = simpledrm_primary_plane_helper_atomic_disable,
};
static const struct drm_plane_funcs simpledrm_primary_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
DRM_GEM_SHADOW_PLANE_FUNCS,
};
static enum drm_mode_status simpledrm_crtc_helper_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
struct simpledrm_device *sdev = simpledrm_device_of_dev(crtc->dev);
return drm_crtc_helper_mode_valid_fixed(crtc, mode, &sdev->mode);
}
/*
* The CRTC is always enabled. Screen updates are performed by
* the primary plane's atomic_update function. Disabling clears
* the screen in the primary plane's atomic_disable function.
*/
static const struct drm_crtc_helper_funcs simpledrm_crtc_helper_funcs = {
.mode_valid = simpledrm_crtc_helper_mode_valid,
.atomic_check = drm_crtc_helper_atomic_check,
};
static const struct drm_crtc_funcs simpledrm_crtc_funcs = {
.reset = drm_atomic_helper_crtc_reset,
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
static const struct drm_encoder_funcs simpledrm_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
static int simpledrm_connector_helper_get_modes(struct drm_connector *connector)
{
struct simpledrm_device *sdev = simpledrm_device_of_dev(connector->dev);
return drm_connector_helper_get_modes_fixed(connector, &sdev->mode);
}
static const struct drm_connector_helper_funcs simpledrm_connector_helper_funcs = {
.get_modes = simpledrm_connector_helper_get_modes,
};
static const struct drm_connector_funcs simpledrm_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_mode_config_funcs simpledrm_mode_config_funcs = {
.fb_create = drm_gem_fb_create_with_dirty,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
/*
* Init / Cleanup
*/
static struct drm_display_mode simpledrm_mode(unsigned int width,
unsigned int height,
unsigned int width_mm,
unsigned int height_mm)
{
const struct drm_display_mode mode = {
DRM_MODE_INIT(60, width, height, width_mm, height_mm)
};
return mode;
}
static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
struct platform_device *pdev)
{
const struct simplefb_platform_data *pd = dev_get_platdata(&pdev->dev);
struct device_node *of_node = pdev->dev.of_node;
struct simpledrm_device *sdev;
struct drm_device *dev;
int width, height, stride;
int width_mm = 0, height_mm = 0;
struct device_node *panel_node;
const struct drm_format_info *format;
struct resource *res, *mem = NULL;
struct drm_plane *primary_plane;
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_connector *connector;
unsigned long max_width, max_height;
size_t nformats;
int ret;
sdev = devm_drm_dev_alloc(&pdev->dev, drv, struct simpledrm_device, dev);
if (IS_ERR(sdev))
return ERR_CAST(sdev);
dev = &sdev->dev;
platform_set_drvdata(pdev, sdev);
/*
* Hardware settings
*/
ret = simpledrm_device_init_clocks(sdev);
if (ret)
return ERR_PTR(ret);
ret = simpledrm_device_init_regulators(sdev);
if (ret)
return ERR_PTR(ret);
if (pd) {
width = simplefb_get_width_pd(dev, pd);
if (width < 0)
return ERR_PTR(width);
height = simplefb_get_height_pd(dev, pd);
if (height < 0)
return ERR_PTR(height);
stride = simplefb_get_stride_pd(dev, pd);
if (stride < 0)
return ERR_PTR(stride);
format = simplefb_get_format_pd(dev, pd);
if (IS_ERR(format))
return ERR_CAST(format);
} else if (of_node) {
width = simplefb_get_width_of(dev, of_node);
if (width < 0)
return ERR_PTR(width);
height = simplefb_get_height_of(dev, of_node);
if (height < 0)
return ERR_PTR(height);
stride = simplefb_get_stride_of(dev, of_node);
if (stride < 0)
return ERR_PTR(stride);
format = simplefb_get_format_of(dev, of_node);
if (IS_ERR(format))
return ERR_CAST(format);
mem = simplefb_get_memory_of(dev, of_node);
if (IS_ERR(mem))
return ERR_CAST(mem);
panel_node = of_parse_phandle(of_node, "panel", 0);
if (panel_node) {
simplefb_read_u32_of(dev, panel_node, "width-mm", &width_mm);
simplefb_read_u32_of(dev, panel_node, "height-mm", &height_mm);
of_node_put(panel_node);
}
} else {
drm_err(dev, "no simplefb configuration found\n");
return ERR_PTR(-ENODEV);
}
if (!stride) {
stride = drm_format_info_min_pitch(format, 0, width);
if (drm_WARN_ON(dev, !stride))
return ERR_PTR(-EINVAL);
}
/*
* Assume a monitor resolution of 96 dpi if physical dimensions
* are not specified to get a somewhat reasonable screen size.
*/
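	/*
	 * Example (illustrative): assuming DRM_MODE_RES_MM(res, dpi)
	 * evaluates to res * 254 / (dpi * 10), a 1024x768 framebuffer
	 * without a panel node is reported as roughly 270 mm x 203 mm.
	 */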
if (!width_mm)
width_mm = DRM_MODE_RES_MM(width, 96ul);
if (!height_mm)
height_mm = DRM_MODE_RES_MM(height, 96ul);
sdev->mode = simpledrm_mode(width, height, width_mm, height_mm);
sdev->format = format;
sdev->pitch = stride;
drm_dbg(dev, "display mode={" DRM_MODE_FMT "}\n", DRM_MODE_ARG(&sdev->mode));
drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, stride=%d byte\n",
&format->format, width, height, stride);
/*
* Memory management
*/
if (mem) {
void *screen_base;
ret = devm_aperture_acquire_from_firmware(dev, mem->start, resource_size(mem));
if (ret) {
drm_err(dev, "could not acquire memory range %pr: %d\n", mem, ret);
return ERR_PTR(ret);
}
drm_dbg(dev, "using system memory framebuffer at %pr\n", mem);
screen_base = devm_memremap(dev->dev, mem->start, resource_size(mem), MEMREMAP_WC);
if (IS_ERR(screen_base))
return screen_base;
iosys_map_set_vaddr(&sdev->screen_base, screen_base);
} else {
void __iomem *screen_base;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return ERR_PTR(-EINVAL);
ret = devm_aperture_acquire_from_firmware(dev, res->start, resource_size(res));
if (ret) {
drm_err(dev, "could not acquire memory range %pr: %d\n", &res, ret);
return ERR_PTR(ret);
}
drm_dbg(dev, "using I/O memory framebuffer at %pr\n", res);
mem = devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
drv->name);
if (!mem) {
/*
* We cannot make this fatal. Sometimes this comes from magic
* spaces our resource handlers simply don't know about. Use
* the I/O-memory resource as-is and try to map that instead.
*/
drm_warn(dev, "could not acquire memory region %pr\n", res);
mem = res;
}
screen_base = devm_ioremap_wc(&pdev->dev, mem->start, resource_size(mem));
if (!screen_base)
return ERR_PTR(-ENOMEM);
iosys_map_set_vaddr_iomem(&sdev->screen_base, screen_base);
}
/*
* Modesetting
*/
ret = drmm_mode_config_init(dev);
if (ret)
return ERR_PTR(ret);
max_width = max_t(unsigned long, width, DRM_SHADOW_PLANE_MAX_WIDTH);
max_height = max_t(unsigned long, height, DRM_SHADOW_PLANE_MAX_HEIGHT);
dev->mode_config.min_width = width;
dev->mode_config.max_width = max_width;
dev->mode_config.min_height = height;
dev->mode_config.max_height = max_height;
dev->mode_config.preferred_depth = format->depth;
dev->mode_config.funcs = &simpledrm_mode_config_funcs;
/* Primary plane */
nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
sdev->formats, ARRAY_SIZE(sdev->formats));
primary_plane = &sdev->primary_plane;
ret = drm_universal_plane_init(dev, primary_plane, 0, &simpledrm_primary_plane_funcs,
sdev->formats, nformats,
simpledrm_primary_plane_format_modifiers,
DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret)
return ERR_PTR(ret);
drm_plane_helper_add(primary_plane, &simpledrm_primary_plane_helper_funcs);
drm_plane_enable_fb_damage_clips(primary_plane);
/* CRTC */
crtc = &sdev->crtc;
ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
&simpledrm_crtc_funcs, NULL);
if (ret)
return ERR_PTR(ret);
drm_crtc_helper_add(crtc, &simpledrm_crtc_helper_funcs);
/* Encoder */
encoder = &sdev->encoder;
ret = drm_encoder_init(dev, encoder, &simpledrm_encoder_funcs,
DRM_MODE_ENCODER_NONE, NULL);
if (ret)
return ERR_PTR(ret);
encoder->possible_crtcs = drm_crtc_mask(crtc);
/* Connector */
connector = &sdev->connector;
ret = drm_connector_init(dev, connector, &simpledrm_connector_funcs,
DRM_MODE_CONNECTOR_Unknown);
if (ret)
return ERR_PTR(ret);
drm_connector_helper_add(connector, &simpledrm_connector_helper_funcs);
drm_connector_set_panel_orientation_with_quirk(connector,
DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
width, height);
ret = drm_connector_attach_encoder(connector, encoder);
if (ret)
return ERR_PTR(ret);
drm_mode_config_reset(dev);
return sdev;
}
/*
* DRM driver
*/
DEFINE_DRM_GEM_FOPS(simpledrm_fops);
static struct drm_driver simpledrm_driver = {
DRM_GEM_SHMEM_DRIVER_OPS,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
.fops = &simpledrm_fops,
};
/*
* Platform driver
*/
static int simpledrm_probe(struct platform_device *pdev)
{
struct simpledrm_device *sdev;
struct drm_device *dev;
unsigned int color_mode;
int ret;
sdev = simpledrm_device_create(&simpledrm_driver, pdev);
if (IS_ERR(sdev))
return PTR_ERR(sdev);
dev = &sdev->dev;
ret = drm_dev_register(dev, 0);
if (ret)
return ret;
color_mode = drm_format_info_bpp(sdev->format, 0);
if (color_mode == 16)
color_mode = sdev->format->depth; // can be 15 or 16
drm_fbdev_generic_setup(dev, color_mode);
return 0;
}
static void simpledrm_remove(struct platform_device *pdev)
{
struct simpledrm_device *sdev = platform_get_drvdata(pdev);
struct drm_device *dev = &sdev->dev;
drm_dev_unplug(dev);
}
static const struct of_device_id simpledrm_of_match_table[] = {
{ .compatible = "simple-framebuffer", },
{ },
};
MODULE_DEVICE_TABLE(of, simpledrm_of_match_table);
static struct platform_driver simpledrm_platform_driver = {
.driver = {
.name = "simple-framebuffer", /* connect to sysfb */
.of_match_table = simpledrm_of_match_table,
},
.probe = simpledrm_probe,
.remove_new = simpledrm_remove,
};
module_platform_driver(simpledrm_platform_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/tiny/simpledrm.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <drm/drm_aperture.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#define DRIVER_NAME "ofdrm"
#define DRIVER_DESC "DRM driver for OF platform devices"
#define DRIVER_DATE "20220501"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define PCI_VENDOR_ID_ATI_R520 0x7100
#define PCI_VENDOR_ID_ATI_R600 0x9400
#define OFDRM_GAMMA_LUT_SIZE 256
/* Definitions used by the Avivo palette */
#define AVIVO_DC_LUT_RW_SELECT 0x6480
#define AVIVO_DC_LUT_RW_MODE 0x6484
#define AVIVO_DC_LUT_RW_INDEX 0x6488
#define AVIVO_DC_LUT_SEQ_COLOR 0x648c
#define AVIVO_DC_LUT_PWL_DATA 0x6490
#define AVIVO_DC_LUT_30_COLOR 0x6494
#define AVIVO_DC_LUT_READ_PIPE_SELECT 0x6498
#define AVIVO_DC_LUT_WRITE_EN_MASK 0x649c
#define AVIVO_DC_LUT_AUTOFILL 0x64a0
#define AVIVO_DC_LUTA_CONTROL 0x64c0
#define AVIVO_DC_LUTA_BLACK_OFFSET_BLUE 0x64c4
#define AVIVO_DC_LUTA_BLACK_OFFSET_GREEN 0x64c8
#define AVIVO_DC_LUTA_BLACK_OFFSET_RED 0x64cc
#define AVIVO_DC_LUTA_WHITE_OFFSET_BLUE 0x64d0
#define AVIVO_DC_LUTA_WHITE_OFFSET_GREEN 0x64d4
#define AVIVO_DC_LUTA_WHITE_OFFSET_RED 0x64d8
#define AVIVO_DC_LUTB_CONTROL 0x6cc0
#define AVIVO_DC_LUTB_BLACK_OFFSET_BLUE 0x6cc4
#define AVIVO_DC_LUTB_BLACK_OFFSET_GREEN 0x6cc8
#define AVIVO_DC_LUTB_BLACK_OFFSET_RED 0x6ccc
#define AVIVO_DC_LUTB_WHITE_OFFSET_BLUE 0x6cd0
#define AVIVO_DC_LUTB_WHITE_OFFSET_GREEN 0x6cd4
#define AVIVO_DC_LUTB_WHITE_OFFSET_RED 0x6cd8
enum ofdrm_model {
OFDRM_MODEL_UNKNOWN,
OFDRM_MODEL_MACH64, /* ATI Mach64 */
OFDRM_MODEL_RAGE128, /* ATI Rage128 */
OFDRM_MODEL_RAGE_M3A, /* ATI Rage Mobility M3 Head A */
OFDRM_MODEL_RAGE_M3B, /* ATI Rage Mobility M3 Head B */
OFDRM_MODEL_RADEON, /* ATI Radeon */
OFDRM_MODEL_GXT2000, /* IBM GXT2000 */
OFDRM_MODEL_AVIVO, /* ATI R5xx */
OFDRM_MODEL_QEMU, /* QEMU VGA */
};
/*
* Helpers for display nodes
*/
static int display_get_validated_int(struct drm_device *dev, const char *name, uint32_t value)
{
if (value > INT_MAX) {
drm_err(dev, "invalid framebuffer %s of %u\n", name, value);
return -EINVAL;
}
return (int)value;
}
static int display_get_validated_int0(struct drm_device *dev, const char *name, uint32_t value)
{
if (!value) {
drm_err(dev, "invalid framebuffer %s of %u\n", name, value);
return -EINVAL;
}
return display_get_validated_int(dev, name, value);
}
static const struct drm_format_info *display_get_validated_format(struct drm_device *dev,
u32 depth, bool big_endian)
{
const struct drm_format_info *info;
u32 format;
switch (depth) {
case 8:
format = drm_mode_legacy_fb_format(8, 8);
break;
case 15:
case 16:
format = drm_mode_legacy_fb_format(16, depth);
break;
case 32:
format = drm_mode_legacy_fb_format(32, 24);
break;
default:
drm_err(dev, "unsupported framebuffer depth %u\n", depth);
return ERR_PTR(-EINVAL);
}
/*
* DRM formats assume little-endian byte order. Update the format
* if the scanout buffer uses big-endian ordering.
*/
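	/*
	 * Example (illustrative): a big-endian firmware framebuffer with
	 * depth 32 stores each pixel as the bytes X, R, G, B in memory,
	 * which is the same layout as little-endian DRM_FORMAT_BGRX8888;
	 * hence the swap from XRGB8888 below.
	 */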
if (big_endian) {
switch (format) {
case DRM_FORMAT_XRGB8888:
format = DRM_FORMAT_BGRX8888;
break;
case DRM_FORMAT_ARGB8888:
format = DRM_FORMAT_BGRA8888;
break;
case DRM_FORMAT_RGB565:
format = DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN;
break;
case DRM_FORMAT_XRGB1555:
format = DRM_FORMAT_XRGB1555 | DRM_FORMAT_BIG_ENDIAN;
break;
default:
break;
}
}
info = drm_format_info(format);
if (!info) {
drm_err(dev, "cannot find framebuffer format for depth %u\n", depth);
return ERR_PTR(-EINVAL);
}
return info;
}
static int display_read_u32_of(struct drm_device *dev, struct device_node *of_node,
const char *name, u32 *value)
{
int ret = of_property_read_u32(of_node, name, value);
if (ret)
drm_err(dev, "cannot parse framebuffer %s: error %d\n", name, ret);
return ret;
}
static bool display_get_big_endian_of(struct drm_device *dev, struct device_node *of_node)
{
bool big_endian;
#ifdef __BIG_ENDIAN
big_endian = !of_property_read_bool(of_node, "little-endian");
#else
big_endian = of_property_read_bool(of_node, "big-endian");
#endif
return big_endian;
}
static int display_get_width_of(struct drm_device *dev, struct device_node *of_node)
{
u32 width;
int ret = display_read_u32_of(dev, of_node, "width", &width);
if (ret)
return ret;
return display_get_validated_int0(dev, "width", width);
}
static int display_get_height_of(struct drm_device *dev, struct device_node *of_node)
{
u32 height;
int ret = display_read_u32_of(dev, of_node, "height", &height);
if (ret)
return ret;
return display_get_validated_int0(dev, "height", height);
}
static int display_get_depth_of(struct drm_device *dev, struct device_node *of_node)
{
u32 depth;
int ret = display_read_u32_of(dev, of_node, "depth", &depth);
if (ret)
return ret;
return display_get_validated_int0(dev, "depth", depth);
}
static int display_get_linebytes_of(struct drm_device *dev, struct device_node *of_node)
{
u32 linebytes;
int ret = display_read_u32_of(dev, of_node, "linebytes", &linebytes);
if (ret)
return ret;
return display_get_validated_int(dev, "linebytes", linebytes);
}
static u64 display_get_address_of(struct drm_device *dev, struct device_node *of_node)
{
u32 address;
int ret;
/*
* Not all devices provide an address property, it's not
* a bug if this fails. The driver will try to find the
* framebuffer base address from the device's memory regions.
*/
ret = of_property_read_u32(of_node, "address", &address);
if (ret)
return OF_BAD_ADDR;
return address;
}
static bool is_avivo(u32 vendor, u32 device)
{
/* This will match most R5xx */
return (vendor == PCI_VENDOR_ID_ATI) &&
((device >= PCI_VENDOR_ID_ATI_R520 && device < 0x7800) ||
		(device >= PCI_VENDOR_ID_ATI_R600));
}
static enum ofdrm_model display_get_model_of(struct drm_device *dev, struct device_node *of_node)
{
enum ofdrm_model model = OFDRM_MODEL_UNKNOWN;
if (of_node_name_prefix(of_node, "ATY,Rage128")) {
model = OFDRM_MODEL_RAGE128;
} else if (of_node_name_prefix(of_node, "ATY,RageM3pA") ||
of_node_name_prefix(of_node, "ATY,RageM3p12A")) {
model = OFDRM_MODEL_RAGE_M3A;
} else if (of_node_name_prefix(of_node, "ATY,RageM3pB")) {
model = OFDRM_MODEL_RAGE_M3B;
} else if (of_node_name_prefix(of_node, "ATY,Rage6")) {
model = OFDRM_MODEL_RADEON;
} else if (of_node_name_prefix(of_node, "ATY,")) {
return OFDRM_MODEL_MACH64;
} else if (of_device_is_compatible(of_node, "pci1014,b7") ||
of_device_is_compatible(of_node, "pci1014,21c")) {
model = OFDRM_MODEL_GXT2000;
} else if (of_node_name_prefix(of_node, "vga,Display-")) {
struct device_node *of_parent;
const __be32 *vendor_p, *device_p;
/* Look for AVIVO initialized by SLOF */
of_parent = of_get_parent(of_node);
vendor_p = of_get_property(of_parent, "vendor-id", NULL);
device_p = of_get_property(of_parent, "device-id", NULL);
if (vendor_p && device_p) {
u32 vendor = be32_to_cpup(vendor_p);
u32 device = be32_to_cpup(device_p);
if (is_avivo(vendor, device))
model = OFDRM_MODEL_AVIVO;
}
of_node_put(of_parent);
} else if (of_device_is_compatible(of_node, "qemu,std-vga")) {
model = OFDRM_MODEL_QEMU;
}
return model;
}
/*
* Open Firmware display device
*/
struct ofdrm_device;
struct ofdrm_device_funcs {
void __iomem *(*cmap_ioremap)(struct ofdrm_device *odev,
struct device_node *of_node,
u64 fb_bas);
void (*cmap_write)(struct ofdrm_device *odev, unsigned char index,
unsigned char r, unsigned char g, unsigned char b);
};
struct ofdrm_device {
struct drm_device dev;
struct platform_device *pdev;
const struct ofdrm_device_funcs *funcs;
/* firmware-buffer settings */
struct iosys_map screen_base;
struct drm_display_mode mode;
const struct drm_format_info *format;
unsigned int pitch;
/* colormap */
void __iomem *cmap_base;
/* modesetting */
uint32_t formats[8];
struct drm_plane primary_plane;
struct drm_crtc crtc;
struct drm_encoder encoder;
struct drm_connector connector;
};
static struct ofdrm_device *ofdrm_device_of_dev(struct drm_device *dev)
{
return container_of(dev, struct ofdrm_device, dev);
}
/*
* Hardware
*/
#if defined(CONFIG_PCI)
static struct pci_dev *display_get_pci_dev_of(struct drm_device *dev, struct device_node *of_node)
{
const __be32 *vendor_p, *device_p;
u32 vendor, device;
struct pci_dev *pcidev;
vendor_p = of_get_property(of_node, "vendor-id", NULL);
if (!vendor_p)
return ERR_PTR(-ENODEV);
vendor = be32_to_cpup(vendor_p);
device_p = of_get_property(of_node, "device-id", NULL);
if (!device_p)
return ERR_PTR(-ENODEV);
device = be32_to_cpup(device_p);
pcidev = pci_get_device(vendor, device, NULL);
if (!pcidev)
return ERR_PTR(-ENODEV);
return pcidev;
}
static void ofdrm_pci_release(void *data)
{
struct pci_dev *pcidev = data;
pci_disable_device(pcidev);
}
static int ofdrm_device_init_pci(struct ofdrm_device *odev)
{
struct drm_device *dev = &odev->dev;
struct platform_device *pdev = to_platform_device(dev->dev);
struct device_node *of_node = pdev->dev.of_node;
struct pci_dev *pcidev;
int ret;
/*
* Never use pcim_ or other managed helpers on the returned PCI
* device. Otherwise, probing the native driver will fail for
* resource conflicts. PCI-device management has to be tied to
* the lifetime of the platform device until the native driver
* takes over.
*/
pcidev = display_get_pci_dev_of(dev, of_node);
if (IS_ERR(pcidev))
return 0; /* no PCI device found; ignore the error */
ret = pci_enable_device(pcidev);
if (ret) {
drm_err(dev, "pci_enable_device(%s) failed: %d\n",
dev_name(&pcidev->dev), ret);
return ret;
}
ret = devm_add_action_or_reset(&pdev->dev, ofdrm_pci_release, pcidev);
if (ret)
return ret;
return 0;
}
#else
static int ofdrm_device_init_pci(struct ofdrm_device *odev)
{
return 0;
}
#endif
/*
* OF display settings
*/
static struct resource *ofdrm_find_fb_resource(struct ofdrm_device *odev,
struct resource *fb_res)
{
struct platform_device *pdev = to_platform_device(odev->dev.dev);
struct resource *res, *max_res = NULL;
u32 i;
	for (i = 0; i < pdev->num_resources; ++i) {
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
if (!res)
break; /* all resources processed */
if (resource_size(res) < resource_size(fb_res))
continue; /* resource too small */
if (fb_res->start && resource_contains(res, fb_res))
return res; /* resource contains framebuffer */
if (!max_res || resource_size(res) > resource_size(max_res))
max_res = res; /* store largest resource as fallback */
}
return max_res;
}
/*
* Colormap / Palette
*/
static void __iomem *get_cmap_address_of(struct ofdrm_device *odev, struct device_node *of_node,
int bar_no, unsigned long offset, unsigned long size)
{
struct drm_device *dev = &odev->dev;
const __be32 *addr_p;
u64 max_size, address;
unsigned int flags;
void __iomem *mem;
addr_p = of_get_pci_address(of_node, bar_no, &max_size, &flags);
if (!addr_p)
addr_p = of_get_address(of_node, bar_no, &max_size, &flags);
if (!addr_p)
return IOMEM_ERR_PTR(-ENODEV);
if ((flags & (IORESOURCE_IO | IORESOURCE_MEM)) == 0)
return IOMEM_ERR_PTR(-ENODEV);
if ((offset + size) >= max_size)
return IOMEM_ERR_PTR(-ENODEV);
address = of_translate_address(of_node, addr_p);
if (address == OF_BAD_ADDR)
return IOMEM_ERR_PTR(-ENODEV);
mem = devm_ioremap(dev->dev, address + offset, size);
if (!mem)
return IOMEM_ERR_PTR(-ENOMEM);
return mem;
}
static void __iomem *ofdrm_mach64_cmap_ioremap(struct ofdrm_device *odev,
struct device_node *of_node,
u64 fb_base)
{
struct drm_device *dev = &odev->dev;
u64 address;
void __iomem *cmap_base;
address = fb_base & 0xff000000ul;
address += 0x7ff000;
cmap_base = devm_ioremap(dev->dev, address, 0x1000);
if (!cmap_base)
return IOMEM_ERR_PTR(-ENOMEM);
return cmap_base;
}
static void ofdrm_mach64_cmap_write(struct ofdrm_device *odev, unsigned char index,
unsigned char r, unsigned char g, unsigned char b)
{
void __iomem *addr = odev->cmap_base + 0xcc0;
void __iomem *data = odev->cmap_base + 0xcc0 + 1;
writeb(index, addr);
writeb(r, data);
writeb(g, data);
writeb(b, data);
}
static void __iomem *ofdrm_rage128_cmap_ioremap(struct ofdrm_device *odev,
struct device_node *of_node,
u64 fb_base)
{
return get_cmap_address_of(odev, of_node, 2, 0, 0x1fff);
}
static void ofdrm_rage128_cmap_write(struct ofdrm_device *odev, unsigned char index,
unsigned char r, unsigned char g, unsigned char b)
{
void __iomem *addr = odev->cmap_base + 0xb0;
void __iomem *data = odev->cmap_base + 0xb4;
u32 color = (r << 16) | (g << 8) | b;
writeb(index, addr);
writel(color, data);
}
static void __iomem *ofdrm_rage_m3a_cmap_ioremap(struct ofdrm_device *odev,
struct device_node *of_node,
u64 fb_base)
{
return get_cmap_address_of(odev, of_node, 2, 0, 0x1fff);
}
static void ofdrm_rage_m3a_cmap_write(struct ofdrm_device *odev, unsigned char index,
unsigned char r, unsigned char g, unsigned char b)
{
void __iomem *dac_ctl = odev->cmap_base + 0x58;
void __iomem *addr = odev->cmap_base + 0xb0;
void __iomem *data = odev->cmap_base + 0xb4;
u32 color = (r << 16) | (g << 8) | b;
u32 val;
/* Clear PALETTE_ACCESS_CNTL in DAC_CNTL */
val = readl(dac_ctl);
val &= ~0x20;
writel(val, dac_ctl);
/* Set color at palette index */
writeb(index, addr);
writel(color, data);
}
static void __iomem *ofdrm_rage_m3b_cmap_ioremap(struct ofdrm_device *odev,
struct device_node *of_node,
u64 fb_base)
{
return get_cmap_address_of(odev, of_node, 2, 0, 0x1fff);
}
static void ofdrm_rage_m3b_cmap_write(struct ofdrm_device *odev, unsigned char index,
unsigned char r, unsigned char g, unsigned char b)
{
void __iomem *dac_ctl = odev->cmap_base + 0x58;
void __iomem *addr = odev->cmap_base + 0xb0;
void __iomem *data = odev->cmap_base + 0xb4;
u32 color = (r << 16) | (g << 8) | b;
u32 val;
/* Set PALETTE_ACCESS_CNTL in DAC_CNTL */
val = readl(dac_ctl);
val |= 0x20;
writel(val, dac_ctl);
/* Set color at palette index */
writeb(index, addr);
writel(color, data);
}
static void __iomem *ofdrm_radeon_cmap_ioremap(struct ofdrm_device *odev,
struct device_node *of_node,
u64 fb_base)
{
return get_cmap_address_of(odev, of_node, 1, 0, 0x1fff);
}
static void __iomem *ofdrm_gxt2000_cmap_ioremap(struct ofdrm_device *odev,
struct device_node *of_node,
u64 fb_base)
{
return get_cmap_address_of(odev, of_node, 0, 0x6000, 0x1000);
}
static void ofdrm_gxt2000_cmap_write(struct ofdrm_device *odev, unsigned char index,
unsigned char r, unsigned char g, unsigned char b)
{
void __iomem *data = ((unsigned int __iomem *)odev->cmap_base) + index;
u32 color = (r << 16) | (g << 8) | b;
writel(color, data);
}
static void __iomem *ofdrm_avivo_cmap_ioremap(struct ofdrm_device *odev,
struct device_node *of_node,
u64 fb_base)
{
struct device_node *of_parent;
void __iomem *cmap_base;
of_parent = of_get_parent(of_node);
cmap_base = get_cmap_address_of(odev, of_parent, 0, 0, 0x10000);
of_node_put(of_parent);
return cmap_base;
}
static void ofdrm_avivo_cmap_write(struct ofdrm_device *odev, unsigned char index,
unsigned char r, unsigned char g, unsigned char b)
{
void __iomem *lutsel = odev->cmap_base + AVIVO_DC_LUT_RW_SELECT;
void __iomem *addr = odev->cmap_base + AVIVO_DC_LUT_RW_INDEX;
void __iomem *data = odev->cmap_base + AVIVO_DC_LUT_30_COLOR;
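	/*
	 * Note: the LUT entry holds 10 bits per component; shift each 8-bit
	 * value into the upper bits of its 10-bit field.
	 */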
u32 color = (r << 22) | (g << 12) | (b << 2);
/* Write to both LUTs for now */
writel(1, lutsel);
writeb(index, addr);
writel(color, data);
writel(0, lutsel);
writeb(index, addr);
writel(color, data);
}
static void __iomem *ofdrm_qemu_cmap_ioremap(struct ofdrm_device *odev,
struct device_node *of_node,
u64 fb_base)
{
static const __be32 io_of_addr[3] = {
cpu_to_be32(0x01000000),
cpu_to_be32(0x00),
cpu_to_be32(0x00),
};
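	/*
	 * The address cells above select the parent's PCI I/O space (space
	 * code 0x01 in the first cell, offset 0). The mapping below covers
	 * the standard VGA DAC registers: 0x3c8 is the palette write index
	 * and 0x3c9 the palette data port.
	 */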
struct drm_device *dev = &odev->dev;
u64 address;
void __iomem *cmap_base;
address = of_translate_address(of_node, io_of_addr);
if (address == OF_BAD_ADDR)
return IOMEM_ERR_PTR(-ENODEV);
cmap_base = devm_ioremap(dev->dev, address + 0x3c8, 2);
if (!cmap_base)
return IOMEM_ERR_PTR(-ENOMEM);
return cmap_base;
}
static void ofdrm_qemu_cmap_write(struct ofdrm_device *odev, unsigned char index,
unsigned char r, unsigned char g, unsigned char b)
{
void __iomem *addr = odev->cmap_base;
void __iomem *data = odev->cmap_base + 1;
writeb(index, addr);
writeb(r, data);
writeb(g, data);
writeb(b, data);
}
static void ofdrm_device_set_gamma_linear(struct ofdrm_device *odev,
const struct drm_format_info *format)
{
struct drm_device *dev = &odev->dev;
int i;
switch (format->format) {
case DRM_FORMAT_RGB565:
case DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN:
/* Use better interpolation, to take 32 values from 0 to 255 */
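		/*
		 * i * 8 + i / 4 spreads the 32 red/blue entries over 0..255
		 * (31 * 8 + 7 = 255); green has 6 bits, so i * 4 + i / 16
		 * spreads 64 entries over 0..255 (63 * 4 + 3 = 255).
		 */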
for (i = 0; i < OFDRM_GAMMA_LUT_SIZE / 8; i++) {
unsigned char r = i * 8 + i / 4;
unsigned char g = i * 4 + i / 16;
unsigned char b = i * 8 + i / 4;
odev->funcs->cmap_write(odev, i, r, g, b);
}
/* Green has one more bit, so add padding with 0 for red and blue. */
for (i = OFDRM_GAMMA_LUT_SIZE / 8; i < OFDRM_GAMMA_LUT_SIZE / 4; i++) {
unsigned char r = 0;
unsigned char g = i * 4 + i / 16;
unsigned char b = 0;
odev->funcs->cmap_write(odev, i, r, g, b);
}
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_BGRX8888:
for (i = 0; i < OFDRM_GAMMA_LUT_SIZE; i++)
odev->funcs->cmap_write(odev, i, i, i, i);
break;
default:
drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
&format->format);
break;
}
}
static void ofdrm_device_set_gamma(struct ofdrm_device *odev,
const struct drm_format_info *format,
struct drm_color_lut *lut)
{
struct drm_device *dev = &odev->dev;
int i;
switch (format->format) {
case DRM_FORMAT_RGB565:
case DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN:
/* Use better interpolation, to take 32 values from lut[0] to lut[255] */
for (i = 0; i < OFDRM_GAMMA_LUT_SIZE / 8; i++) {
unsigned char r = lut[i * 8 + i / 4].red >> 8;
unsigned char g = lut[i * 4 + i / 16].green >> 8;
unsigned char b = lut[i * 8 + i / 4].blue >> 8;
odev->funcs->cmap_write(odev, i, r, g, b);
}
/* Green has one more bit, so add padding with 0 for red and blue. */
for (i = OFDRM_GAMMA_LUT_SIZE / 8; i < OFDRM_GAMMA_LUT_SIZE / 4; i++) {
unsigned char r = 0;
unsigned char g = lut[i * 4 + i / 16].green >> 8;
unsigned char b = 0;
odev->funcs->cmap_write(odev, i, r, g, b);
}
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_BGRX8888:
for (i = 0; i < OFDRM_GAMMA_LUT_SIZE; i++) {
unsigned char r = lut[i].red >> 8;
unsigned char g = lut[i].green >> 8;
unsigned char b = lut[i].blue >> 8;
odev->funcs->cmap_write(odev, i, r, g, b);
}
break;
default:
drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
&format->format);
break;
}
}
/*
* Modesetting
*/
struct ofdrm_crtc_state {
struct drm_crtc_state base;
/* Primary-plane format; required for color mgmt. */
const struct drm_format_info *format;
};
static struct ofdrm_crtc_state *to_ofdrm_crtc_state(struct drm_crtc_state *base)
{
return container_of(base, struct ofdrm_crtc_state, base);
}
static void ofdrm_crtc_state_destroy(struct ofdrm_crtc_state *ofdrm_crtc_state)
{
__drm_atomic_helper_crtc_destroy_state(&ofdrm_crtc_state->base);
kfree(ofdrm_crtc_state);
}
static const uint64_t ofdrm_primary_plane_format_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
static int ofdrm_primary_plane_helper_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *new_state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(new_state, plane);
struct drm_framebuffer *new_fb = new_plane_state->fb;
struct drm_crtc *new_crtc = new_plane_state->crtc;
struct drm_crtc_state *new_crtc_state = NULL;
struct ofdrm_crtc_state *new_ofdrm_crtc_state;
int ret;
if (new_crtc)
new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
DRM_PLANE_NO_SCALING,
DRM_PLANE_NO_SCALING,
false, false);
if (ret)
return ret;
else if (!new_plane_state->visible)
return 0;
new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
new_ofdrm_crtc_state = to_ofdrm_crtc_state(new_crtc_state);
new_ofdrm_crtc_state->format = new_fb->format;
return 0;
}
static void ofdrm_primary_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_device *dev = plane->dev;
struct ofdrm_device *odev = ofdrm_device_of_dev(dev);
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
unsigned int dst_pitch = odev->pitch;
const struct drm_format_info *dst_format = odev->format;
struct drm_atomic_helper_damage_iter iter;
struct drm_rect damage;
int ret, idx;
ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
if (ret)
return;
if (!drm_dev_enter(dev, &idx))
goto out_drm_gem_fb_end_cpu_access;
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drm_atomic_for_each_plane_damage(&iter, &damage) {
struct iosys_map dst = odev->screen_base;
struct drm_rect dst_clip = plane_state->dst;
if (!drm_rect_intersect(&dst_clip, &damage))
continue;
iosys_map_incr(&dst, drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip));
drm_fb_blit(&dst, &dst_pitch, dst_format->format, shadow_plane_state->data, fb,
&damage);
}
drm_dev_exit(idx);
out_drm_gem_fb_end_cpu_access:
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
}
static void ofdrm_primary_plane_helper_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_device *dev = plane->dev;
struct ofdrm_device *odev = ofdrm_device_of_dev(dev);
struct iosys_map dst = odev->screen_base;
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
void __iomem *dst_vmap = dst.vaddr_iomem; /* TODO: Use mapping abstraction */
unsigned int dst_pitch = odev->pitch;
const struct drm_format_info *dst_format = odev->format;
struct drm_rect dst_clip;
unsigned long lines, linepixels, i;
int idx;
drm_rect_init(&dst_clip,
plane_state->src_x >> 16, plane_state->src_y >> 16,
plane_state->src_w >> 16, plane_state->src_h >> 16);
lines = drm_rect_height(&dst_clip);
linepixels = drm_rect_width(&dst_clip);
if (!drm_dev_enter(dev, &idx))
return;
/* Clear buffer to black if disabled */
dst_vmap += drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip);
for (i = 0; i < lines; ++i) {
memset_io(dst_vmap, 0, linepixels * dst_format->cpp[0]);
dst_vmap += dst_pitch;
}
drm_dev_exit(idx);
}
static const struct drm_plane_helper_funcs ofdrm_primary_plane_helper_funcs = {
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
.atomic_check = ofdrm_primary_plane_helper_atomic_check,
.atomic_update = ofdrm_primary_plane_helper_atomic_update,
.atomic_disable = ofdrm_primary_plane_helper_atomic_disable,
};
static const struct drm_plane_funcs ofdrm_primary_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
DRM_GEM_SHADOW_PLANE_FUNCS,
};
static enum drm_mode_status ofdrm_crtc_helper_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
struct ofdrm_device *odev = ofdrm_device_of_dev(crtc->dev);
return drm_crtc_helper_mode_valid_fixed(crtc, mode, &odev->mode);
}
static int ofdrm_crtc_helper_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *new_state)
{
static const size_t gamma_lut_length = OFDRM_GAMMA_LUT_SIZE * sizeof(struct drm_color_lut);
struct drm_device *dev = crtc->dev;
struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
int ret;
if (!new_crtc_state->enable)
return 0;
ret = drm_atomic_helper_check_crtc_primary_plane(new_crtc_state);
if (ret)
return ret;
if (new_crtc_state->color_mgmt_changed) {
struct drm_property_blob *gamma_lut = new_crtc_state->gamma_lut;
if (gamma_lut && (gamma_lut->length != gamma_lut_length)) {
drm_dbg(dev, "Incorrect gamma_lut length %zu\n", gamma_lut->length);
return -EINVAL;
}
}
return 0;
}
static void ofdrm_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct ofdrm_device *odev = ofdrm_device_of_dev(crtc->dev);
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
struct ofdrm_crtc_state *ofdrm_crtc_state = to_ofdrm_crtc_state(crtc_state);
if (crtc_state->enable && crtc_state->color_mgmt_changed) {
const struct drm_format_info *format = ofdrm_crtc_state->format;
if (crtc_state->gamma_lut)
ofdrm_device_set_gamma(odev, format, crtc_state->gamma_lut->data);
else
ofdrm_device_set_gamma_linear(odev, format);
}
}
/*
* The CRTC is always enabled. Screen updates are performed by
* the primary plane's atomic_update function. Disabling clears
* the screen in the primary plane's atomic_disable function.
*/
static const struct drm_crtc_helper_funcs ofdrm_crtc_helper_funcs = {
.mode_valid = ofdrm_crtc_helper_mode_valid,
.atomic_check = ofdrm_crtc_helper_atomic_check,
.atomic_flush = ofdrm_crtc_helper_atomic_flush,
};
static void ofdrm_crtc_reset(struct drm_crtc *crtc)
{
struct ofdrm_crtc_state *ofdrm_crtc_state =
kzalloc(sizeof(*ofdrm_crtc_state), GFP_KERNEL);
if (crtc->state)
ofdrm_crtc_state_destroy(to_ofdrm_crtc_state(crtc->state));
if (ofdrm_crtc_state)
__drm_atomic_helper_crtc_reset(crtc, &ofdrm_crtc_state->base);
else
__drm_atomic_helper_crtc_reset(crtc, NULL);
}
static struct drm_crtc_state *ofdrm_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_crtc_state *crtc_state = crtc->state;
struct ofdrm_crtc_state *new_ofdrm_crtc_state;
struct ofdrm_crtc_state *ofdrm_crtc_state;
if (drm_WARN_ON(dev, !crtc_state))
return NULL;
new_ofdrm_crtc_state = kzalloc(sizeof(*new_ofdrm_crtc_state), GFP_KERNEL);
if (!new_ofdrm_crtc_state)
return NULL;
ofdrm_crtc_state = to_ofdrm_crtc_state(crtc_state);
__drm_atomic_helper_crtc_duplicate_state(crtc, &new_ofdrm_crtc_state->base);
new_ofdrm_crtc_state->format = ofdrm_crtc_state->format;
return &new_ofdrm_crtc_state->base;
}
static void ofdrm_crtc_atomic_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state)
{
ofdrm_crtc_state_destroy(to_ofdrm_crtc_state(crtc_state));
}
static const struct drm_crtc_funcs ofdrm_crtc_funcs = {
.reset = ofdrm_crtc_reset,
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = ofdrm_crtc_atomic_duplicate_state,
.atomic_destroy_state = ofdrm_crtc_atomic_destroy_state,
};
static int ofdrm_connector_helper_get_modes(struct drm_connector *connector)
{
struct ofdrm_device *odev = ofdrm_device_of_dev(connector->dev);
return drm_connector_helper_get_modes_fixed(connector, &odev->mode);
}
static const struct drm_connector_helper_funcs ofdrm_connector_helper_funcs = {
.get_modes = ofdrm_connector_helper_get_modes,
};
static const struct drm_connector_funcs ofdrm_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_mode_config_funcs ofdrm_mode_config_funcs = {
.fb_create = drm_gem_fb_create_with_dirty,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
/*
* Init / Cleanup
*/
static const struct ofdrm_device_funcs ofdrm_unknown_device_funcs = {
};
static const struct ofdrm_device_funcs ofdrm_mach64_device_funcs = {
.cmap_ioremap = ofdrm_mach64_cmap_ioremap,
.cmap_write = ofdrm_mach64_cmap_write,
};
static const struct ofdrm_device_funcs ofdrm_rage128_device_funcs = {
.cmap_ioremap = ofdrm_rage128_cmap_ioremap,
.cmap_write = ofdrm_rage128_cmap_write,
};
static const struct ofdrm_device_funcs ofdrm_rage_m3a_device_funcs = {
.cmap_ioremap = ofdrm_rage_m3a_cmap_ioremap,
.cmap_write = ofdrm_rage_m3a_cmap_write,
};
static const struct ofdrm_device_funcs ofdrm_rage_m3b_device_funcs = {
.cmap_ioremap = ofdrm_rage_m3b_cmap_ioremap,
.cmap_write = ofdrm_rage_m3b_cmap_write,
};
static const struct ofdrm_device_funcs ofdrm_radeon_device_funcs = {
.cmap_ioremap = ofdrm_radeon_cmap_ioremap,
.cmap_write = ofdrm_rage128_cmap_write, /* same as Rage128 */
};
static const struct ofdrm_device_funcs ofdrm_gxt2000_device_funcs = {
.cmap_ioremap = ofdrm_gxt2000_cmap_ioremap,
.cmap_write = ofdrm_gxt2000_cmap_write,
};
static const struct ofdrm_device_funcs ofdrm_avivo_device_funcs = {
.cmap_ioremap = ofdrm_avivo_cmap_ioremap,
.cmap_write = ofdrm_avivo_cmap_write,
};
static const struct ofdrm_device_funcs ofdrm_qemu_device_funcs = {
.cmap_ioremap = ofdrm_qemu_cmap_ioremap,
.cmap_write = ofdrm_qemu_cmap_write,
};
static struct drm_display_mode ofdrm_mode(unsigned int width, unsigned int height)
{
/*
* Assume a monitor resolution of 96 dpi to
* get a somewhat reasonable screen size.
*/
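	/* For example, a 1024-pixel-wide display works out to about 270 mm. */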
const struct drm_display_mode mode = {
DRM_MODE_INIT(60, width, height,
DRM_MODE_RES_MM(width, 96ul),
DRM_MODE_RES_MM(height, 96ul))
};
return mode;
}
static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
struct platform_device *pdev)
{
struct device_node *of_node = pdev->dev.of_node;
struct ofdrm_device *odev;
struct drm_device *dev;
enum ofdrm_model model;
bool big_endian;
int width, height, depth, linebytes;
const struct drm_format_info *format;
u64 address;
resource_size_t fb_size, fb_base, fb_pgbase, fb_pgsize;
struct resource *res, *mem;
void __iomem *screen_base;
struct drm_plane *primary_plane;
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_connector *connector;
unsigned long max_width, max_height;
size_t nformats;
int ret;
odev = devm_drm_dev_alloc(&pdev->dev, drv, struct ofdrm_device, dev);
if (IS_ERR(odev))
return ERR_CAST(odev);
dev = &odev->dev;
platform_set_drvdata(pdev, dev);
ret = ofdrm_device_init_pci(odev);
if (ret)
return ERR_PTR(ret);
/*
* OF display-node settings
*/
model = display_get_model_of(dev, of_node);
drm_dbg(dev, "detected model %d\n", model);
switch (model) {
case OFDRM_MODEL_UNKNOWN:
odev->funcs = &ofdrm_unknown_device_funcs;
break;
case OFDRM_MODEL_MACH64:
odev->funcs = &ofdrm_mach64_device_funcs;
break;
case OFDRM_MODEL_RAGE128:
odev->funcs = &ofdrm_rage128_device_funcs;
break;
case OFDRM_MODEL_RAGE_M3A:
odev->funcs = &ofdrm_rage_m3a_device_funcs;
break;
case OFDRM_MODEL_RAGE_M3B:
odev->funcs = &ofdrm_rage_m3b_device_funcs;
break;
case OFDRM_MODEL_RADEON:
odev->funcs = &ofdrm_radeon_device_funcs;
break;
case OFDRM_MODEL_GXT2000:
odev->funcs = &ofdrm_gxt2000_device_funcs;
break;
case OFDRM_MODEL_AVIVO:
odev->funcs = &ofdrm_avivo_device_funcs;
break;
case OFDRM_MODEL_QEMU:
odev->funcs = &ofdrm_qemu_device_funcs;
break;
}
big_endian = display_get_big_endian_of(dev, of_node);
width = display_get_width_of(dev, of_node);
if (width < 0)
return ERR_PTR(width);
height = display_get_height_of(dev, of_node);
if (height < 0)
return ERR_PTR(height);
depth = display_get_depth_of(dev, of_node);
if (depth < 0)
return ERR_PTR(depth);
linebytes = display_get_linebytes_of(dev, of_node);
if (linebytes < 0)
return ERR_PTR(linebytes);
format = display_get_validated_format(dev, depth, big_endian);
if (IS_ERR(format))
return ERR_CAST(format);
if (!linebytes) {
linebytes = drm_format_info_min_pitch(format, 0, width);
if (drm_WARN_ON(dev, !linebytes))
return ERR_PTR(-EINVAL);
}
fb_size = linebytes * height;
/*
* Try to figure out the address of the framebuffer. Unfortunately, Open
* Firmware doesn't provide a standard way to do so. All we can do is a
* dodgy heuristic that happens to work in practice.
*
* On most machines, the "address" property contains what we need, though
* not on Matrox cards found in IBM machines. What appears to give good
* results is to go through the PCI ranges and pick one that encloses the
* "address" property. If none match, we pick the largest.
*/
address = display_get_address_of(dev, of_node);
if (address != OF_BAD_ADDR) {
struct resource fb_res = DEFINE_RES_MEM(address, fb_size);
res = ofdrm_find_fb_resource(odev, &fb_res);
if (!res)
return ERR_PTR(-EINVAL);
if (resource_contains(res, &fb_res))
fb_base = address;
else
fb_base = res->start;
} else {
struct resource fb_res = DEFINE_RES_MEM(0u, fb_size);
res = ofdrm_find_fb_resource(odev, &fb_res);
if (!res)
return ERR_PTR(-EINVAL);
fb_base = res->start;
}
/*
* I/O resources
*/
fb_pgbase = round_down(fb_base, PAGE_SIZE);
fb_pgsize = fb_base - fb_pgbase + round_up(fb_size, PAGE_SIZE);
ret = devm_aperture_acquire_from_firmware(dev, fb_pgbase, fb_pgsize);
if (ret) {
drm_err(dev, "could not acquire memory range %pr: error %d\n", &res, ret);
return ERR_PTR(ret);
}
mem = devm_request_mem_region(&pdev->dev, fb_pgbase, fb_pgsize, drv->name);
if (!mem) {
drm_warn(dev, "could not acquire memory region %pr\n", &res);
return ERR_PTR(-ENOMEM);
}
screen_base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
if (!screen_base)
return ERR_PTR(-ENOMEM);
if (odev->funcs->cmap_ioremap) {
void __iomem *cmap_base = odev->funcs->cmap_ioremap(odev, of_node, fb_base);
if (IS_ERR(cmap_base)) {
/* Don't fail; continue without colormap */
drm_warn(dev, "could not find colormap: error %ld\n", PTR_ERR(cmap_base));
} else {
odev->cmap_base = cmap_base;
}
}
/*
* Firmware framebuffer
*/
iosys_map_set_vaddr_iomem(&odev->screen_base, screen_base);
odev->mode = ofdrm_mode(width, height);
odev->format = format;
odev->pitch = linebytes;
drm_dbg(dev, "display mode={" DRM_MODE_FMT "}\n", DRM_MODE_ARG(&odev->mode));
drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, linebytes=%d byte\n",
&format->format, width, height, linebytes);
/*
* Mode-setting pipeline
*/
ret = drmm_mode_config_init(dev);
if (ret)
return ERR_PTR(ret);
max_width = max_t(unsigned long, width, DRM_SHADOW_PLANE_MAX_WIDTH);
max_height = max_t(unsigned long, height, DRM_SHADOW_PLANE_MAX_HEIGHT);
dev->mode_config.min_width = width;
dev->mode_config.max_width = max_width;
dev->mode_config.min_height = height;
dev->mode_config.max_height = max_height;
dev->mode_config.funcs = &ofdrm_mode_config_funcs;
dev->mode_config.preferred_depth = format->depth;
dev->mode_config.quirk_addfb_prefer_host_byte_order = true;
/* Primary plane */
nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
odev->formats, ARRAY_SIZE(odev->formats));
primary_plane = &odev->primary_plane;
ret = drm_universal_plane_init(dev, primary_plane, 0, &ofdrm_primary_plane_funcs,
odev->formats, nformats,
ofdrm_primary_plane_format_modifiers,
DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret)
return ERR_PTR(ret);
drm_plane_helper_add(primary_plane, &ofdrm_primary_plane_helper_funcs);
drm_plane_enable_fb_damage_clips(primary_plane);
/* CRTC */
crtc = &odev->crtc;
ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
&ofdrm_crtc_funcs, NULL);
if (ret)
return ERR_PTR(ret);
drm_crtc_helper_add(crtc, &ofdrm_crtc_helper_funcs);
if (odev->cmap_base) {
drm_mode_crtc_set_gamma_size(crtc, OFDRM_GAMMA_LUT_SIZE);
drm_crtc_enable_color_mgmt(crtc, 0, false, OFDRM_GAMMA_LUT_SIZE);
}
/* Encoder */
encoder = &odev->encoder;
ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_NONE);
if (ret)
return ERR_PTR(ret);
encoder->possible_crtcs = drm_crtc_mask(crtc);
/* Connector */
connector = &odev->connector;
ret = drm_connector_init(dev, connector, &ofdrm_connector_funcs,
DRM_MODE_CONNECTOR_Unknown);
if (ret)
return ERR_PTR(ret);
drm_connector_helper_add(connector, &ofdrm_connector_helper_funcs);
drm_connector_set_panel_orientation_with_quirk(connector,
DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
width, height);
ret = drm_connector_attach_encoder(connector, encoder);
if (ret)
return ERR_PTR(ret);
drm_mode_config_reset(dev);
return odev;
}
/*
* DRM driver
*/
DEFINE_DRM_GEM_FOPS(ofdrm_fops);
static struct drm_driver ofdrm_driver = {
DRM_GEM_SHMEM_DRIVER_OPS,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
.fops = &ofdrm_fops,
};
/*
* Platform driver
*/
static int ofdrm_probe(struct platform_device *pdev)
{
struct ofdrm_device *odev;
struct drm_device *dev;
unsigned int color_mode;
int ret;
odev = ofdrm_device_create(&ofdrm_driver, pdev);
if (IS_ERR(odev))
return PTR_ERR(odev);
dev = &odev->dev;
ret = drm_dev_register(dev, 0);
if (ret)
return ret;
color_mode = drm_format_info_bpp(odev->format, 0);
if (color_mode == 16)
color_mode = odev->format->depth; // can be 15 or 16
drm_fbdev_generic_setup(dev, color_mode);
return 0;
}
static void ofdrm_remove(struct platform_device *pdev)
{
struct drm_device *dev = platform_get_drvdata(pdev);
drm_dev_unplug(dev);
}
static const struct of_device_id ofdrm_of_match_display[] = {
{ .compatible = "display", },
{ },
};
MODULE_DEVICE_TABLE(of, ofdrm_of_match_display);
static struct platform_driver ofdrm_platform_driver = {
.driver = {
.name = "of-display",
.of_match_table = ofdrm_of_match_display,
},
.probe = ofdrm_probe,
.remove_new = ofdrm_remove,
};
module_platform_driver(ofdrm_platform_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/tiny/ofdrm.c |
// SPDX-License-Identifier: GPL-2.0+
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/spi/spi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
#include <video/mipi_display.h>
#define ILI9163_FRMCTR1 0xb1
#define ILI9163_PWCTRL1 0xc0
#define ILI9163_PWCTRL2 0xc1
#define ILI9163_VMCTRL1 0xc5
#define ILI9163_VMCTRL2 0xc7
#define ILI9163_PWCTRLA 0xcb
#define ILI9163_PWCTRLB 0xcf
#define ILI9163_EN3GAM 0xf2
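/*
 * Memory Access Control bits: MY/MX flip the row/column address order,
 * MV swaps rows and columns (used for the 90/270 degree rotations below)
 * and BGR selects BGR colour order.
 */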
#define ILI9163_MADCTL_BGR BIT(3)
#define ILI9163_MADCTL_MV BIT(5)
#define ILI9163_MADCTL_MX BIT(6)
#define ILI9163_MADCTL_MY BIT(7)
static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
struct mipi_dbi *dbi = &dbidev->dbi;
u8 addr_mode;
int ret, idx;
if (!drm_dev_enter(pipe->crtc.dev, &idx))
return;
DRM_DEBUG_KMS("\n");
ret = mipi_dbi_poweron_conditional_reset(dbidev);
if (ret < 0)
goto out_exit;
if (ret == 1)
goto out_enable;
/* Gamma */
mipi_dbi_command(dbi, MIPI_DCS_SET_GAMMA_CURVE, 0x04);
mipi_dbi_command(dbi, ILI9163_EN3GAM, 0x00);
/* Frame Rate */
mipi_dbi_command(dbi, ILI9163_FRMCTR1, 0x0a, 0x14);
/* Power Control */
mipi_dbi_command(dbi, ILI9163_PWCTRL1, 0x0a, 0x00);
mipi_dbi_command(dbi, ILI9163_PWCTRL2, 0x02);
/* VCOM */
mipi_dbi_command(dbi, ILI9163_VMCTRL1, 0x2f, 0x3e);
mipi_dbi_command(dbi, ILI9163_VMCTRL2, 0x40);
/* Memory Access Control */
mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT);
mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
msleep(100);
mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
msleep(100);
out_enable:
switch (dbidev->rotation) {
default:
addr_mode = ILI9163_MADCTL_MX | ILI9163_MADCTL_MY;
break;
case 90:
addr_mode = ILI9163_MADCTL_MX | ILI9163_MADCTL_MV;
break;
case 180:
addr_mode = 0;
break;
case 270:
addr_mode = ILI9163_MADCTL_MY | ILI9163_MADCTL_MV;
break;
}
addr_mode |= ILI9163_MADCTL_BGR;
mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
mipi_dbi_enable_flush(dbidev, crtc_state, plane_state);
out_exit:
drm_dev_exit(idx);
}
static const struct drm_simple_display_pipe_funcs ili9163_pipe_funcs = {
DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(yx240qv29_enable),
};
static const struct drm_display_mode yx240qv29_mode = {
DRM_SIMPLE_MODE(128, 160, 28, 35),
};
DEFINE_DRM_GEM_DMA_FOPS(ili9163_fops);
static struct drm_driver ili9163_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9163_fops,
DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9163",
.desc = "Ilitek ILI9163",
.date = "20210208",
.major = 1,
.minor = 0,
};
static const struct of_device_id ili9163_of_match[] = {
{ .compatible = "newhaven,1.8-128160EF" },
{ }
};
MODULE_DEVICE_TABLE(of, ili9163_of_match);
static const struct spi_device_id ili9163_id[] = {
{ "nhd-1.8-128160EF", 0 },
{ }
};
MODULE_DEVICE_TABLE(spi, ili9163_id);
static int ili9163_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
struct mipi_dbi_dev *dbidev;
struct drm_device *drm;
struct mipi_dbi *dbi;
struct gpio_desc *dc;
u32 rotation = 0;
int ret;
dbidev = devm_drm_dev_alloc(dev, &ili9163_driver,
struct mipi_dbi_dev, drm);
if (IS_ERR(dbidev))
return PTR_ERR(dbidev);
dbi = &dbidev->dbi;
drm = &dbidev->drm;
spi_set_drvdata(spi, drm);
dbi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(dbi->reset)) {
DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
return PTR_ERR(dbi->reset);
}
dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW);
if (IS_ERR(dc)) {
DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n");
return PTR_ERR(dc);
}
dbidev->backlight = devm_of_find_backlight(dev);
if (IS_ERR(dbidev->backlight))
return PTR_ERR(dbidev->backlight);
device_property_read_u32(dev, "rotation", &rotation);
ret = mipi_dbi_spi_init(spi, dbi, dc);
if (ret)
return ret;
ret = mipi_dbi_dev_init(dbidev, &ili9163_pipe_funcs, &yx240qv29_mode, rotation);
if (ret)
return ret;
drm_mode_config_reset(drm);
ret = drm_dev_register(drm, 0);
if (ret)
return ret;
drm_fbdev_generic_setup(drm, 0);
return 0;
}
static void ili9163_remove(struct spi_device *spi)
{
struct drm_device *drm = spi_get_drvdata(spi);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
}
static void ili9163_shutdown(struct spi_device *spi)
{
drm_atomic_helper_shutdown(spi_get_drvdata(spi));
}
static struct spi_driver ili9163_spi_driver = {
.driver = {
.name = "ili9163",
.of_match_table = ili9163_of_match,
},
.id_table = ili9163_id,
.probe = ili9163_probe,
.remove = ili9163_remove,
.shutdown = ili9163_shutdown,
};
module_spi_driver(ili9163_spi_driver);
MODULE_DESCRIPTION("Ilitek ILI9163 DRM driver");
MODULE_AUTHOR("Daniel Mack <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/tiny/ili9163.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2019 Hans de Goede <[email protected]>
*/
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/usb.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_file.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
static bool eco_mode;
module_param(eco_mode, bool, 0644);
MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)");
#define DRIVER_NAME "gm12u320"
#define DRIVER_DESC "Grain Media GM12U320 USB projector display"
#define DRIVER_DATE "2019"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
/*
* The DLP has an actual width of 854 pixels, but that is not a multiple
* of 8, breaking things left and right, so we export a width of 848.
*/
#define GM12U320_USER_WIDTH 848
#define GM12U320_REAL_WIDTH 854
#define GM12U320_HEIGHT 480
#define GM12U320_BLOCK_COUNT 20
#define GM12U320_ERR(fmt, ...) \
DRM_DEV_ERROR(gm12u320->dev.dev, fmt, ##__VA_ARGS__)
#define MISC_RCV_EPT 1
#define DATA_RCV_EPT 2
#define DATA_SND_EPT 3
#define MISC_SND_EPT 4
#define DATA_BLOCK_HEADER_SIZE 84
#define DATA_BLOCK_CONTENT_SIZE 64512
#define DATA_BLOCK_FOOTER_SIZE 20
#define DATA_BLOCK_SIZE (DATA_BLOCK_HEADER_SIZE + \
DATA_BLOCK_CONTENT_SIZE + \
DATA_BLOCK_FOOTER_SIZE)
#define DATA_LAST_BLOCK_CONTENT_SIZE 4032
#define DATA_LAST_BLOCK_SIZE (DATA_BLOCK_HEADER_SIZE + \
DATA_LAST_BLOCK_CONTENT_SIZE + \
DATA_BLOCK_FOOTER_SIZE)
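/*
 * 19 blocks with 64512 bytes of payload each plus the 4032-byte last block
 * add up to 854 * 480 * 3 bytes, i.e. exactly one packed RGB888 frame at
 * the panel's real width.
 */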
#define CMD_SIZE 31
#define READ_STATUS_SIZE 13
#define MISC_VALUE_SIZE 4
#define CMD_TIMEOUT 200
#define DATA_TIMEOUT 1000
#define IDLE_TIMEOUT 2000
#define FIRST_FRAME_TIMEOUT 2000
#define MISC_REQ_GET_SET_ECO_A 0xff
#define MISC_REQ_GET_SET_ECO_B 0x35
/* Windows driver does once every second, with arg d = 1, other args 0 */
#define MISC_REQ_UNKNOWN1_A 0xff
#define MISC_REQ_UNKNOWN1_B 0x38
/* Windows driver does this on init, with arg a, b = 0, c = 0xa0, d = 4 */
#define MISC_REQ_UNKNOWN2_A 0xa5
#define MISC_REQ_UNKNOWN2_B 0x00
struct gm12u320_device {
struct drm_device dev;
struct device *dmadev;
struct drm_simple_display_pipe pipe;
struct drm_connector conn;
unsigned char *cmd_buf;
unsigned char *data_buf[GM12U320_BLOCK_COUNT];
struct {
struct delayed_work work;
struct mutex lock;
struct drm_framebuffer *fb;
struct drm_rect rect;
int frame;
int draw_status_timeout;
struct iosys_map src_map;
} fb_update;
};
#define to_gm12u320(__dev) container_of(__dev, struct gm12u320_device, dev)
static const char cmd_data[CMD_SIZE] = {
0x55, 0x53, 0x42, 0x43, 0x00, 0x00, 0x00, 0x00,
0x68, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x10, 0xff,
0x00, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x80, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
static const char cmd_draw[CMD_SIZE] = {
0x55, 0x53, 0x42, 0x43, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0xfe,
0x00, 0x00, 0x00, 0xc0, 0xd1, 0x05, 0x00, 0x40,
0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00
};
static const char cmd_misc[CMD_SIZE] = {
0x55, 0x53, 0x42, 0x43, 0x00, 0x00, 0x00, 0x00,
0x04, 0x00, 0x00, 0x00, 0x80, 0x01, 0x10, 0xfd,
0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
static const char data_block_header[DATA_BLOCK_HEADER_SIZE] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xfb, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x04, 0x15, 0x00, 0x00, 0xfc, 0x00, 0x00,
0x01, 0x00, 0x00, 0xdb
};
static const char data_last_block_header[DATA_BLOCK_HEADER_SIZE] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xfb, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x2a, 0x00, 0x20, 0x00, 0xc0, 0x0f, 0x00, 0x00,
0x01, 0x00, 0x00, 0xd7
};
static const char data_block_footer[DATA_BLOCK_FOOTER_SIZE] = {
0xfb, 0x14, 0x02, 0x20, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x4f
};
static inline struct usb_device *gm12u320_to_usb_device(struct gm12u320_device *gm12u320)
{
return interface_to_usbdev(to_usb_interface(gm12u320->dev.dev));
}
static int gm12u320_usb_alloc(struct gm12u320_device *gm12u320)
{
int i, block_size;
const char *hdr;
gm12u320->cmd_buf = drmm_kmalloc(&gm12u320->dev, CMD_SIZE, GFP_KERNEL);
if (!gm12u320->cmd_buf)
return -ENOMEM;
for (i = 0; i < GM12U320_BLOCK_COUNT; i++) {
if (i == GM12U320_BLOCK_COUNT - 1) {
block_size = DATA_LAST_BLOCK_SIZE;
hdr = data_last_block_header;
} else {
block_size = DATA_BLOCK_SIZE;
hdr = data_block_header;
}
gm12u320->data_buf[i] = drmm_kzalloc(&gm12u320->dev,
block_size, GFP_KERNEL);
if (!gm12u320->data_buf[i])
return -ENOMEM;
memcpy(gm12u320->data_buf[i], hdr, DATA_BLOCK_HEADER_SIZE);
memcpy(gm12u320->data_buf[i] +
(block_size - DATA_BLOCK_FOOTER_SIZE),
data_block_footer, DATA_BLOCK_FOOTER_SIZE);
}
return 0;
}
static int gm12u320_misc_request(struct gm12u320_device *gm12u320,
u8 req_a, u8 req_b,
u8 arg_a, u8 arg_b, u8 arg_c, u8 arg_d)
{
struct usb_device *udev = gm12u320_to_usb_device(gm12u320);
int ret, len;
memcpy(gm12u320->cmd_buf, &cmd_misc, CMD_SIZE);
gm12u320->cmd_buf[20] = req_a;
gm12u320->cmd_buf[21] = req_b;
gm12u320->cmd_buf[22] = arg_a;
gm12u320->cmd_buf[23] = arg_b;
gm12u320->cmd_buf[24] = arg_c;
gm12u320->cmd_buf[25] = arg_d;
/* Send request */
ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, MISC_SND_EPT),
gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT);
if (ret || len != CMD_SIZE) {
GM12U320_ERR("Misc. req. error %d\n", ret);
return -EIO;
}
/* Read value */
ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, MISC_RCV_EPT),
gm12u320->cmd_buf, MISC_VALUE_SIZE, &len,
DATA_TIMEOUT);
if (ret || len != MISC_VALUE_SIZE) {
GM12U320_ERR("Misc. value error %d\n", ret);
return -EIO;
}
/* cmd_buf[0] now contains the read value, which we don't use */
/* Read status */
ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, MISC_RCV_EPT),
gm12u320->cmd_buf, READ_STATUS_SIZE, &len,
CMD_TIMEOUT);
if (ret || len != READ_STATUS_SIZE) {
GM12U320_ERR("Misc. status error %d\n", ret);
return -EIO;
}
return 0;
}
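/*
 * Repack XRGB8888 pixels into the projector's 24-bit RGB format by dropping
 * the unused fourth byte of each pixel. @len is the number of pixels.
 */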
static void gm12u320_32bpp_to_24bpp_packed(u8 *dst, u8 *src, int len)
{
while (len--) {
*dst++ = *src++;
*dst++ = *src++;
*dst++ = *src++;
src++;
}
}
static void gm12u320_copy_fb_to_blocks(struct gm12u320_device *gm12u320)
{
int block, dst_offset, len, remain, ret, x1, x2, y1, y2;
struct drm_framebuffer *fb;
void *vaddr;
u8 *src;
mutex_lock(&gm12u320->fb_update.lock);
if (!gm12u320->fb_update.fb)
goto unlock;
fb = gm12u320->fb_update.fb;
x1 = gm12u320->fb_update.rect.x1;
x2 = gm12u320->fb_update.rect.x2;
y1 = gm12u320->fb_update.rect.y1;
y2 = gm12u320->fb_update.rect.y2;
vaddr = gm12u320->fb_update.src_map.vaddr; /* TODO: Use mapping abstraction properly */
ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
if (ret) {
GM12U320_ERR("drm_gem_fb_begin_cpu_access err: %d\n", ret);
goto put_fb;
}
src = vaddr + y1 * fb->pitches[0] + x1 * 4;
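	/*
	 * Center the 848-pixel-wide user image on the 854-pixel-wide panel:
	 * (854 - 848) / 2 = 3 pixels of offset on either side.
	 */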
x1 += (GM12U320_REAL_WIDTH - GM12U320_USER_WIDTH) / 2;
x2 += (GM12U320_REAL_WIDTH - GM12U320_USER_WIDTH) / 2;
for (; y1 < y2; y1++) {
remain = 0;
len = (x2 - x1) * 3;
dst_offset = (y1 * GM12U320_REAL_WIDTH + x1) * 3;
block = dst_offset / DATA_BLOCK_CONTENT_SIZE;
dst_offset %= DATA_BLOCK_CONTENT_SIZE;
if ((dst_offset + len) > DATA_BLOCK_CONTENT_SIZE) {
remain = dst_offset + len - DATA_BLOCK_CONTENT_SIZE;
len = DATA_BLOCK_CONTENT_SIZE - dst_offset;
}
dst_offset += DATA_BLOCK_HEADER_SIZE;
len /= 3;
gm12u320_32bpp_to_24bpp_packed(
gm12u320->data_buf[block] + dst_offset,
src, len);
if (remain) {
block++;
dst_offset = DATA_BLOCK_HEADER_SIZE;
gm12u320_32bpp_to_24bpp_packed(
gm12u320->data_buf[block] + dst_offset,
src + len * 4, remain / 3);
}
src += fb->pitches[0];
}
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
put_fb:
drm_framebuffer_put(fb);
gm12u320->fb_update.fb = NULL;
unlock:
mutex_unlock(&gm12u320->fb_update.lock);
}
static void gm12u320_fb_update_work(struct work_struct *work)
{
struct gm12u320_device *gm12u320 =
container_of(to_delayed_work(work), struct gm12u320_device,
fb_update.work);
struct usb_device *udev = gm12u320_to_usb_device(gm12u320);
int block, block_size, len;
int ret = 0;
gm12u320_copy_fb_to_blocks(gm12u320);
for (block = 0; block < GM12U320_BLOCK_COUNT; block++) {
if (block == GM12U320_BLOCK_COUNT - 1)
block_size = DATA_LAST_BLOCK_SIZE;
else
block_size = DATA_BLOCK_SIZE;
/* Send data command to device */
memcpy(gm12u320->cmd_buf, cmd_data, CMD_SIZE);
gm12u320->cmd_buf[8] = block_size & 0xff;
gm12u320->cmd_buf[9] = block_size >> 8;
gm12u320->cmd_buf[20] = 0xfc - block * 4;
gm12u320->cmd_buf[21] =
block | (gm12u320->fb_update.frame << 7);
ret = usb_bulk_msg(udev,
usb_sndbulkpipe(udev, DATA_SND_EPT),
gm12u320->cmd_buf, CMD_SIZE, &len,
CMD_TIMEOUT);
if (ret || len != CMD_SIZE)
goto err;
/* Send data block to device */
ret = usb_bulk_msg(udev,
usb_sndbulkpipe(udev, DATA_SND_EPT),
gm12u320->data_buf[block], block_size,
&len, DATA_TIMEOUT);
if (ret || len != block_size)
goto err;
/* Read status */
ret = usb_bulk_msg(udev,
usb_rcvbulkpipe(udev, DATA_RCV_EPT),
gm12u320->cmd_buf, READ_STATUS_SIZE, &len,
CMD_TIMEOUT);
if (ret || len != READ_STATUS_SIZE)
goto err;
}
/* Send draw command to device */
memcpy(gm12u320->cmd_buf, cmd_draw, CMD_SIZE);
ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, DATA_SND_EPT),
gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT);
if (ret || len != CMD_SIZE)
goto err;
/* Read status */
ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, DATA_RCV_EPT),
gm12u320->cmd_buf, READ_STATUS_SIZE, &len,
gm12u320->fb_update.draw_status_timeout);
if (ret || len != READ_STATUS_SIZE)
goto err;
gm12u320->fb_update.draw_status_timeout = CMD_TIMEOUT;
gm12u320->fb_update.frame = !gm12u320->fb_update.frame;
/*
* We must draw a frame every 2s otherwise the projector
* switches back to showing its logo.
*/
queue_delayed_work(system_long_wq, &gm12u320->fb_update.work,
msecs_to_jiffies(IDLE_TIMEOUT));
return;
err:
/* Do not log errors caused by module unload or device unplug */
if (ret != -ENODEV && ret != -ECONNRESET && ret != -ESHUTDOWN)
GM12U320_ERR("Frame update error: %d\n", ret);
}
static void gm12u320_fb_mark_dirty(struct drm_framebuffer *fb,
const struct iosys_map *map,
struct drm_rect *dirty)
{
struct gm12u320_device *gm12u320 = to_gm12u320(fb->dev);
struct drm_framebuffer *old_fb = NULL;
bool wakeup = false;
mutex_lock(&gm12u320->fb_update.lock);
if (gm12u320->fb_update.fb != fb) {
old_fb = gm12u320->fb_update.fb;
drm_framebuffer_get(fb);
gm12u320->fb_update.fb = fb;
gm12u320->fb_update.rect = *dirty;
gm12u320->fb_update.src_map = *map;
wakeup = true;
} else {
struct drm_rect *rect = &gm12u320->fb_update.rect;
rect->x1 = min(rect->x1, dirty->x1);
rect->y1 = min(rect->y1, dirty->y1);
rect->x2 = max(rect->x2, dirty->x2);
rect->y2 = max(rect->y2, dirty->y2);
}
mutex_unlock(&gm12u320->fb_update.lock);
if (wakeup)
mod_delayed_work(system_long_wq, &gm12u320->fb_update.work, 0);
if (old_fb)
drm_framebuffer_put(old_fb);
}
static void gm12u320_stop_fb_update(struct gm12u320_device *gm12u320)
{
struct drm_framebuffer *old_fb;
cancel_delayed_work_sync(&gm12u320->fb_update.work);
mutex_lock(&gm12u320->fb_update.lock);
old_fb = gm12u320->fb_update.fb;
gm12u320->fb_update.fb = NULL;
iosys_map_clear(&gm12u320->fb_update.src_map);
mutex_unlock(&gm12u320->fb_update.lock);
	if (old_fb)
		drm_framebuffer_put(old_fb);
}
static int gm12u320_set_ecomode(struct gm12u320_device *gm12u320)
{
return gm12u320_misc_request(gm12u320, MISC_REQ_GET_SET_ECO_A,
MISC_REQ_GET_SET_ECO_B, 0x01 /* set */,
eco_mode ? 0x01 : 0x00, 0x00, 0x01);
}
/* ------------------------------------------------------------------ */
/* gm12u320 connector */
/*
 * We use fake EDID info so that userspace knows that it is dealing with
 * an Acer projector, rather than listing this as an "unknown" monitor.
 * Note this assumes this driver is only ever used with the Acer C120; if we
 * add support for other devices the vendor and model should be parameterized.
*/
static struct edid gm12u320_edid = {
.header = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 },
.mfg_id = { 0x04, 0x72 }, /* "ACR" */
.prod_code = { 0x20, 0xc1 }, /* C120h */
.serial = 0xaa55aa55,
.mfg_week = 1,
.mfg_year = 16,
.version = 1, /* EDID 1.3 */
.revision = 3, /* EDID 1.3 */
.input = 0x08, /* Analog input */
.features = 0x0a, /* Pref timing in DTD 1 */
.standard_timings = { { 1, 1 }, { 1, 1 }, { 1, 1 }, { 1, 1 },
{ 1, 1 }, { 1, 1 }, { 1, 1 }, { 1, 1 } },
.detailed_timings = { {
.pixel_clock = 3383,
/* hactive = 848, hblank = 256 */
.data.pixel_data.hactive_lo = 0x50,
.data.pixel_data.hblank_lo = 0x00,
.data.pixel_data.hactive_hblank_hi = 0x31,
/* vactive = 480, vblank = 28 */
.data.pixel_data.vactive_lo = 0xe0,
.data.pixel_data.vblank_lo = 0x1c,
.data.pixel_data.vactive_vblank_hi = 0x10,
/* hsync offset 40 pw 128, vsync offset 1 pw 4 */
.data.pixel_data.hsync_offset_lo = 0x28,
.data.pixel_data.hsync_pulse_width_lo = 0x80,
.data.pixel_data.vsync_offset_pulse_width_lo = 0x14,
.data.pixel_data.hsync_vsync_offset_pulse_width_hi = 0x00,
/* Digital separate syncs, hsync+, vsync+ */
.data.pixel_data.misc = 0x1e,
}, {
.pixel_clock = 0,
.data.other_data.type = 0xfd, /* Monitor ranges */
.data.other_data.data.range.min_vfreq = 59,
.data.other_data.data.range.max_vfreq = 61,
.data.other_data.data.range.min_hfreq_khz = 29,
.data.other_data.data.range.max_hfreq_khz = 32,
.data.other_data.data.range.pixel_clock_mhz = 4, /* 40 MHz */
.data.other_data.data.range.flags = 0,
.data.other_data.data.range.formula.cvt = {
0xa0, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20 },
}, {
.pixel_clock = 0,
.data.other_data.type = 0xfc, /* Model string */
.data.other_data.data.str.str = {
'P', 'r', 'o', 'j', 'e', 'c', 't', 'o', 'r', '\n',
' ', ' ', ' ' },
}, {
.pixel_clock = 0,
.data.other_data.type = 0xfe, /* Unspecified text / padding */
.data.other_data.data.str.str = {
'\n', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ',
' ', ' ', ' ' },
} },
.checksum = 0x13,
};
static int gm12u320_conn_get_modes(struct drm_connector *connector)
{
drm_connector_update_edid_property(connector, &gm12u320_edid);
return drm_add_edid_modes(connector, &gm12u320_edid);
}
static const struct drm_connector_helper_funcs gm12u320_conn_helper_funcs = {
.get_modes = gm12u320_conn_get_modes,
};
static const struct drm_connector_funcs gm12u320_conn_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int gm12u320_conn_init(struct gm12u320_device *gm12u320)
{
drm_connector_helper_add(&gm12u320->conn, &gm12u320_conn_helper_funcs);
return drm_connector_init(&gm12u320->dev, &gm12u320->conn,
&gm12u320_conn_funcs, DRM_MODE_CONNECTOR_VGA);
}
/* ------------------------------------------------------------------ */
/* gm12u320 (simple) display pipe */
static void gm12u320_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
struct drm_rect rect = { 0, 0, GM12U320_USER_WIDTH, GM12U320_HEIGHT };
struct gm12u320_device *gm12u320 = to_gm12u320(pipe->crtc.dev);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
gm12u320->fb_update.draw_status_timeout = FIRST_FRAME_TIMEOUT;
gm12u320_fb_mark_dirty(plane_state->fb, &shadow_plane_state->data[0], &rect);
}
static void gm12u320_pipe_disable(struct drm_simple_display_pipe *pipe)
{
struct gm12u320_device *gm12u320 = to_gm12u320(pipe->crtc.dev);
gm12u320_stop_fb_update(gm12u320);
}
static void gm12u320_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
struct drm_rect rect;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
gm12u320_fb_mark_dirty(state->fb, &shadow_plane_state->data[0], &rect);
}
static const struct drm_simple_display_pipe_funcs gm12u320_pipe_funcs = {
.enable = gm12u320_pipe_enable,
.disable = gm12u320_pipe_disable,
.update = gm12u320_pipe_update,
DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
};
static const uint32_t gm12u320_pipe_formats[] = {
DRM_FORMAT_XRGB8888,
};
static const uint64_t gm12u320_pipe_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
/*
* FIXME: Dma-buf sharing requires DMA support by the importing device.
* This function is a workaround to make USB devices work as well.
* See todo.rst for how to fix the issue in the dma-buf framework.
*/
static struct drm_gem_object *gm12u320_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
struct gm12u320_device *gm12u320 = to_gm12u320(dev);
if (!gm12u320->dmadev)
return ERR_PTR(-ENODEV);
return drm_gem_prime_import_dev(dev, dma_buf, gm12u320->dmadev);
}
DEFINE_DRM_GEM_FOPS(gm12u320_fops);
static const struct drm_driver gm12u320_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.fops = &gm12u320_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
.gem_prime_import = gm12u320_gem_prime_import,
};
static const struct drm_mode_config_funcs gm12u320_mode_config_funcs = {
.fb_create = drm_gem_fb_create_with_dirty,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
static int gm12u320_usb_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct gm12u320_device *gm12u320;
struct drm_device *dev;
int ret;
/*
* The gm12u320 presents itself to the system as 2 usb mass-storage
	 * interfaces; we only care about / need the first one.
*/
if (interface->cur_altsetting->desc.bInterfaceNumber != 0)
return -ENODEV;
gm12u320 = devm_drm_dev_alloc(&interface->dev, &gm12u320_drm_driver,
struct gm12u320_device, dev);
if (IS_ERR(gm12u320))
return PTR_ERR(gm12u320);
dev = &gm12u320->dev;
gm12u320->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
if (!gm12u320->dmadev)
drm_warn(dev, "buffer sharing not supported"); /* not an error */
INIT_DELAYED_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work);
mutex_init(&gm12u320->fb_update.lock);
ret = drmm_mode_config_init(dev);
if (ret)
goto err_put_device;
dev->mode_config.min_width = GM12U320_USER_WIDTH;
dev->mode_config.max_width = GM12U320_USER_WIDTH;
dev->mode_config.min_height = GM12U320_HEIGHT;
dev->mode_config.max_height = GM12U320_HEIGHT;
dev->mode_config.funcs = &gm12u320_mode_config_funcs;
ret = gm12u320_usb_alloc(gm12u320);
if (ret)
goto err_put_device;
ret = gm12u320_set_ecomode(gm12u320);
if (ret)
goto err_put_device;
ret = gm12u320_conn_init(gm12u320);
if (ret)
goto err_put_device;
ret = drm_simple_display_pipe_init(&gm12u320->dev,
&gm12u320->pipe,
&gm12u320_pipe_funcs,
gm12u320_pipe_formats,
ARRAY_SIZE(gm12u320_pipe_formats),
gm12u320_pipe_modifiers,
&gm12u320->conn);
if (ret)
goto err_put_device;
drm_mode_config_reset(dev);
usb_set_intfdata(interface, dev);
ret = drm_dev_register(dev, 0);
if (ret)
goto err_put_device;
drm_fbdev_generic_setup(dev, 0);
return 0;
err_put_device:
put_device(gm12u320->dmadev);
return ret;
}
static void gm12u320_usb_disconnect(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
struct gm12u320_device *gm12u320 = to_gm12u320(dev);
put_device(gm12u320->dmadev);
gm12u320->dmadev = NULL;
drm_dev_unplug(dev);
drm_atomic_helper_shutdown(dev);
}
static int gm12u320_suspend(struct usb_interface *interface,
pm_message_t message)
{
struct drm_device *dev = usb_get_intfdata(interface);
return drm_mode_config_helper_suspend(dev);
}
static int gm12u320_resume(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
struct gm12u320_device *gm12u320 = to_gm12u320(dev);
gm12u320_set_ecomode(gm12u320);
return drm_mode_config_helper_resume(dev);
}
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1de1, 0xc102) },
{},
};
MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_driver gm12u320_usb_driver = {
.name = "gm12u320",
.probe = gm12u320_usb_probe,
.disconnect = gm12u320_usb_disconnect,
.id_table = id_table,
.suspend = pm_ptr(gm12u320_suspend),
.resume = pm_ptr(gm12u320_resume),
.reset_resume = pm_ptr(gm12u320_resume),
};
module_usb_driver(gm12u320_usb_driver);
MODULE_AUTHOR("Hans de Goede <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/tiny/gm12u320.c |
// SPDX-License-Identifier: GPL-2.0
/*
* DRM driver for MIPI DBI compatible display panels
*
* Copyright 2022 Noralf Trønnes
*/
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modes.h>
#include <drm/drm_modeset_helper.h>
#include <video/mipi_display.h>
static const u8 panel_mipi_dbi_magic[15] = { 'M', 'I', 'P', 'I', ' ', 'D', 'B', 'I',
0, 0, 0, 0, 0, 0, 0 };
/*
* The display controller configuration is stored in a firmware file.
* The Device Tree 'compatible' property value with a '.bin' suffix is passed
* to request_firmware() to fetch this file.
*/
struct panel_mipi_dbi_config {
/* Magic string: panel_mipi_dbi_magic */
u8 magic[15];
/* Config file format version */
u8 file_format_version;
/*
* MIPI commands to execute when the display pipeline is enabled.
* This is used to configure the display controller.
*
* The commands are stored in a byte array with the format:
* command, num_parameters, [ parameter, ...], command, ...
*
* Some commands require a pause before the next command can be received.
* Inserting a delay in the command sequence is done by using the NOP command with one
	 * parameter: delay in milliseconds (the No Operation command is part of the MIPI Display
* Command Set where it has no parameters).
*
* Example:
* command 0x11
* sleep 120ms
* command 0xb1 parameters 0x01, 0x2c, 0x2d
* command 0x29
*
* Byte sequence:
* 0x11 0x00
* 0x00 0x01 0x78
* 0xb1 0x03 0x01 0x2c 0x2d
* 0x29 0x00
*/
u8 commands[];
};
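/*
 * Illustrative only (not used by the driver): the example sequence above,
 * laid out as a complete config blob with the magic bytes and format version
 * in front of the packed command stream:
 *
 *	'M', 'I', 'P', 'I', ' ', 'D', 'B', 'I', 0, 0, 0, 0, 0, 0, 0,
 *	1,				(file_format_version)
 *	0x11, 0x00,			(exit sleep mode, no parameters)
 *	0x00, 0x01, 0x78,		(NOP: delay 120 ms)
 *	0xb1, 0x03, 0x01, 0x2c, 0x2d,	(command 0xb1, three parameters)
 *	0x29, 0x00,			(display on, no parameters)
 */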
struct panel_mipi_dbi_commands {
const u8 *buf;
size_t len;
};
static struct panel_mipi_dbi_commands *
panel_mipi_dbi_check_commands(struct device *dev, const struct firmware *fw)
{
const struct panel_mipi_dbi_config *config = (struct panel_mipi_dbi_config *)fw->data;
struct panel_mipi_dbi_commands *commands;
size_t size = fw->size, commands_len;
unsigned int i = 0;
if (size < sizeof(*config) + 2) { /* At least 1 command */
dev_err(dev, "config: file size=%zu is too small\n", size);
return ERR_PTR(-EINVAL);
}
if (memcmp(config->magic, panel_mipi_dbi_magic, sizeof(config->magic))) {
dev_err(dev, "config: Bad magic: %15ph\n", config->magic);
return ERR_PTR(-EINVAL);
}
if (config->file_format_version != 1) {
dev_err(dev, "config: version=%u is not supported\n", config->file_format_version);
return ERR_PTR(-EINVAL);
}
drm_dev_dbg(dev, DRM_UT_DRIVER, "size=%zu version=%u\n", size, config->file_format_version);
commands_len = size - sizeof(*config);
while ((i + 1) < commands_len) {
u8 command = config->commands[i++];
u8 num_parameters = config->commands[i++];
const u8 *parameters = &config->commands[i];
i += num_parameters;
if (i > commands_len) {
dev_err(dev, "config: command=0x%02x num_parameters=%u overflows\n",
command, num_parameters);
return ERR_PTR(-EINVAL);
}
if (command == 0x00 && num_parameters == 1)
drm_dev_dbg(dev, DRM_UT_DRIVER, "sleep %ums\n", parameters[0]);
else
drm_dev_dbg(dev, DRM_UT_DRIVER, "command %02x %*ph\n",
command, num_parameters, parameters);
}
if (i != commands_len) {
dev_err(dev, "config: malformed command array\n");
return ERR_PTR(-EINVAL);
}
commands = devm_kzalloc(dev, sizeof(*commands), GFP_KERNEL);
if (!commands)
return ERR_PTR(-ENOMEM);
commands->len = commands_len;
commands->buf = devm_kmemdup(dev, config->commands, commands->len, GFP_KERNEL);
if (!commands->buf)
return ERR_PTR(-ENOMEM);
return commands;
}
static struct panel_mipi_dbi_commands *panel_mipi_dbi_commands_from_fw(struct device *dev)
{
struct panel_mipi_dbi_commands *commands;
const struct firmware *fw;
const char *compatible;
char fw_name[40];
int ret;
ret = of_property_read_string_index(dev->of_node, "compatible", 0, &compatible);
if (ret)
return ERR_PTR(ret);
snprintf(fw_name, sizeof(fw_name), "%s.bin", compatible);
ret = request_firmware(&fw, fw_name, dev);
if (ret) {
dev_err(dev, "No config file found for compatible '%s' (error=%d)\n",
compatible, ret);
return ERR_PTR(ret);
}
commands = panel_mipi_dbi_check_commands(dev, fw);
release_firmware(fw);
return commands;
}
static void panel_mipi_dbi_commands_execute(struct mipi_dbi *dbi,
struct panel_mipi_dbi_commands *commands)
{
unsigned int i = 0;
if (!commands)
return;
while (i < commands->len) {
u8 command = commands->buf[i++];
u8 num_parameters = commands->buf[i++];
const u8 *parameters = &commands->buf[i];
if (command == 0x00 && num_parameters == 1)
msleep(parameters[0]);
else if (num_parameters)
mipi_dbi_command_stackbuf(dbi, command, parameters, num_parameters);
else
mipi_dbi_command(dbi, command);
i += num_parameters;
}
}
static void panel_mipi_dbi_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
struct mipi_dbi *dbi = &dbidev->dbi;
int ret, idx;
if (!drm_dev_enter(pipe->crtc.dev, &idx))
return;
drm_dbg(pipe->crtc.dev, "\n");
ret = mipi_dbi_poweron_conditional_reset(dbidev);
if (ret < 0)
goto out_exit;
if (!ret)
panel_mipi_dbi_commands_execute(dbi, dbidev->driver_private);
mipi_dbi_enable_flush(dbidev, crtc_state, plane_state);
out_exit:
drm_dev_exit(idx);
}
static const struct drm_simple_display_pipe_funcs panel_mipi_dbi_pipe_funcs = {
DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(panel_mipi_dbi_enable),
};
DEFINE_DRM_GEM_DMA_FOPS(panel_mipi_dbi_fops);
static const struct drm_driver panel_mipi_dbi_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &panel_mipi_dbi_fops,
DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "panel-mipi-dbi",
.desc = "MIPI DBI compatible display panel",
.date = "20220103",
.major = 1,
.minor = 0,
};
static int panel_mipi_dbi_get_mode(struct mipi_dbi_dev *dbidev, struct drm_display_mode *mode)
{
struct device *dev = dbidev->drm.dev;
u16 hback_porch, vback_porch;
int ret;
ret = of_get_drm_panel_display_mode(dev->of_node, mode, NULL);
if (ret) {
dev_err(dev, "%pOF: failed to get panel-timing (error=%d)\n", dev->of_node, ret);
return ret;
}
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
hback_porch = mode->htotal - mode->hsync_end;
vback_porch = mode->vtotal - mode->vsync_end;
/*
* Make sure width and height are set and that only back porch and
* pixelclock are set in the other timing values. Also check that
* width and height don't exceed the 16-bit value specified by MIPI DCS.
*/
if (!mode->hdisplay || !mode->vdisplay || mode->flags ||
mode->hsync_end > mode->hdisplay || (hback_porch + mode->hdisplay) > 0xffff ||
mode->vsync_end > mode->vdisplay || (vback_porch + mode->vdisplay) > 0xffff) {
dev_err(dev, "%pOF: panel-timing out of bounds\n", dev->of_node);
return -EINVAL;
}
/* The driver doesn't use the pixel clock but it is mandatory so fake one if not set */
if (!mode->clock)
mode->clock = mode->htotal * mode->vtotal * 60 / 1000;
dbidev->top_offset = vback_porch;
dbidev->left_offset = hback_porch;
return 0;
}
static int panel_mipi_dbi_spi_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
struct drm_display_mode mode;
struct mipi_dbi_dev *dbidev;
struct drm_device *drm;
struct mipi_dbi *dbi;
struct gpio_desc *dc;
int ret;
dbidev = devm_drm_dev_alloc(dev, &panel_mipi_dbi_driver, struct mipi_dbi_dev, drm);
if (IS_ERR(dbidev))
return PTR_ERR(dbidev);
dbi = &dbidev->dbi;
drm = &dbidev->drm;
ret = panel_mipi_dbi_get_mode(dbidev, &mode);
if (ret)
return ret;
dbidev->regulator = devm_regulator_get(dev, "power");
if (IS_ERR(dbidev->regulator))
return dev_err_probe(dev, PTR_ERR(dbidev->regulator),
"Failed to get regulator 'power'\n");
dbidev->io_regulator = devm_regulator_get(dev, "io");
if (IS_ERR(dbidev->io_regulator))
return dev_err_probe(dev, PTR_ERR(dbidev->io_regulator),
"Failed to get regulator 'io'\n");
dbidev->backlight = devm_of_find_backlight(dev);
if (IS_ERR(dbidev->backlight))
return dev_err_probe(dev, PTR_ERR(dbidev->backlight), "Failed to get backlight\n");
dbi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(dbi->reset))
return dev_err_probe(dev, PTR_ERR(dbi->reset), "Failed to get GPIO 'reset'\n");
/* Multiple panels can share the "dc" GPIO, but only if they are on the same SPI bus! */
dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
if (IS_ERR(dc))
return dev_err_probe(dev, PTR_ERR(dc), "Failed to get GPIO 'dc'\n");
ret = mipi_dbi_spi_init(spi, dbi, dc);
if (ret)
return ret;
if (device_property_present(dev, "write-only"))
dbi->read_commands = NULL;
dbidev->driver_private = panel_mipi_dbi_commands_from_fw(dev);
if (IS_ERR(dbidev->driver_private))
return PTR_ERR(dbidev->driver_private);
ret = mipi_dbi_dev_init(dbidev, &panel_mipi_dbi_pipe_funcs, &mode, 0);
if (ret)
return ret;
drm_mode_config_reset(drm);
ret = drm_dev_register(drm, 0);
if (ret)
return ret;
spi_set_drvdata(spi, drm);
drm_fbdev_generic_setup(drm, 0);
return 0;
}
static void panel_mipi_dbi_spi_remove(struct spi_device *spi)
{
struct drm_device *drm = spi_get_drvdata(spi);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
}
static void panel_mipi_dbi_spi_shutdown(struct spi_device *spi)
{
drm_atomic_helper_shutdown(spi_get_drvdata(spi));
}
static int __maybe_unused panel_mipi_dbi_pm_suspend(struct device *dev)
{
return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
}
static int __maybe_unused panel_mipi_dbi_pm_resume(struct device *dev)
{
drm_mode_config_helper_resume(dev_get_drvdata(dev));
return 0;
}
static const struct dev_pm_ops panel_mipi_dbi_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(panel_mipi_dbi_pm_suspend, panel_mipi_dbi_pm_resume)
};
static const struct of_device_id panel_mipi_dbi_spi_of_match[] = {
{ .compatible = "panel-mipi-dbi-spi" },
{},
};
MODULE_DEVICE_TABLE(of, panel_mipi_dbi_spi_of_match);
static const struct spi_device_id panel_mipi_dbi_spi_id[] = {
{ "panel-mipi-dbi-spi", 0 },
{ },
};
MODULE_DEVICE_TABLE(spi, panel_mipi_dbi_spi_id);
static struct spi_driver panel_mipi_dbi_spi_driver = {
.driver = {
.name = "panel-mipi-dbi-spi",
.owner = THIS_MODULE,
.of_match_table = panel_mipi_dbi_spi_of_match,
.pm = &panel_mipi_dbi_pm_ops,
},
.id_table = panel_mipi_dbi_spi_id,
.probe = panel_mipi_dbi_spi_probe,
.remove = panel_mipi_dbi_spi_remove,
.shutdown = panel_mipi_dbi_spi_shutdown,
};
module_spi_driver(panel_mipi_dbi_spi_driver);
MODULE_DESCRIPTION("MIPI DBI compatible display panel driver");
MODULE_AUTHOR("Noralf Trønnes");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/tiny/panel-mipi-dbi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/module.h>
#include <linux/pci.h>
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <video/vga.h>
/* ---------------------------------------------------------------------- */
#define VBE_DISPI_IOPORT_INDEX 0x01CE
#define VBE_DISPI_IOPORT_DATA 0x01CF
#define VBE_DISPI_INDEX_ID 0x0
#define VBE_DISPI_INDEX_XRES 0x1
#define VBE_DISPI_INDEX_YRES 0x2
#define VBE_DISPI_INDEX_BPP 0x3
#define VBE_DISPI_INDEX_ENABLE 0x4
#define VBE_DISPI_INDEX_BANK 0x5
#define VBE_DISPI_INDEX_VIRT_WIDTH 0x6
#define VBE_DISPI_INDEX_VIRT_HEIGHT 0x7
#define VBE_DISPI_INDEX_X_OFFSET 0x8
#define VBE_DISPI_INDEX_Y_OFFSET 0x9
#define VBE_DISPI_INDEX_VIDEO_MEMORY_64K 0xa
#define VBE_DISPI_ID0 0xB0C0
#define VBE_DISPI_ID1 0xB0C1
#define VBE_DISPI_ID2 0xB0C2
#define VBE_DISPI_ID3 0xB0C3
#define VBE_DISPI_ID4 0xB0C4
#define VBE_DISPI_ID5 0xB0C5
#define VBE_DISPI_DISABLED 0x00
#define VBE_DISPI_ENABLED 0x01
#define VBE_DISPI_GETCAPS 0x02
#define VBE_DISPI_8BIT_DAC 0x20
#define VBE_DISPI_LFB_ENABLED 0x40
#define VBE_DISPI_NOCLEARMEM 0x80
static int bochs_modeset = -1;
static int defx = 1024;
static int defy = 768;
module_param_named(modeset, bochs_modeset, int, 0444);
MODULE_PARM_DESC(modeset, "enable/disable kernel modesetting");
module_param(defx, int, 0444);
module_param(defy, int, 0444);
MODULE_PARM_DESC(defx, "default x resolution");
MODULE_PARM_DESC(defy, "default y resolution");
/* ---------------------------------------------------------------------- */
enum bochs_types {
BOCHS_QEMU_STDVGA,
BOCHS_SIMICS,
BOCHS_UNKNOWN,
};
struct bochs_device {
/* hw */
void __iomem *mmio;
int ioports;
void __iomem *fb_map;
unsigned long fb_base;
unsigned long fb_size;
unsigned long qext_size;
/* mode */
u16 xres;
u16 yres;
u16 yres_virtual;
u32 stride;
u32 bpp;
struct edid *edid;
/* drm */
struct drm_device *dev;
struct drm_simple_display_pipe pipe;
struct drm_connector connector;
};
/* ---------------------------------------------------------------------- */
static void bochs_vga_writeb(struct bochs_device *bochs, u16 ioport, u8 val)
{
if (WARN_ON(ioport < 0x3c0 || ioport > 0x3df))
return;
if (bochs->mmio) {
int offset = ioport - 0x3c0 + 0x400;
writeb(val, bochs->mmio + offset);
} else {
outb(val, ioport);
}
}
static u8 bochs_vga_readb(struct bochs_device *bochs, u16 ioport)
{
if (WARN_ON(ioport < 0x3c0 || ioport > 0x3df))
return 0xff;
if (bochs->mmio) {
int offset = ioport - 0x3c0 + 0x400;
return readb(bochs->mmio + offset);
} else {
return inb(ioport);
}
}
static u16 bochs_dispi_read(struct bochs_device *bochs, u16 reg)
{
u16 ret = 0;
if (bochs->mmio) {
int offset = 0x500 + (reg << 1);
ret = readw(bochs->mmio + offset);
} else {
outw(reg, VBE_DISPI_IOPORT_INDEX);
ret = inw(VBE_DISPI_IOPORT_DATA);
}
return ret;
}
static void bochs_dispi_write(struct bochs_device *bochs, u16 reg, u16 val)
{
if (bochs->mmio) {
int offset = 0x500 + (reg << 1);
writew(val, bochs->mmio + offset);
} else {
outw(reg, VBE_DISPI_IOPORT_INDEX);
outw(val, VBE_DISPI_IOPORT_DATA);
}
}
static void bochs_hw_set_big_endian(struct bochs_device *bochs)
{
if (bochs->qext_size < 8)
return;
writel(0xbebebebe, bochs->mmio + 0x604);
}
static void bochs_hw_set_little_endian(struct bochs_device *bochs)
{
if (bochs->qext_size < 8)
return;
writel(0x1e1e1e1e, bochs->mmio + 0x604);
}
#ifdef __BIG_ENDIAN
#define bochs_hw_set_native_endian(_b) bochs_hw_set_big_endian(_b)
#else
#define bochs_hw_set_native_endian(_b) bochs_hw_set_little_endian(_b)
#endif
static int bochs_get_edid_block(void *data, u8 *buf,
unsigned int block, size_t len)
{
struct bochs_device *bochs = data;
size_t i, start = block * EDID_LENGTH;
if (start + len > 0x400 /* vga register offset */)
return -1;
for (i = 0; i < len; i++)
buf[i] = readb(bochs->mmio + start + i);
return 0;
}
static int bochs_hw_load_edid(struct bochs_device *bochs)
{
u8 header[8];
if (!bochs->mmio)
return -1;
/* check header to detect whether edid support is enabled in qemu */
bochs_get_edid_block(bochs, header, 0, ARRAY_SIZE(header));
if (drm_edid_header_is_valid(header) != 8)
return -1;
kfree(bochs->edid);
bochs->edid = drm_do_get_edid(&bochs->connector,
bochs_get_edid_block, bochs);
if (bochs->edid == NULL)
return -1;
return 0;
}
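/*
 * Layout of the MMIO BAR as used by the accessors above and below
 * (summarised from the code in this file): bytes 0x000-0x3ff hold the
 * EDID blob exposed by qemu, 0x400-0x41f mirror the VGA ioports
 * 0x3c0-0x3df, 0x500 onwards holds the 16-bit DISPI registers at
 * offset 0x500 + (index << 1), and the qemu extension registers start
 * at 0x600 (size) / 0x604 (endianness).
 */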
static int bochs_hw_init(struct drm_device *dev)
{
struct bochs_device *bochs = dev->dev_private;
struct pci_dev *pdev = to_pci_dev(dev->dev);
unsigned long addr, size, mem, ioaddr, iosize;
u16 id;
if (pdev->resource[2].flags & IORESOURCE_MEM) {
/* mmio bar with vga and bochs registers present */
if (pci_request_region(pdev, 2, "bochs-drm") != 0) {
DRM_ERROR("Cannot request mmio region\n");
return -EBUSY;
}
ioaddr = pci_resource_start(pdev, 2);
iosize = pci_resource_len(pdev, 2);
bochs->mmio = ioremap(ioaddr, iosize);
if (bochs->mmio == NULL) {
DRM_ERROR("Cannot map mmio region\n");
return -ENOMEM;
}
} else {
ioaddr = VBE_DISPI_IOPORT_INDEX;
iosize = 2;
if (!request_region(ioaddr, iosize, "bochs-drm")) {
DRM_ERROR("Cannot request ioports\n");
return -EBUSY;
}
bochs->ioports = 1;
}
id = bochs_dispi_read(bochs, VBE_DISPI_INDEX_ID);
mem = bochs_dispi_read(bochs, VBE_DISPI_INDEX_VIDEO_MEMORY_64K)
* 64 * 1024;
if ((id & 0xfff0) != VBE_DISPI_ID0) {
DRM_ERROR("ID mismatch\n");
return -ENODEV;
}
if ((pdev->resource[0].flags & IORESOURCE_MEM) == 0)
return -ENODEV;
addr = pci_resource_start(pdev, 0);
size = pci_resource_len(pdev, 0);
if (addr == 0)
return -ENODEV;
if (size != mem) {
DRM_ERROR("Size mismatch: pci=%ld, bochs=%ld\n",
size, mem);
size = min(size, mem);
}
if (pci_request_region(pdev, 0, "bochs-drm") != 0)
DRM_WARN("Cannot request framebuffer, boot fb still active?\n");
bochs->fb_map = ioremap(addr, size);
if (bochs->fb_map == NULL) {
DRM_ERROR("Cannot map framebuffer\n");
return -ENOMEM;
}
bochs->fb_base = addr;
bochs->fb_size = size;
DRM_INFO("Found bochs VGA, ID 0x%x.\n", id);
DRM_INFO("Framebuffer size %ld kB @ 0x%lx, %s @ 0x%lx.\n",
size / 1024, addr,
bochs->ioports ? "ioports" : "mmio",
ioaddr);
if (bochs->mmio && pdev->revision >= 2) {
bochs->qext_size = readl(bochs->mmio + 0x600);
if (bochs->qext_size < 4 || bochs->qext_size > iosize) {
bochs->qext_size = 0;
goto noext;
}
DRM_DEBUG("Found qemu ext regs, size %ld\n",
bochs->qext_size);
bochs_hw_set_native_endian(bochs);
}
noext:
return 0;
}
static void bochs_hw_fini(struct drm_device *dev)
{
struct bochs_device *bochs = dev->dev_private;
/* TODO: tear down existing vram mappings */
if (bochs->mmio)
iounmap(bochs->mmio);
if (bochs->ioports)
release_region(VBE_DISPI_IOPORT_INDEX, 2);
if (bochs->fb_map)
iounmap(bochs->fb_map);
pci_release_regions(to_pci_dev(dev->dev));
kfree(bochs->edid);
}
static void bochs_hw_blank(struct bochs_device *bochs, bool blank)
{
DRM_DEBUG_DRIVER("hw_blank %d\n", blank);
/* enable color bit (so VGA_IS1_RC access works) */
bochs_vga_writeb(bochs, VGA_MIS_W, VGA_MIS_COLOR);
/* discard ar_flip_flop */
(void)bochs_vga_readb(bochs, VGA_IS1_RC);
/* blank or unblank; we need only update index and set 0x20 */
bochs_vga_writeb(bochs, VGA_ATT_W, blank ? 0 : 0x20);
}
static void bochs_hw_setmode(struct bochs_device *bochs, struct drm_display_mode *mode)
{
int idx;
if (!drm_dev_enter(bochs->dev, &idx))
return;
bochs->xres = mode->hdisplay;
bochs->yres = mode->vdisplay;
bochs->bpp = 32;
bochs->stride = mode->hdisplay * (bochs->bpp / 8);
bochs->yres_virtual = bochs->fb_size / bochs->stride;
DRM_DEBUG_DRIVER("%dx%d @ %d bpp, vy %d\n",
bochs->xres, bochs->yres, bochs->bpp,
bochs->yres_virtual);
bochs_hw_blank(bochs, false);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE, 0);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_BPP, bochs->bpp);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_XRES, bochs->xres);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_YRES, bochs->yres);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_BANK, 0);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_WIDTH, bochs->xres);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_HEIGHT,
bochs->yres_virtual);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_X_OFFSET, 0);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_Y_OFFSET, 0);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE,
VBE_DISPI_ENABLED | VBE_DISPI_LFB_ENABLED);
drm_dev_exit(idx);
}
static void bochs_hw_setformat(struct bochs_device *bochs, const struct drm_format_info *format)
{
int idx;
if (!drm_dev_enter(bochs->dev, &idx))
return;
DRM_DEBUG_DRIVER("format %c%c%c%c\n",
(format->format >> 0) & 0xff,
(format->format >> 8) & 0xff,
(format->format >> 16) & 0xff,
(format->format >> 24) & 0xff);
switch (format->format) {
case DRM_FORMAT_XRGB8888:
bochs_hw_set_little_endian(bochs);
break;
case DRM_FORMAT_BGRX8888:
bochs_hw_set_big_endian(bochs);
break;
default:
/* should not happen */
DRM_ERROR("%s: Huh? Got framebuffer format 0x%x",
__func__, format->format);
break;
}
drm_dev_exit(idx);
}
static void bochs_hw_setbase(struct bochs_device *bochs, int x, int y, int stride, u64 addr)
{
unsigned long offset;
unsigned int vx, vy, vwidth, idx;
if (!drm_dev_enter(bochs->dev, &idx))
return;
bochs->stride = stride;
offset = (unsigned long)addr +
y * bochs->stride +
x * (bochs->bpp / 8);
vy = offset / bochs->stride;
vx = (offset % bochs->stride) * 8 / bochs->bpp;
vwidth = stride * 8 / bochs->bpp;
DRM_DEBUG_DRIVER("x %d, y %d, addr %llx -> offset %lx, vx %d, vy %d\n",
x, y, addr, offset, vx, vy);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_WIDTH, vwidth);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_X_OFFSET, vx);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_Y_OFFSET, vy);
drm_dev_exit(idx);
}
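/*
 * Worked example for the arithmetic in bochs_hw_setbase() above
 * (illustrative numbers only): with a 1024x768 XRGB8888 framebuffer
 * placed 1 MiB into VRAM (addr = 0x100000, stride = 4096, bpp = 32) and
 * x = y = 0, the byte offset is 1048576, so vy = 1048576 / 4096 = 256,
 * vx = 0 and vwidth = 4096 * 8 / 32 = 1024. Programming a Y offset of 256
 * points the scanout 256 virtual lines down, which is how flipping
 * between buffers stacked in VRAM is expressed to the DISPI interface.
 */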
/* ---------------------------------------------------------------------- */
static const uint32_t bochs_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_BGRX8888,
};
static void bochs_plane_update(struct bochs_device *bochs, struct drm_plane_state *state)
{
struct drm_gem_vram_object *gbo;
s64 gpu_addr;
if (!state->fb || !bochs->stride)
return;
gbo = drm_gem_vram_of_gem(state->fb->obj[0]);
gpu_addr = drm_gem_vram_offset(gbo);
if (WARN_ON_ONCE(gpu_addr < 0))
return; /* Bug: we didn't pin the BO to VRAM in prepare_fb. */
bochs_hw_setbase(bochs,
state->crtc_x,
state->crtc_y,
state->fb->pitches[0],
state->fb->offsets[0] + gpu_addr);
bochs_hw_setformat(bochs, state->fb->format);
}
static void bochs_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
struct bochs_device *bochs = pipe->crtc.dev->dev_private;
bochs_hw_setmode(bochs, &crtc_state->mode);
bochs_plane_update(bochs, plane_state);
}
static void bochs_pipe_disable(struct drm_simple_display_pipe *pipe)
{
struct bochs_device *bochs = pipe->crtc.dev->dev_private;
bochs_hw_blank(bochs, true);
}
static void bochs_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct bochs_device *bochs = pipe->crtc.dev->dev_private;
bochs_plane_update(bochs, pipe->plane.state);
}
static const struct drm_simple_display_pipe_funcs bochs_pipe_funcs = {
.enable = bochs_pipe_enable,
.disable = bochs_pipe_disable,
.update = bochs_pipe_update,
.prepare_fb = drm_gem_vram_simple_display_pipe_prepare_fb,
.cleanup_fb = drm_gem_vram_simple_display_pipe_cleanup_fb,
};
static int bochs_connector_get_modes(struct drm_connector *connector)
{
struct bochs_device *bochs =
container_of(connector, struct bochs_device, connector);
int count = 0;
if (bochs->edid)
count = drm_add_edid_modes(connector, bochs->edid);
if (!count) {
count = drm_add_modes_noedid(connector, 8192, 8192);
drm_set_preferred_mode(connector, defx, defy);
}
return count;
}
static const struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = {
.get_modes = bochs_connector_get_modes,
};
static const struct drm_connector_funcs bochs_connector_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static void bochs_connector_init(struct drm_device *dev)
{
struct bochs_device *bochs = dev->dev_private;
struct drm_connector *connector = &bochs->connector;
drm_connector_init(dev, connector, &bochs_connector_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
drm_connector_helper_add(connector, &bochs_connector_connector_helper_funcs);
bochs_hw_load_edid(bochs);
if (bochs->edid) {
DRM_INFO("Found EDID data blob.\n");
drm_connector_attach_edid_property(connector);
drm_connector_update_edid_property(connector, bochs->edid);
}
}
static struct drm_framebuffer *
bochs_gem_fb_create(struct drm_device *dev, struct drm_file *file,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
if (mode_cmd->pixel_format != DRM_FORMAT_XRGB8888 &&
mode_cmd->pixel_format != DRM_FORMAT_BGRX8888)
return ERR_PTR(-EINVAL);
return drm_gem_fb_create(dev, file, mode_cmd);
}
static const struct drm_mode_config_funcs bochs_mode_funcs = {
.fb_create = bochs_gem_fb_create,
.mode_valid = drm_vram_helper_mode_valid,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
static int bochs_kms_init(struct bochs_device *bochs)
{
int ret;
ret = drmm_mode_config_init(bochs->dev);
if (ret)
return ret;
bochs->dev->mode_config.max_width = 8192;
bochs->dev->mode_config.max_height = 8192;
bochs->dev->mode_config.preferred_depth = 24;
bochs->dev->mode_config.prefer_shadow = 0;
bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true;
bochs->dev->mode_config.funcs = &bochs_mode_funcs;
bochs_connector_init(bochs->dev);
drm_simple_display_pipe_init(bochs->dev,
&bochs->pipe,
&bochs_pipe_funcs,
bochs_formats,
ARRAY_SIZE(bochs_formats),
NULL,
&bochs->connector);
drm_mode_config_reset(bochs->dev);
return 0;
}
/* ---------------------------------------------------------------------- */
/* drm interface */
static int bochs_load(struct drm_device *dev)
{
struct bochs_device *bochs;
int ret;
bochs = drmm_kzalloc(dev, sizeof(*bochs), GFP_KERNEL);
if (bochs == NULL)
return -ENOMEM;
dev->dev_private = bochs;
bochs->dev = dev;
ret = bochs_hw_init(dev);
if (ret)
return ret;
ret = drmm_vram_helper_init(dev, bochs->fb_base, bochs->fb_size);
if (ret)
goto err_hw_fini;
ret = bochs_kms_init(bochs);
if (ret)
goto err_hw_fini;
return 0;
err_hw_fini:
bochs_hw_fini(dev);
return ret;
}
DEFINE_DRM_GEM_FOPS(bochs_fops);
static const struct drm_driver bochs_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &bochs_fops,
.name = "bochs-drm",
.desc = "bochs dispi vga interface (qemu stdvga)",
.date = "20130925",
.major = 1,
.minor = 0,
DRM_GEM_VRAM_DRIVER,
};
/* ---------------------------------------------------------------------- */
/* pm interface */
#ifdef CONFIG_PM_SLEEP
static int bochs_pm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
return drm_mode_config_helper_suspend(drm_dev);
}
static int bochs_pm_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
return drm_mode_config_helper_resume(drm_dev);
}
#endif
static const struct dev_pm_ops bochs_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(bochs_pm_suspend,
bochs_pm_resume)
};
/* ---------------------------------------------------------------------- */
/* pci interface */
static int bochs_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct drm_device *dev;
unsigned long fbsize;
int ret;
fbsize = pci_resource_len(pdev, 0);
if (fbsize < 4 * 1024 * 1024) {
DRM_ERROR("less than 4 MB video memory, ignoring device\n");
return -ENOMEM;
}
ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &bochs_driver);
if (ret)
return ret;
dev = drm_dev_alloc(&bochs_driver, &pdev->dev);
if (IS_ERR(dev))
return PTR_ERR(dev);
ret = pcim_enable_device(pdev);
if (ret)
goto err_free_dev;
pci_set_drvdata(pdev, dev);
ret = bochs_load(dev);
if (ret)
goto err_free_dev;
ret = drm_dev_register(dev, 0);
if (ret)
goto err_hw_fini;
drm_fbdev_generic_setup(dev, 32);
return ret;
err_hw_fini:
bochs_hw_fini(dev);
err_free_dev:
drm_dev_put(dev);
return ret;
}
static void bochs_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
drm_dev_unplug(dev);
drm_atomic_helper_shutdown(dev);
bochs_hw_fini(dev);
drm_dev_put(dev);
}
static const struct pci_device_id bochs_pci_tbl[] = {
{
.vendor = 0x1234,
.device = 0x1111,
.subvendor = PCI_SUBVENDOR_ID_REDHAT_QUMRANET,
.subdevice = PCI_SUBDEVICE_ID_QEMU,
.driver_data = BOCHS_QEMU_STDVGA,
},
{
.vendor = 0x1234,
.device = 0x1111,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = BOCHS_UNKNOWN,
},
{
.vendor = 0x4321,
.device = 0x1111,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = BOCHS_SIMICS,
},
{ /* end of list */ }
};
static struct pci_driver bochs_pci_driver = {
.name = "bochs-drm",
.id_table = bochs_pci_tbl,
.probe = bochs_pci_probe,
.remove = bochs_pci_remove,
.driver.pm = &bochs_pm_ops,
};
/* ---------------------------------------------------------------------- */
/* module init/exit */
drm_module_pci_driver_if_modeset(bochs_pci_driver, bochs_modeset);
MODULE_DEVICE_TABLE(pci, bochs_pci_tbl);
MODULE_AUTHOR("Gerd Hoffmann <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/tiny/bochs.c |
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2012-2019 Red Hat
*
* This file is subject to the terms and conditions of the GNU General
* Public License version 2. See the file COPYING in the main
* directory of this archive for more details.
*
* Authors: Matthew Garrett
* Dave Airlie
* Gerd Hoffmann
*
* Portions of this code derived from cirrusfb.c:
* drivers/video/cirrusfb.c - driver for Cirrus Logic chipsets
*
* Copyright 1999-2001 Jeff Garzik <[email protected]>
*/
#include <linux/iosys-map.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <video/cirrus.h>
#include <video/vga.h>
#include <drm/drm_aperture.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_file.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#define DRIVER_NAME "cirrus"
#define DRIVER_DESC "qemu cirrus vga"
#define DRIVER_DATE "2019"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 0
#define CIRRUS_MAX_PITCH (0x1FF << 3) /* (4096 - 1) & ~111b bytes */
#define CIRRUS_VRAM_SIZE (4 * 1024 * 1024) /* 4 MB */
struct cirrus_device {
struct drm_device dev;
/* modesetting pipeline */
struct drm_plane primary_plane;
struct drm_crtc crtc;
struct drm_encoder encoder;
struct drm_connector connector;
/* HW resources */
void __iomem *vram;
void __iomem *mmio;
};
#define to_cirrus(_dev) container_of(_dev, struct cirrus_device, dev)
struct cirrus_primary_plane_state {
struct drm_shadow_plane_state base;
/* HW scanout buffer */
const struct drm_format_info *format;
unsigned int pitch;
};
static inline struct cirrus_primary_plane_state *
to_cirrus_primary_plane_state(struct drm_plane_state *plane_state)
{
return container_of(plane_state, struct cirrus_primary_plane_state, base.base);
};
/* ------------------------------------------------------------------ */
/*
* The meat of this driver. The core passes us a mode and we have to program
* it. The modesetting here is the bare minimum required to satisfy the qemu
* emulation of this hardware, and running this against a real device is
* likely to result in an inadequately programmed mode. We've already had
* the opportunity to modify the mode, so whatever we receive here should
* be something that can be correctly programmed and displayed
*/
#define SEQ_INDEX 4
#define SEQ_DATA 5
static u8 rreg_seq(struct cirrus_device *cirrus, u8 reg)
{
iowrite8(reg, cirrus->mmio + SEQ_INDEX);
return ioread8(cirrus->mmio + SEQ_DATA);
}
static void wreg_seq(struct cirrus_device *cirrus, u8 reg, u8 val)
{
iowrite8(reg, cirrus->mmio + SEQ_INDEX);
iowrite8(val, cirrus->mmio + SEQ_DATA);
}
#define CRT_INDEX 0x14
#define CRT_DATA 0x15
static u8 rreg_crt(struct cirrus_device *cirrus, u8 reg)
{
iowrite8(reg, cirrus->mmio + CRT_INDEX);
return ioread8(cirrus->mmio + CRT_DATA);
}
static void wreg_crt(struct cirrus_device *cirrus, u8 reg, u8 val)
{
iowrite8(reg, cirrus->mmio + CRT_INDEX);
iowrite8(val, cirrus->mmio + CRT_DATA);
}
#define GFX_INDEX 0xe
#define GFX_DATA 0xf
static void wreg_gfx(struct cirrus_device *cirrus, u8 reg, u8 val)
{
iowrite8(reg, cirrus->mmio + GFX_INDEX);
iowrite8(val, cirrus->mmio + GFX_DATA);
}
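/*
 * Note on the helper below: the "hidden DAC" register (HDR) has no index
 * of its own. Following the usual Cirrus access sequence, it is reached
 * by reading the DAC pixel mask port four times in a row, after which
 * the next write to that port lands in HDR instead of the mask; that is
 * what the four back-to-back reads in wreg_hdr() are for.
 */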
#define VGA_DAC_MASK 0x06
static void wreg_hdr(struct cirrus_device *cirrus, u8 val)
{
ioread8(cirrus->mmio + VGA_DAC_MASK);
ioread8(cirrus->mmio + VGA_DAC_MASK);
ioread8(cirrus->mmio + VGA_DAC_MASK);
ioread8(cirrus->mmio + VGA_DAC_MASK);
iowrite8(val, cirrus->mmio + VGA_DAC_MASK);
}
static const struct drm_format_info *cirrus_convert_to(struct drm_framebuffer *fb)
{
if (fb->format->format == DRM_FORMAT_XRGB8888 && fb->pitches[0] > CIRRUS_MAX_PITCH) {
if (fb->width * 3 <= CIRRUS_MAX_PITCH)
/* convert from XR24 to RG24 */
return drm_format_info(DRM_FORMAT_RGB888);
else
/* convert from XR24 to RG16 */
return drm_format_info(DRM_FORMAT_RGB565);
}
return NULL;
}
static const struct drm_format_info *cirrus_format(struct drm_framebuffer *fb)
{
const struct drm_format_info *format = cirrus_convert_to(fb);
if (format)
return format;
return fb->format;
}
static int cirrus_pitch(struct drm_framebuffer *fb)
{
const struct drm_format_info *format = cirrus_convert_to(fb);
if (format)
return drm_format_info_min_pitch(format, 0, fb->width);
return fb->pitches[0];
}
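/*
 * Worked examples for the conversion rules above (illustrative arithmetic
 * only): an 800 pixel wide XRGB8888 framebuffer has a pitch of
 * 800 * 4 = 3200 bytes, fits within CIRRUS_MAX_PITCH (4088) and is left
 * alone. At 1024 pixels the pitch is 4096 > 4088, but 1024 * 3 = 3072
 * still fits, so cirrus_convert_to() picks RGB888 and cirrus_pitch()
 * returns 3072. At 1600 pixels even RGB888 would need 4800 bytes, so the
 * fallback is RGB565 with a pitch of 1600 * 2 = 3200.
 */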
static void cirrus_set_start_address(struct cirrus_device *cirrus, u32 offset)
{
u32 addr;
u8 tmp;
addr = offset >> 2;
wreg_crt(cirrus, 0x0c, (u8)((addr >> 8) & 0xff));
wreg_crt(cirrus, 0x0d, (u8)(addr & 0xff));
tmp = rreg_crt(cirrus, 0x1b);
tmp &= 0xf2;
tmp |= (addr >> 16) & 0x01;
tmp |= (addr >> 15) & 0x0c;
wreg_crt(cirrus, 0x1b, tmp);
tmp = rreg_crt(cirrus, 0x1d);
tmp &= 0x7f;
tmp |= (addr >> 12) & 0x80;
wreg_crt(cirrus, 0x1d, tmp);
}
static void cirrus_mode_set(struct cirrus_device *cirrus,
struct drm_display_mode *mode)
{
int hsyncstart, hsyncend, htotal, hdispend;
int vtotal, vdispend;
int tmp;
htotal = mode->htotal / 8;
hsyncend = mode->hsync_end / 8;
hsyncstart = mode->hsync_start / 8;
hdispend = mode->hdisplay / 8;
vtotal = mode->vtotal;
vdispend = mode->vdisplay;
vdispend -= 1;
vtotal -= 2;
htotal -= 5;
hdispend -= 1;
hsyncstart += 1;
hsyncend += 1;
wreg_crt(cirrus, VGA_CRTC_V_SYNC_END, 0x20);
wreg_crt(cirrus, VGA_CRTC_H_TOTAL, htotal);
wreg_crt(cirrus, VGA_CRTC_H_DISP, hdispend);
wreg_crt(cirrus, VGA_CRTC_H_SYNC_START, hsyncstart);
wreg_crt(cirrus, VGA_CRTC_H_SYNC_END, hsyncend);
wreg_crt(cirrus, VGA_CRTC_V_TOTAL, vtotal & 0xff);
wreg_crt(cirrus, VGA_CRTC_V_DISP_END, vdispend & 0xff);
tmp = 0x40;
if ((vdispend + 1) & 512)
tmp |= 0x20;
wreg_crt(cirrus, VGA_CRTC_MAX_SCAN, tmp);
/*
* Overflow bits for values that don't fit in the standard registers
*/
tmp = 0x10;
if (vtotal & 0x100)
tmp |= 0x01;
if (vdispend & 0x100)
tmp |= 0x02;
if ((vdispend + 1) & 0x100)
tmp |= 0x08;
if (vtotal & 0x200)
tmp |= 0x20;
if (vdispend & 0x200)
tmp |= 0x40;
wreg_crt(cirrus, VGA_CRTC_OVERFLOW, tmp);
tmp = 0;
/* More overflow bits */
if ((htotal + 5) & 0x40)
tmp |= 0x10;
if ((htotal + 5) & 0x80)
tmp |= 0x20;
if (vtotal & 0x100)
tmp |= 0x40;
if (vtotal & 0x200)
tmp |= 0x80;
wreg_crt(cirrus, CL_CRT1A, tmp);
/* Disable Hercules/CGA compatibility */
wreg_crt(cirrus, VGA_CRTC_MODE, 0x03);
}
static void cirrus_format_set(struct cirrus_device *cirrus,
const struct drm_format_info *format)
{
u8 sr07, hdr;
sr07 = rreg_seq(cirrus, 0x07);
sr07 &= 0xe0;
switch (format->format) {
case DRM_FORMAT_C8:
sr07 |= 0x11;
hdr = 0x00;
break;
case DRM_FORMAT_RGB565:
sr07 |= 0x17;
hdr = 0xc1;
break;
case DRM_FORMAT_RGB888:
sr07 |= 0x15;
hdr = 0xc5;
break;
case DRM_FORMAT_XRGB8888:
sr07 |= 0x19;
hdr = 0xc5;
break;
default:
return;
}
wreg_seq(cirrus, 0x7, sr07);
/* Enable high-colour modes */
wreg_gfx(cirrus, VGA_GFX_MODE, 0x40);
/* And set graphics mode */
wreg_gfx(cirrus, VGA_GFX_MISC, 0x01);
wreg_hdr(cirrus, hdr);
}
static void cirrus_pitch_set(struct cirrus_device *cirrus, unsigned int pitch)
{
u8 cr13, cr1b;
/* Program the pitch */
cr13 = pitch / 8;
wreg_crt(cirrus, VGA_CRTC_OFFSET, cr13);
/* Enable extended blanking and pitch bits, and enable full memory */
cr1b = 0x22;
cr1b |= (pitch >> 7) & 0x10;
cr1b |= (pitch >> 6) & 0x40;
wreg_crt(cirrus, 0x1b, cr1b);
cirrus_set_start_address(cirrus, 0);
}
/* ------------------------------------------------------------------ */
/* cirrus display pipe */
static const uint32_t cirrus_primary_plane_formats[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_RGB888,
DRM_FORMAT_XRGB8888,
};
static const uint64_t cirrus_primary_plane_format_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
static int cirrus_primary_plane_helper_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
struct cirrus_primary_plane_state *new_primary_plane_state =
to_cirrus_primary_plane_state(new_plane_state);
struct drm_framebuffer *fb = new_plane_state->fb;
struct drm_crtc *new_crtc = new_plane_state->crtc;
struct drm_crtc_state *new_crtc_state = NULL;
int ret;
unsigned int pitch;
if (new_crtc)
new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
DRM_PLANE_NO_SCALING,
DRM_PLANE_NO_SCALING,
false, false);
if (ret)
return ret;
else if (!new_plane_state->visible)
return 0;
pitch = cirrus_pitch(fb);
/* validate size constraints */
if (pitch > CIRRUS_MAX_PITCH)
return -EINVAL;
else if (pitch * fb->height > CIRRUS_VRAM_SIZE)
return -EINVAL;
new_primary_plane_state->format = cirrus_format(fb);
new_primary_plane_state->pitch = pitch;
return 0;
}
static void cirrus_primary_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct cirrus_device *cirrus = to_cirrus(plane->dev);
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
struct cirrus_primary_plane_state *primary_plane_state =
to_cirrus_primary_plane_state(plane_state);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
const struct drm_format_info *format = primary_plane_state->format;
unsigned int pitch = primary_plane_state->pitch;
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
struct cirrus_primary_plane_state *old_primary_plane_state =
to_cirrus_primary_plane_state(old_plane_state);
struct iosys_map vaddr = IOSYS_MAP_INIT_VADDR_IOMEM(cirrus->vram);
struct drm_atomic_helper_damage_iter iter;
struct drm_rect damage;
int idx;
if (!fb)
return;
if (!drm_dev_enter(&cirrus->dev, &idx))
return;
if (old_primary_plane_state->format != format)
cirrus_format_set(cirrus, format);
if (old_primary_plane_state->pitch != pitch)
cirrus_pitch_set(cirrus, pitch);
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drm_atomic_for_each_plane_damage(&iter, &damage) {
unsigned int offset = drm_fb_clip_offset(pitch, format, &damage);
struct iosys_map dst = IOSYS_MAP_INIT_OFFSET(&vaddr, offset);
drm_fb_blit(&dst, &pitch, format->format, shadow_plane_state->data, fb, &damage);
}
drm_dev_exit(idx);
}
static const struct drm_plane_helper_funcs cirrus_primary_plane_helper_funcs = {
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
.atomic_check = cirrus_primary_plane_helper_atomic_check,
.atomic_update = cirrus_primary_plane_helper_atomic_update,
};
static struct drm_plane_state *
cirrus_primary_plane_atomic_duplicate_state(struct drm_plane *plane)
{
struct drm_plane_state *plane_state = plane->state;
struct cirrus_primary_plane_state *primary_plane_state =
to_cirrus_primary_plane_state(plane_state);
struct cirrus_primary_plane_state *new_primary_plane_state;
struct drm_shadow_plane_state *new_shadow_plane_state;
if (!plane_state)
return NULL;
new_primary_plane_state = kzalloc(sizeof(*new_primary_plane_state), GFP_KERNEL);
if (!new_primary_plane_state)
return NULL;
new_shadow_plane_state = &new_primary_plane_state->base;
__drm_gem_duplicate_shadow_plane_state(plane, new_shadow_plane_state);
new_primary_plane_state->format = primary_plane_state->format;
new_primary_plane_state->pitch = primary_plane_state->pitch;
return &new_shadow_plane_state->base;
}
static void cirrus_primary_plane_atomic_destroy_state(struct drm_plane *plane,
struct drm_plane_state *plane_state)
{
struct cirrus_primary_plane_state *primary_plane_state =
to_cirrus_primary_plane_state(plane_state);
__drm_gem_destroy_shadow_plane_state(&primary_plane_state->base);
kfree(primary_plane_state);
}
static void cirrus_reset_primary_plane(struct drm_plane *plane)
{
struct cirrus_primary_plane_state *primary_plane_state;
if (plane->state) {
cirrus_primary_plane_atomic_destroy_state(plane, plane->state);
plane->state = NULL; /* must be set to NULL here */
}
primary_plane_state = kzalloc(sizeof(*primary_plane_state), GFP_KERNEL);
if (!primary_plane_state)
return;
__drm_gem_reset_shadow_plane(plane, &primary_plane_state->base);
}
static const struct drm_plane_funcs cirrus_primary_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
.reset = cirrus_reset_primary_plane,
.atomic_duplicate_state = cirrus_primary_plane_atomic_duplicate_state,
.atomic_destroy_state = cirrus_primary_plane_atomic_destroy_state,
};
static int cirrus_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
int ret;
if (!crtc_state->enable)
return 0;
ret = drm_atomic_helper_check_crtc_primary_plane(crtc_state);
if (ret)
return ret;
return 0;
}
static void cirrus_crtc_helper_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct cirrus_device *cirrus = to_cirrus(crtc->dev);
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
int idx;
if (!drm_dev_enter(&cirrus->dev, &idx))
return;
cirrus_mode_set(cirrus, &crtc_state->mode);
/* Unblank (needed on S3 resume, vgabios doesn't do it then) */
outb(VGA_AR_ENABLE_DISPLAY, VGA_ATT_W);
drm_dev_exit(idx);
}
static const struct drm_crtc_helper_funcs cirrus_crtc_helper_funcs = {
.atomic_check = cirrus_crtc_helper_atomic_check,
.atomic_enable = cirrus_crtc_helper_atomic_enable,
};
static const struct drm_crtc_funcs cirrus_crtc_funcs = {
.reset = drm_atomic_helper_crtc_reset,
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
static const struct drm_encoder_funcs cirrus_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
static int cirrus_connector_helper_get_modes(struct drm_connector *connector)
{
int count;
count = drm_add_modes_noedid(connector,
connector->dev->mode_config.max_width,
connector->dev->mode_config.max_height);
drm_set_preferred_mode(connector, 1024, 768);
return count;
}
static const struct drm_connector_helper_funcs cirrus_connector_helper_funcs = {
.get_modes = cirrus_connector_helper_get_modes,
};
static const struct drm_connector_funcs cirrus_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int cirrus_pipe_init(struct cirrus_device *cirrus)
{
struct drm_device *dev = &cirrus->dev;
struct drm_plane *primary_plane;
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_connector *connector;
int ret;
primary_plane = &cirrus->primary_plane;
ret = drm_universal_plane_init(dev, primary_plane, 0,
&cirrus_primary_plane_funcs,
cirrus_primary_plane_formats,
ARRAY_SIZE(cirrus_primary_plane_formats),
cirrus_primary_plane_format_modifiers,
DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret)
return ret;
drm_plane_helper_add(primary_plane, &cirrus_primary_plane_helper_funcs);
drm_plane_enable_fb_damage_clips(primary_plane);
crtc = &cirrus->crtc;
ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
&cirrus_crtc_funcs, NULL);
if (ret)
return ret;
drm_crtc_helper_add(crtc, &cirrus_crtc_helper_funcs);
encoder = &cirrus->encoder;
ret = drm_encoder_init(dev, encoder, &cirrus_encoder_funcs,
DRM_MODE_ENCODER_DAC, NULL);
if (ret)
return ret;
encoder->possible_crtcs = drm_crtc_mask(crtc);
connector = &cirrus->connector;
ret = drm_connector_init(dev, connector, &cirrus_connector_funcs,
DRM_MODE_CONNECTOR_VGA);
if (ret)
return ret;
drm_connector_helper_add(connector, &cirrus_connector_helper_funcs);
ret = drm_connector_attach_encoder(connector, encoder);
if (ret)
return ret;
return 0;
}
/* ------------------------------------------------------------------ */
/* cirrus framebuffers & mode config */
static enum drm_mode_status cirrus_mode_config_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
{
const struct drm_format_info *format = drm_format_info(DRM_FORMAT_XRGB8888);
uint64_t pitch = drm_format_info_min_pitch(format, 0, mode->hdisplay);
if (pitch * mode->vdisplay > CIRRUS_VRAM_SIZE)
return MODE_MEM;
return MODE_OK;
}
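/*
 * Example of the check above (illustrative arithmetic only): 1024x768
 * needs 1024 * 4 * 768 = 3 MiB of the 4 MiB VRAM when laid out as
 * XRGB8888 and is accepted, whereas 1280x1024 would need 5 MiB and is
 * rejected with MODE_MEM. Checking against the worst-case XRGB8888 pitch
 * means every advertised mode also fits after any later pitch conversion.
 */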
static const struct drm_mode_config_funcs cirrus_mode_config_funcs = {
.fb_create = drm_gem_fb_create_with_dirty,
.mode_valid = cirrus_mode_config_mode_valid,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
static int cirrus_mode_config_init(struct cirrus_device *cirrus)
{
struct drm_device *dev = &cirrus->dev;
int ret;
ret = drmm_mode_config_init(dev);
if (ret)
return ret;
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
dev->mode_config.max_width = CIRRUS_MAX_PITCH / 2;
dev->mode_config.max_height = 1024;
dev->mode_config.preferred_depth = 16;
dev->mode_config.prefer_shadow = 0;
dev->mode_config.funcs = &cirrus_mode_config_funcs;
return 0;
}
/* ------------------------------------------------------------------ */
DEFINE_DRM_GEM_FOPS(cirrus_fops);
static const struct drm_driver cirrus_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.fops = &cirrus_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
};
static int cirrus_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct drm_device *dev;
struct cirrus_device *cirrus;
int ret;
ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &cirrus_driver);
if (ret)
return ret;
ret = pcim_enable_device(pdev);
if (ret)
return ret;
ret = pci_request_regions(pdev, DRIVER_NAME);
if (ret)
return ret;
ret = -ENOMEM;
cirrus = devm_drm_dev_alloc(&pdev->dev, &cirrus_driver,
struct cirrus_device, dev);
if (IS_ERR(cirrus))
return PTR_ERR(cirrus);
dev = &cirrus->dev;
cirrus->vram = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
if (cirrus->vram == NULL)
return -ENOMEM;
cirrus->mmio = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 1),
pci_resource_len(pdev, 1));
if (cirrus->mmio == NULL)
return -ENOMEM;
ret = cirrus_mode_config_init(cirrus);
if (ret)
return ret;
ret = cirrus_pipe_init(cirrus);
if (ret < 0)
return ret;
drm_mode_config_reset(dev);
pci_set_drvdata(pdev, dev);
ret = drm_dev_register(dev, 0);
if (ret)
return ret;
drm_fbdev_generic_setup(dev, 16);
return 0;
}
static void cirrus_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
drm_dev_unplug(dev);
drm_atomic_helper_shutdown(dev);
}
static const struct pci_device_id pciidlist[] = {
{
.vendor = PCI_VENDOR_ID_CIRRUS,
.device = PCI_DEVICE_ID_CIRRUS_5446,
/* only bind to the cirrus chip in qemu */
.subvendor = PCI_SUBVENDOR_ID_REDHAT_QUMRANET,
.subdevice = PCI_SUBDEVICE_ID_QEMU,
}, {
.vendor = PCI_VENDOR_ID_CIRRUS,
.device = PCI_DEVICE_ID_CIRRUS_5446,
.subvendor = PCI_VENDOR_ID_XEN,
.subdevice = 0x0001,
},
{ /* end of list */ }
};
static struct pci_driver cirrus_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = cirrus_pci_probe,
.remove = cirrus_pci_remove,
};
drm_module_pci_driver(cirrus_pci_driver)
MODULE_DEVICE_TABLE(pci, pciidlist);
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/tiny/cirrus.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* DRM driver for Pervasive Displays RePaper branded e-ink panels
*
* Copyright 2013-2017 Pervasive Displays, Inc.
* Copyright 2017 Noralf Trønnes
*
* The driver supports:
* Material Film: Aurora Mb (V231)
* Driver IC: G2 (eTC)
*
* The controller code was taken from the userspace driver:
* https://github.com/repaper/gratis
*/
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/sched/clock.h>
#include <linux/spi/spi.h>
#include <linux/thermal.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_modes.h>
#include <drm/drm_rect.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#define REPAPER_RID_G2_COG_ID 0x12
enum repaper_model {
/* 0 is reserved to avoid clashing with NULL */
E1144CS021 = 1,
E1190CS021,
E2200CS021,
E2271CS021,
};
enum repaper_stage { /* Image pixel -> Display pixel */
REPAPER_COMPENSATE, /* B -> W, W -> B (Current Image) */
REPAPER_WHITE, /* B -> N, W -> W (Current Image) */
REPAPER_INVERSE, /* B -> N, W -> B (New Image) */
REPAPER_NORMAL /* B -> B, W -> W (New Image) */
};
enum repaper_epd_border_byte {
REPAPER_BORDER_BYTE_NONE,
REPAPER_BORDER_BYTE_ZERO,
REPAPER_BORDER_BYTE_SET,
};
struct repaper_epd {
struct drm_device drm;
struct drm_simple_display_pipe pipe;
const struct drm_display_mode *mode;
struct drm_connector connector;
struct spi_device *spi;
struct gpio_desc *panel_on;
struct gpio_desc *border;
struct gpio_desc *discharge;
struct gpio_desc *reset;
struct gpio_desc *busy;
struct thermal_zone_device *thermal;
unsigned int height;
unsigned int width;
unsigned int bytes_per_scan;
const u8 *channel_select;
unsigned int stage_time;
unsigned int factored_stage_time;
bool middle_scan;
bool pre_border_byte;
enum repaper_epd_border_byte border_byte;
u8 *line_buffer;
void *current_frame;
bool cleared;
bool partial;
};
static inline struct repaper_epd *drm_to_epd(struct drm_device *drm)
{
return container_of(drm, struct repaper_epd, drm);
}
static int repaper_spi_transfer(struct spi_device *spi, u8 header,
const void *tx, void *rx, size_t len)
{
void *txbuf = NULL, *rxbuf = NULL;
struct spi_transfer tr[2] = {};
u8 *headerbuf;
int ret;
headerbuf = kmalloc(1, GFP_KERNEL);
if (!headerbuf)
return -ENOMEM;
headerbuf[0] = header;
tr[0].tx_buf = headerbuf;
tr[0].len = 1;
/* Stack allocated tx? */
if (tx && len <= 32) {
txbuf = kmemdup(tx, len, GFP_KERNEL);
if (!txbuf) {
ret = -ENOMEM;
goto out_free;
}
}
if (rx) {
rxbuf = kmalloc(len, GFP_KERNEL);
if (!rxbuf) {
ret = -ENOMEM;
goto out_free;
}
}
tr[1].tx_buf = txbuf ? txbuf : tx;
tr[1].rx_buf = rxbuf;
tr[1].len = len;
ndelay(80);
ret = spi_sync_transfer(spi, tr, 2);
if (rx && !ret)
memcpy(rx, rxbuf, len);
out_free:
kfree(headerbuf);
kfree(txbuf);
kfree(rxbuf);
return ret;
}
static int repaper_write_buf(struct spi_device *spi, u8 reg,
const u8 *buf, size_t len)
{
int ret;
ret = repaper_spi_transfer(spi, 0x70, &reg, NULL, 1);
if (ret)
return ret;
return repaper_spi_transfer(spi, 0x72, buf, NULL, len);
}
static int repaper_write_val(struct spi_device *spi, u8 reg, u8 val)
{
return repaper_write_buf(spi, reg, &val, 1);
}
static int repaper_read_val(struct spi_device *spi, u8 reg)
{
int ret;
u8 val;
ret = repaper_spi_transfer(spi, 0x70, &reg, NULL, 1);
if (ret)
return ret;
ret = repaper_spi_transfer(spi, 0x73, NULL, &val, 1);
return ret ? ret : val;
}
static int repaper_read_id(struct spi_device *spi)
{
int ret;
u8 id;
ret = repaper_spi_transfer(spi, 0x71, NULL, &id, 1);
return ret ? ret : id;
}
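/*
 * Wire format of the helpers above, summarised for reference (derived
 * from the code, illustrative only): each access is a one byte header
 * followed by a payload: 0x70 selects a register index, 0x72 writes
 * register data, 0x73 reads register data and 0x71 reads the COG ID.
 * Writing value 0x07 to register 0x02, as repaper_write_val(epd->spi,
 * 0x02, 0x07) does later in this driver, therefore appears on the bus as
 * the two transfers [0x70, 0x02] followed by [0x72, 0x07].
 */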
static void repaper_spi_mosi_low(struct spi_device *spi)
{
const u8 buf[1] = { 0 };
spi_write(spi, buf, 1);
}
/* pixels on display are numbered from 1 so even is actually bits 1,3,5,... */
static void repaper_even_pixels(struct repaper_epd *epd, u8 **pp,
const u8 *data, u8 fixed_value, const u8 *mask,
enum repaper_stage stage)
{
unsigned int b;
for (b = 0; b < (epd->width / 8); b++) {
if (data) {
u8 pixels = data[b] & 0xaa;
u8 pixel_mask = 0xff;
u8 p1, p2, p3, p4;
if (mask) {
pixel_mask = (mask[b] ^ pixels) & 0xaa;
pixel_mask |= pixel_mask >> 1;
}
switch (stage) {
case REPAPER_COMPENSATE: /* B -> W, W -> B (Current) */
pixels = 0xaa | ((pixels ^ 0xaa) >> 1);
break;
case REPAPER_WHITE: /* B -> N, W -> W (Current) */
pixels = 0x55 + ((pixels ^ 0xaa) >> 1);
break;
case REPAPER_INVERSE: /* B -> N, W -> B (New) */
pixels = 0x55 | (pixels ^ 0xaa);
break;
case REPAPER_NORMAL: /* B -> B, W -> W (New) */
pixels = 0xaa | (pixels >> 1);
break;
}
pixels = (pixels & pixel_mask) | (~pixel_mask & 0x55);
p1 = (pixels >> 6) & 0x03;
p2 = (pixels >> 4) & 0x03;
p3 = (pixels >> 2) & 0x03;
p4 = (pixels >> 0) & 0x03;
pixels = (p1 << 0) | (p2 << 2) | (p3 << 4) | (p4 << 6);
*(*pp)++ = pixels;
} else {
*(*pp)++ = fixed_value;
}
}
}
/* pixels on display are numbered from 1 so odd is actually bits 0,2,4,... */
static void repaper_odd_pixels(struct repaper_epd *epd, u8 **pp,
const u8 *data, u8 fixed_value, const u8 *mask,
enum repaper_stage stage)
{
unsigned int b;
for (b = epd->width / 8; b > 0; b--) {
if (data) {
u8 pixels = data[b - 1] & 0x55;
u8 pixel_mask = 0xff;
if (mask) {
pixel_mask = (mask[b - 1] ^ pixels) & 0x55;
pixel_mask |= pixel_mask << 1;
}
switch (stage) {
case REPAPER_COMPENSATE: /* B -> W, W -> B (Current) */
pixels = 0xaa | (pixels ^ 0x55);
break;
case REPAPER_WHITE: /* B -> N, W -> W (Current) */
pixels = 0x55 + (pixels ^ 0x55);
break;
case REPAPER_INVERSE: /* B -> N, W -> B (New) */
pixels = 0x55 | ((pixels ^ 0x55) << 1);
break;
case REPAPER_NORMAL: /* B -> B, W -> W (New) */
pixels = 0xaa | pixels;
break;
}
pixels = (pixels & pixel_mask) | (~pixel_mask & 0x55);
*(*pp)++ = pixels;
} else {
*(*pp)++ = fixed_value;
}
}
}
/* interleave bits: (byte)76543210 -> (16 bit).7.6.5.4.3.2.1.0 */
static inline u16 repaper_interleave_bits(u16 value)
{
value = (value | (value << 4)) & 0x0f0f;
value = (value | (value << 2)) & 0x3333;
value = (value | (value << 1)) & 0x5555;
return value;
}
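/*
 * Worked example (illustrative only): for value = 0xb2 (0b10110010) the
 * three mask-and-shift steps above yield 0x0b02, then 0x2302, then
 * 0x4504 = 0b0100010100000100, i.e. each input bit k ends up at output
 * bit 2k with zero bits in between.
 */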
/* pixels on display are numbered from 1 */
static void repaper_all_pixels(struct repaper_epd *epd, u8 **pp,
const u8 *data, u8 fixed_value, const u8 *mask,
enum repaper_stage stage)
{
unsigned int b;
for (b = epd->width / 8; b > 0; b--) {
if (data) {
u16 pixels = repaper_interleave_bits(data[b - 1]);
u16 pixel_mask = 0xffff;
if (mask) {
pixel_mask = repaper_interleave_bits(mask[b - 1]);
pixel_mask = (pixel_mask ^ pixels) & 0x5555;
pixel_mask |= pixel_mask << 1;
}
switch (stage) {
case REPAPER_COMPENSATE: /* B -> W, W -> B (Current) */
pixels = 0xaaaa | (pixels ^ 0x5555);
break;
case REPAPER_WHITE: /* B -> N, W -> W (Current) */
pixels = 0x5555 + (pixels ^ 0x5555);
break;
case REPAPER_INVERSE: /* B -> N, W -> B (New) */
pixels = 0x5555 | ((pixels ^ 0x5555) << 1);
break;
case REPAPER_NORMAL: /* B -> B, W -> W (New) */
pixels = 0xaaaa | pixels;
break;
}
pixels = (pixels & pixel_mask) | (~pixel_mask & 0x5555);
*(*pp)++ = pixels >> 8;
*(*pp)++ = pixels;
} else {
*(*pp)++ = fixed_value;
*(*pp)++ = fixed_value;
}
}
}
/* output one line of scan and data bytes to the display */
static void repaper_one_line(struct repaper_epd *epd, unsigned int line,
const u8 *data, u8 fixed_value, const u8 *mask,
enum repaper_stage stage)
{
u8 *p = epd->line_buffer;
unsigned int b;
repaper_spi_mosi_low(epd->spi);
if (epd->pre_border_byte)
*p++ = 0x00;
if (epd->middle_scan) {
/* data bytes */
repaper_odd_pixels(epd, &p, data, fixed_value, mask, stage);
/* scan line */
for (b = epd->bytes_per_scan; b > 0; b--) {
if (line / 4 == b - 1)
*p++ = 0x03 << (2 * (line & 0x03));
else
*p++ = 0x00;
}
/* data bytes */
repaper_even_pixels(epd, &p, data, fixed_value, mask, stage);
} else {
/*
* even scan line, but as lines on display are numbered from 1,
* line: 1,3,5,...
*/
for (b = 0; b < epd->bytes_per_scan; b++) {
if (0 != (line & 0x01) && line / 8 == b)
*p++ = 0xc0 >> (line & 0x06);
else
*p++ = 0x00;
}
/* data bytes */
repaper_all_pixels(epd, &p, data, fixed_value, mask, stage);
/*
* odd scan line, but as lines on display are numbered from 1,
* line: 0,2,4,6,...
*/
for (b = epd->bytes_per_scan; b > 0; b--) {
if (0 == (line & 0x01) && line / 8 == b - 1)
*p++ = 0x03 << (line & 0x06);
else
*p++ = 0x00;
}
}
switch (epd->border_byte) {
case REPAPER_BORDER_BYTE_NONE:
break;
case REPAPER_BORDER_BYTE_ZERO:
*p++ = 0x00;
break;
case REPAPER_BORDER_BYTE_SET:
switch (stage) {
case REPAPER_COMPENSATE:
case REPAPER_WHITE:
case REPAPER_INVERSE:
*p++ = 0x00;
break;
case REPAPER_NORMAL:
*p++ = 0xaa;
break;
}
break;
}
repaper_write_buf(epd->spi, 0x0a, epd->line_buffer,
p - epd->line_buffer);
/* Output data to panel */
repaper_write_val(epd->spi, 0x02, 0x07);
repaper_spi_mosi_low(epd->spi);
}
static void repaper_frame_fixed(struct repaper_epd *epd, u8 fixed_value,
enum repaper_stage stage)
{
unsigned int line;
for (line = 0; line < epd->height; line++)
repaper_one_line(epd, line, NULL, fixed_value, NULL, stage);
}
static void repaper_frame_data(struct repaper_epd *epd, const u8 *image,
const u8 *mask, enum repaper_stage stage)
{
unsigned int line;
if (!mask) {
for (line = 0; line < epd->height; line++) {
repaper_one_line(epd, line,
&image[line * (epd->width / 8)],
0, NULL, stage);
}
} else {
for (line = 0; line < epd->height; line++) {
size_t n = line * epd->width / 8;
repaper_one_line(epd, line, &image[n], 0, &mask[n],
stage);
}
}
}
static void repaper_frame_fixed_repeat(struct repaper_epd *epd, u8 fixed_value,
enum repaper_stage stage)
{
u64 start = local_clock();
u64 end = start + (epd->factored_stage_time * 1000 * 1000);
do {
repaper_frame_fixed(epd, fixed_value, stage);
} while (local_clock() < end);
}
static void repaper_frame_data_repeat(struct repaper_epd *epd, const u8 *image,
const u8 *mask, enum repaper_stage stage)
{
u64 start = local_clock();
u64 end = start + (epd->factored_stage_time * 1000 * 1000);
do {
repaper_frame_data(epd, image, mask, stage);
} while (local_clock() < end);
}
static void repaper_get_temperature(struct repaper_epd *epd)
{
int ret, temperature = 0;
unsigned int factor10x;
if (!epd->thermal)
return;
ret = thermal_zone_get_temp(epd->thermal, &temperature);
if (ret) {
DRM_DEV_ERROR(&epd->spi->dev, "Failed to get temperature (%d)\n", ret);
return;
}
temperature /= 1000;
if (temperature <= -10)
factor10x = 170;
else if (temperature <= -5)
factor10x = 120;
else if (temperature <= 5)
factor10x = 80;
else if (temperature <= 10)
factor10x = 40;
else if (temperature <= 15)
factor10x = 30;
else if (temperature <= 20)
factor10x = 20;
else if (temperature <= 40)
factor10x = 10;
else
factor10x = 7;
epd->factored_stage_time = epd->stage_time * factor10x / 10;
}
static int repaper_fb_dirty(struct drm_framebuffer *fb)
{
struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
struct repaper_epd *epd = drm_to_epd(fb->dev);
unsigned int dst_pitch = 0;
struct iosys_map dst, vmap;
struct drm_rect clip;
int idx, ret = 0;
u8 *buf = NULL;
if (!drm_dev_enter(fb->dev, &idx))
return -ENODEV;
/* repaper can't do partial updates */
clip.x1 = 0;
clip.x2 = fb->width;
clip.y1 = 0;
clip.y2 = fb->height;
repaper_get_temperature(epd);
DRM_DEBUG("Flushing [FB:%d] st=%ums\n", fb->base.id,
epd->factored_stage_time);
buf = kmalloc(fb->width * fb->height / 8, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
goto out_exit;
}
ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
if (ret)
goto out_free;
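/* Convert the XRGB8888 framebuffer into the panel's packed monochrome format */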
iosys_map_set_vaddr(&dst, buf);
iosys_map_set_vaddr(&vmap, dma_obj->vaddr);
drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, &vmap, fb, &clip);
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
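/* Partial update: pass the previous frame as a mask alongside the new data */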
if (epd->partial) {
repaper_frame_data_repeat(epd, buf, epd->current_frame,
REPAPER_NORMAL);
} else if (epd->cleared) {
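/*
 * Full update from a known image: compensate and whiten the previous
 * frame, then drive the inverse and normal stages of the new one.
 */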
repaper_frame_data_repeat(epd, epd->current_frame, NULL,
REPAPER_COMPENSATE);
repaper_frame_data_repeat(epd, epd->current_frame, NULL,
REPAPER_WHITE);
repaper_frame_data_repeat(epd, buf, NULL, REPAPER_INVERSE);
repaper_frame_data_repeat(epd, buf, NULL, REPAPER_NORMAL);
epd->partial = true;
} else {
/* Clear display (anything -> white) */
repaper_frame_fixed_repeat(epd, 0xff, REPAPER_COMPENSATE);
repaper_frame_fixed_repeat(epd, 0xff, REPAPER_WHITE);
repaper_frame_fixed_repeat(epd, 0xaa, REPAPER_INVERSE);
repaper_frame_fixed_repeat(epd, 0xaa, REPAPER_NORMAL);
/* Assuming a clear (white) screen, output an image */
repaper_frame_fixed_repeat(epd, 0xaa, REPAPER_COMPENSATE);
repaper_frame_fixed_repeat(epd, 0xaa, REPAPER_WHITE);
repaper_frame_data_repeat(epd, buf, NULL, REPAPER_INVERSE);
repaper_frame_data_repeat(epd, buf, NULL, REPAPER_NORMAL);
epd->cleared = true;
epd->partial = true;
}
memcpy(epd->current_frame, buf, fb->width * fb->height / 8);
/*
 * An extra frame write is needed if pixels are set in the bottom line,
 * or else grey lines rise up from the pixels.
 */
if (epd->pre_border_byte) {
unsigned int x;
for (x = 0; x < (fb->width / 8); x++)
if (buf[x + (fb->width * (fb->height - 1) / 8)]) {
repaper_frame_data_repeat(epd, buf,
epd->current_frame,
REPAPER_NORMAL);
break;
}
}
out_free:
kfree(buf);
out_exit:
drm_dev_exit(idx);
return ret;
}
static void power_off(struct repaper_epd *epd)
{
/* Turn off power and all signals */
gpiod_set_value_cansleep(epd->reset, 0);
gpiod_set_value_cansleep(epd->panel_on, 0);
if (epd->border)
gpiod_set_value_cansleep(epd->border, 0);
/* Ensure SPI MOSI and CLOCK are Low before CS Low */
repaper_spi_mosi_low(epd->spi);
/* Discharge pulse */
gpiod_set_value_cansleep(epd->discharge, 1);
msleep(150);
gpiod_set_value_cansleep(epd->discharge, 0);
}
static enum drm_mode_status repaper_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
const struct drm_display_mode *mode)
{
struct drm_crtc *crtc = &pipe->crtc;
struct repaper_epd *epd = drm_to_epd(crtc->dev);
return drm_crtc_helper_mode_valid_fixed(crtc, mode, epd->mode);
}
static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
struct repaper_epd *epd = drm_to_epd(pipe->crtc.dev);
struct spi_device *spi = epd->spi;
struct device *dev = &spi->dev;
bool dc_ok = false;
int i, ret, idx;
if (!drm_dev_enter(pipe->crtc.dev, &idx))
return;
DRM_DEBUG_DRIVER("\n");
/* Power up sequence */
gpiod_set_value_cansleep(epd->reset, 0);
gpiod_set_value_cansleep(epd->panel_on, 0);
gpiod_set_value_cansleep(epd->discharge, 0);
if (epd->border)
gpiod_set_value_cansleep(epd->border, 0);
repaper_spi_mosi_low(spi);
usleep_range(5000, 10000);
gpiod_set_value_cansleep(epd->panel_on, 1);
/*
* This delay comes from the repaper.org userspace driver, it's not
* mentioned in the datasheet.
*/
usleep_range(10000, 15000);
gpiod_set_value_cansleep(epd->reset, 1);
if (epd->border)
gpiod_set_value_cansleep(epd->border, 1);
usleep_range(5000, 10000);
gpiod_set_value_cansleep(epd->reset, 0);
usleep_range(5000, 10000);
gpiod_set_value_cansleep(epd->reset, 1);
usleep_range(5000, 10000);
/* Wait for COG to become ready */
for (i = 100; i > 0; i--) {
if (!gpiod_get_value_cansleep(epd->busy))
break;
usleep_range(10, 100);
}
if (!i) {
DRM_DEV_ERROR(dev, "timeout waiting for panel to become ready.\n");
power_off(epd);
goto out_exit;
}
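/* Read the COG ID twice; the result of the first read is discarded */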
repaper_read_id(spi);
ret = repaper_read_id(spi);
if (ret != REPAPER_RID_G2_COG_ID) {
if (ret < 0)
dev_err(dev, "failed to read chip (%d)\n", ret);
else
dev_err(dev, "wrong COG ID 0x%02x\n", ret);
power_off(epd);
goto out_exit;
}
/* Disable OE */
repaper_write_val(spi, 0x02, 0x40);
ret = repaper_read_val(spi, 0x0f);
if (ret < 0 || !(ret & 0x80)) {
if (ret < 0)
DRM_DEV_ERROR(dev, "failed to read chip (%d)\n", ret);
else
DRM_DEV_ERROR(dev, "panel is reported broken\n");
power_off(epd);
goto out_exit;
}
/* Power saving mode */
repaper_write_val(spi, 0x0b, 0x02);
/* Channel select */
repaper_write_buf(spi, 0x01, epd->channel_select, 8);
/* High power mode osc */
repaper_write_val(spi, 0x07, 0xd1);
/* Power setting */
repaper_write_val(spi, 0x08, 0x02);
/* Vcom level */
repaper_write_val(spi, 0x09, 0xc2);
/* Power setting */
repaper_write_val(spi, 0x04, 0x03);
/* Driver latch on */
repaper_write_val(spi, 0x03, 0x01);
/* Driver latch off */
repaper_write_val(spi, 0x03, 0x00);
usleep_range(5000, 10000);
/* Start chargepump */
for (i = 0; i < 4; ++i) {
/* Charge pump positive voltage on - VGH/VDL on */
repaper_write_val(spi, 0x05, 0x01);
msleep(240);
/* Charge pump negative voltage on - VGL/VDL on */
repaper_write_val(spi, 0x05, 0x03);
msleep(40);
/* Charge pump Vcom on - Vcom driver on */
repaper_write_val(spi, 0x05, 0x0f);
msleep(40);
/* check DC/DC */
ret = repaper_read_val(spi, 0x0f);
if (ret < 0) {
DRM_DEV_ERROR(dev, "failed to read chip (%d)\n", ret);
power_off(epd);
goto out_exit;
}
if (ret & 0x40) {
dc_ok = true;
break;
}
}
if (!dc_ok) {
DRM_DEV_ERROR(dev, "dc/dc failed\n");
power_off(epd);
goto out_exit;
}
/*
* Output enable to disable
* The userspace driver sets this to 0x04, but the datasheet says 0x06
*/
repaper_write_val(spi, 0x02, 0x04);
epd->partial = false;
out_exit:
drm_dev_exit(idx);
}
static void repaper_pipe_disable(struct drm_simple_display_pipe *pipe)
{
struct repaper_epd *epd = drm_to_epd(pipe->crtc.dev);
struct spi_device *spi = epd->spi;
unsigned int line;
/*
* This callback is not protected by drm_dev_enter/exit since we want to
* turn off the display on regular driver unload. It's highly unlikely
* that the underlying SPI controller is gone should this be called after
* unplug.
*/
DRM_DEBUG_DRIVER("\n");
/* Nothing frame */
for (line = 0; line < epd->height; line++)
repaper_one_line(epd, 0x7fffu, NULL, 0x00, NULL,
REPAPER_COMPENSATE);
/* The 2.7" panel has a border control GPIO */
if (epd->border) {
/* Dummy line */
repaper_one_line(epd, 0x7fffu, NULL, 0x00, NULL,
REPAPER_COMPENSATE);
msleep(25);
gpiod_set_value_cansleep(epd->border, 0);
msleep(200);
gpiod_set_value_cansleep(epd->border, 1);
} else {
/* Border dummy line */
repaper_one_line(epd, 0x7fffu, NULL, 0x00, NULL,
REPAPER_NORMAL);
msleep(200);
}
/* not described in datasheet */
repaper_write_val(spi, 0x0b, 0x00);
/* Latch reset turn on */
repaper_write_val(spi, 0x03, 0x01);
/* Power off charge pump Vcom */
repaper_write_val(spi, 0x05, 0x03);
/* Power off charge pump neg voltage */
repaper_write_val(spi, 0x05, 0x01);
msleep(120);
/* Discharge internal */
repaper_write_val(spi, 0x04, 0x80);
/* turn off all charge pumps */
repaper_write_val(spi, 0x05, 0x00);
/* Turn off osc */
repaper_write_val(spi, 0x07, 0x01);
msleep(50);
power_off(epd);
}
static void repaper_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
struct drm_rect rect;
if (!pipe->crtc.state->active)
return;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
repaper_fb_dirty(state->fb);
}
static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = {
.mode_valid = repaper_pipe_mode_valid,
.enable = repaper_pipe_enable,
.disable = repaper_pipe_disable,
.update = repaper_pipe_update,
};
static int repaper_connector_get_modes(struct drm_connector *connector)
{
struct repaper_epd *epd = drm_to_epd(connector->dev);
return drm_connector_helper_get_modes_fixed(connector, epd->mode);
}
static const struct drm_connector_helper_funcs repaper_connector_hfuncs = {
.get_modes = repaper_connector_get_modes,
};
static const struct drm_connector_funcs repaper_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static const struct drm_mode_config_funcs repaper_mode_config_funcs = {
.fb_create = drm_gem_fb_create_with_dirty,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
static const uint32_t repaper_formats[] = {
DRM_FORMAT_XRGB8888,
};
static const struct drm_display_mode repaper_e1144cs021_mode = {
DRM_SIMPLE_MODE(128, 96, 29, 22),
};
static const u8 repaper_e1144cs021_cs[] = { 0x00, 0x00, 0x00, 0x00,
0x00, 0x0f, 0xff, 0x00 };
static const struct drm_display_mode repaper_e1190cs021_mode = {
DRM_SIMPLE_MODE(144, 128, 36, 32),
};
static const u8 repaper_e1190cs021_cs[] = { 0x00, 0x00, 0x00, 0x03,
0xfc, 0x00, 0x00, 0xff };
static const struct drm_display_mode repaper_e2200cs021_mode = {
DRM_SIMPLE_MODE(200, 96, 46, 22),
};
static const u8 repaper_e2200cs021_cs[] = { 0x00, 0x00, 0x00, 0x00,
0x01, 0xff, 0xe0, 0x00 };
static const struct drm_display_mode repaper_e2271cs021_mode = {
DRM_SIMPLE_MODE(264, 176, 57, 38),
};
static const u8 repaper_e2271cs021_cs[] = { 0x00, 0x00, 0x00, 0x7f,
0xff, 0xfe, 0x00, 0x00 };
DEFINE_DRM_GEM_DMA_FOPS(repaper_fops);
static const struct drm_driver repaper_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &repaper_fops,
DRM_GEM_DMA_DRIVER_OPS_VMAP,
.name = "repaper",
.desc = "Pervasive Displays RePaper e-ink panels",
.date = "20170405",
.major = 1,
.minor = 0,
};
static const struct of_device_id repaper_of_match[] = {
{ .compatible = "pervasive,e1144cs021", .data = (void *)E1144CS021 },
{ .compatible = "pervasive,e1190cs021", .data = (void *)E1190CS021 },
{ .compatible = "pervasive,e2200cs021", .data = (void *)E2200CS021 },
{ .compatible = "pervasive,e2271cs021", .data = (void *)E2271CS021 },
{},
};
MODULE_DEVICE_TABLE(of, repaper_of_match);
static const struct spi_device_id repaper_id[] = {
{ "e1144cs021", E1144CS021 },
{ "e1190cs021", E1190CS021 },
{ "e2200cs021", E2200CS021 },
{ "e2271cs021", E2271CS021 },
{ },
};
MODULE_DEVICE_TABLE(spi, repaper_id);
static int repaper_probe(struct spi_device *spi)
{
const struct drm_display_mode *mode;
const struct spi_device_id *spi_id;
struct device *dev = &spi->dev;
enum repaper_model model;
const char *thermal_zone;
struct repaper_epd *epd;
size_t line_buffer_size;
struct drm_device *drm;
const void *match;
int ret;
match = device_get_match_data(dev);
if (match) {
model = (enum repaper_model)match;
} else {
spi_id = spi_get_device_id(spi);
model = (enum repaper_model)spi_id->driver_data;
}
/* The SPI device is used to allocate dma memory */
if (!dev->coherent_dma_mask) {
ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret) {
dev_warn(dev, "Failed to set dma mask %d\n", ret);
return ret;
}
}
epd = devm_drm_dev_alloc(dev, &repaper_driver,
struct repaper_epd, drm);
if (IS_ERR(epd))
return PTR_ERR(epd);
drm = &epd->drm;
ret = drmm_mode_config_init(drm);
if (ret)
return ret;
drm->mode_config.funcs = &repaper_mode_config_funcs;
epd->spi = spi;
epd->panel_on = devm_gpiod_get(dev, "panel-on", GPIOD_OUT_LOW);
if (IS_ERR(epd->panel_on)) {
ret = PTR_ERR(epd->panel_on);
if (ret != -EPROBE_DEFER)
DRM_DEV_ERROR(dev, "Failed to get gpio 'panel-on'\n");
return ret;
}
epd->discharge = devm_gpiod_get(dev, "discharge", GPIOD_OUT_LOW);
if (IS_ERR(epd->discharge)) {
ret = PTR_ERR(epd->discharge);
if (ret != -EPROBE_DEFER)
DRM_DEV_ERROR(dev, "Failed to get gpio 'discharge'\n");
return ret;
}
epd->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(epd->reset)) {
ret = PTR_ERR(epd->reset);
if (ret != -EPROBE_DEFER)
DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
return ret;
}
epd->busy = devm_gpiod_get(dev, "busy", GPIOD_IN);
if (IS_ERR(epd->busy)) {
ret = PTR_ERR(epd->busy);
if (ret != -EPROBE_DEFER)
DRM_DEV_ERROR(dev, "Failed to get gpio 'busy'\n");
return ret;
}
if (!device_property_read_string(dev, "pervasive,thermal-zone",
&thermal_zone)) {
epd->thermal = thermal_zone_get_zone_by_name(thermal_zone);
if (IS_ERR(epd->thermal)) {
DRM_DEV_ERROR(dev, "Failed to get thermal zone: %s\n", thermal_zone);
return PTR_ERR(epd->thermal);
}
}
switch (model) {
case E1144CS021:
mode = &repaper_e1144cs021_mode;
epd->channel_select = repaper_e1144cs021_cs;
epd->stage_time = 480;
epd->bytes_per_scan = 96 / 4;
epd->middle_scan = true; /* data-scan-data */
epd->pre_border_byte = false;
epd->border_byte = REPAPER_BORDER_BYTE_ZERO;
break;
case E1190CS021:
mode = &repaper_e1190cs021_mode;
epd->channel_select = repaper_e1190cs021_cs;
epd->stage_time = 480;
epd->bytes_per_scan = 128 / 4 / 2;
epd->middle_scan = false; /* scan-data-scan */
epd->pre_border_byte = false;
epd->border_byte = REPAPER_BORDER_BYTE_SET;
break;
case E2200CS021:
mode = &repaper_e2200cs021_mode;
epd->channel_select = repaper_e2200cs021_cs;
epd->stage_time = 480;
epd->bytes_per_scan = 96 / 4;
epd->middle_scan = true; /* data-scan-data */
epd->pre_border_byte = true;
epd->border_byte = REPAPER_BORDER_BYTE_NONE;
break;
case E2271CS021:
epd->border = devm_gpiod_get(dev, "border", GPIOD_OUT_LOW);
if (IS_ERR(epd->border)) {
ret = PTR_ERR(epd->border);
if (ret != -EPROBE_DEFER)
DRM_DEV_ERROR(dev, "Failed to get gpio 'border'\n");
return ret;
}
mode = &repaper_e2271cs021_mode;
epd->channel_select = repaper_e2271cs021_cs;
epd->stage_time = 630;
epd->bytes_per_scan = 176 / 4;
epd->middle_scan = true; /* data-scan-data */
epd->pre_border_byte = true;
epd->border_byte = REPAPER_BORDER_BYTE_NONE;
break;
default:
return -ENODEV;
}
epd->mode = mode;
epd->width = mode->hdisplay;
epd->height = mode->vdisplay;
epd->factored_stage_time = epd->stage_time;
line_buffer_size = 2 * epd->width / 8 + epd->bytes_per_scan + 2;
epd->line_buffer = devm_kzalloc(dev, line_buffer_size, GFP_KERNEL);
if (!epd->line_buffer)
return -ENOMEM;
epd->current_frame = devm_kzalloc(dev, epd->width * epd->height / 8,
GFP_KERNEL);
if (!epd->current_frame)
return -ENOMEM;
drm->mode_config.min_width = mode->hdisplay;
drm->mode_config.max_width = mode->hdisplay;
drm->mode_config.min_height = mode->vdisplay;
drm->mode_config.max_height = mode->vdisplay;
drm_connector_helper_add(&epd->connector, &repaper_connector_hfuncs);
ret = drm_connector_init(drm, &epd->connector, &repaper_connector_funcs,
DRM_MODE_CONNECTOR_SPI);
if (ret)
return ret;
ret = drm_simple_display_pipe_init(drm, &epd->pipe, &repaper_pipe_funcs,
repaper_formats, ARRAY_SIZE(repaper_formats),
NULL, &epd->connector);
if (ret)
return ret;
drm_mode_config_reset(drm);
ret = drm_dev_register(drm, 0);
if (ret)
return ret;
spi_set_drvdata(spi, drm);
DRM_DEBUG_DRIVER("SPI speed: %uMHz\n", spi->max_speed_hz / 1000000);
drm_fbdev_generic_setup(drm, 0);
return 0;
}
static void repaper_remove(struct spi_device *spi)
{
struct drm_device *drm = spi_get_drvdata(spi);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
}
static void repaper_shutdown(struct spi_device *spi)
{
drm_atomic_helper_shutdown(spi_get_drvdata(spi));
}
static struct spi_driver repaper_spi_driver = {
.driver = {
.name = "repaper",
.of_match_table = repaper_of_match,
},
.id_table = repaper_id,
.probe = repaper_probe,
.remove = repaper_remove,
.shutdown = repaper_shutdown,
};
module_spi_driver(repaper_spi_driver);
MODULE_DESCRIPTION("Pervasive Displays RePaper DRM driver");
MODULE_AUTHOR("Noralf Trønnes");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/tiny/repaper.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* DRM driver for the HX8357D LCD controller
*
* Copyright 2018 Broadcom
* Copyright 2018 David Lechner <[email protected]>
* Copyright 2016 Noralf Trønnes
* Copyright (C) 2015 Adafruit Industries
* Copyright (C) 2013 Christian Vogelgsang
*/
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/spi/spi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
#include <video/mipi_display.h>
#define HX8357D_SETOSC 0xb0
#define HX8357D_SETPOWER 0xb1
#define HX8357D_SETRGB 0xb3
#define HX8357D_SETCYC 0xb4 /* display cycle; 0xb3 is SETRGB */
#define HX8357D_SETCOM 0xb6
#define HX8357D_SETEXTC 0xb9
#define HX8357D_SETSTBA 0xc0
#define HX8357D_SETPANEL 0xcc
#define HX8357D_SETGAMMA 0xe0
#define HX8357D_MADCTL_MY 0x80
#define HX8357D_MADCTL_MX 0x40
#define HX8357D_MADCTL_MV 0x20
#define HX8357D_MADCTL_ML 0x10
#define HX8357D_MADCTL_RGB 0x00
#define HX8357D_MADCTL_BGR 0x08
#define HX8357D_MADCTL_MH 0x04
static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
struct mipi_dbi *dbi = &dbidev->dbi;
u8 addr_mode;
int ret, idx;
if (!drm_dev_enter(pipe->crtc.dev, &idx))
return;
DRM_DEBUG_KMS("\n");
ret = mipi_dbi_poweron_conditional_reset(dbidev);
if (ret < 0)
goto out_exit;
if (ret == 1)
goto out_enable;
/* setextc */
mipi_dbi_command(dbi, HX8357D_SETEXTC, 0xFF, 0x83, 0x57);
msleep(150);
/* setRGB which also enables SDO */
mipi_dbi_command(dbi, HX8357D_SETRGB, 0x00, 0x00, 0x06, 0x06);
/* -1.52V */
mipi_dbi_command(dbi, HX8357D_SETCOM, 0x25);
/* Normal mode 70Hz, Idle mode 55 Hz */
mipi_dbi_command(dbi, HX8357D_SETOSC, 0x68);
/* Set Panel - BGR, Gate direction swapped */
mipi_dbi_command(dbi, HX8357D_SETPANEL, 0x05);
mipi_dbi_command(dbi, HX8357D_SETPOWER,
0x00, /* Not deep standby */
0x15, /* BT */
0x1C, /* VSPR */
0x1C, /* VSNR */
0x83, /* AP */
0xAA); /* FS */
mipi_dbi_command(dbi, HX8357D_SETSTBA,
0x50, /* OPON normal */
0x50, /* OPON idle */
0x01, /* STBA */
0x3C, /* STBA */
0x1E, /* STBA */
0x08); /* GEN */
mipi_dbi_command(dbi, HX8357D_SETCYC,
0x02, /* NW 0x02 */
0x40, /* RTN */
0x00, /* DIV */
0x2A, /* DUM */
0x2A, /* DUM */
0x0D, /* GDON */
0x78); /* GDOFF */
mipi_dbi_command(dbi, HX8357D_SETGAMMA,
0x02,
0x0A,
0x11,
0x1d,
0x23,
0x35,
0x41,
0x4b,
0x4b,
0x42,
0x3A,
0x27,
0x1B,
0x08,
0x09,
0x03,
0x02,
0x0A,
0x11,
0x1d,
0x23,
0x35,
0x41,
0x4b,
0x4b,
0x42,
0x3A,
0x27,
0x1B,
0x08,
0x09,
0x03,
0x00,
0x01);
/* 16 bit */
mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT,
MIPI_DCS_PIXEL_FMT_16BIT);
/* Tearing effect line on, V-blank only */
mipi_dbi_command(dbi, MIPI_DCS_SET_TEAR_ON, 0x00);
/* tear line */
mipi_dbi_command(dbi, MIPI_DCS_SET_TEAR_SCANLINE, 0x00, 0x02);
/* Exit Sleep */
mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
msleep(150);
/* display on */
mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
usleep_range(5000, 7000);
out_enable:
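/* Pick the MADCTL address mode matching the requested rotation */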
switch (dbidev->rotation) {
default:
addr_mode = HX8357D_MADCTL_MX | HX8357D_MADCTL_MY;
break;
case 90:
addr_mode = HX8357D_MADCTL_MV | HX8357D_MADCTL_MY;
break;
case 180:
addr_mode = 0;
break;
case 270:
addr_mode = HX8357D_MADCTL_MV | HX8357D_MADCTL_MX;
break;
}
mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
mipi_dbi_enable_flush(dbidev, crtc_state, plane_state);
out_exit:
drm_dev_exit(idx);
}
static const struct drm_simple_display_pipe_funcs hx8357d_pipe_funcs = {
DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(yx240qv29_enable),
};
static const struct drm_display_mode yx350hv15_mode = {
DRM_SIMPLE_MODE(320, 480, 60, 75),
};
DEFINE_DRM_GEM_DMA_FOPS(hx8357d_fops);
static const struct drm_driver hx8357d_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &hx8357d_fops,
DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "hx8357d",
.desc = "HX8357D",
.date = "20181023",
.major = 1,
.minor = 0,
};
static const struct of_device_id hx8357d_of_match[] = {
{ .compatible = "adafruit,yx350hv15" },
{ }
};
MODULE_DEVICE_TABLE(of, hx8357d_of_match);
static const struct spi_device_id hx8357d_id[] = {
{ "yx350hv15", 0 },
{ }
};
MODULE_DEVICE_TABLE(spi, hx8357d_id);
static int hx8357d_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
struct mipi_dbi_dev *dbidev;
struct drm_device *drm;
struct gpio_desc *dc;
u32 rotation = 0;
int ret;
dbidev = devm_drm_dev_alloc(dev, &hx8357d_driver,
struct mipi_dbi_dev, drm);
if (IS_ERR(dbidev))
return PTR_ERR(dbidev);
drm = &dbidev->drm;
dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW);
if (IS_ERR(dc))
return dev_err_probe(dev, PTR_ERR(dc), "Failed to get GPIO 'dc'\n");
dbidev->backlight = devm_of_find_backlight(dev);
if (IS_ERR(dbidev->backlight))
return PTR_ERR(dbidev->backlight);
device_property_read_u32(dev, "rotation", &rotation);
ret = mipi_dbi_spi_init(spi, &dbidev->dbi, dc);
if (ret)
return ret;
ret = mipi_dbi_dev_init(dbidev, &hx8357d_pipe_funcs, &yx350hv15_mode, rotation);
if (ret)
return ret;
drm_mode_config_reset(drm);
ret = drm_dev_register(drm, 0);
if (ret)
return ret;
spi_set_drvdata(spi, drm);
drm_fbdev_generic_setup(drm, 0);
return 0;
}
static void hx8357d_remove(struct spi_device *spi)
{
struct drm_device *drm = spi_get_drvdata(spi);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
}
static void hx8357d_shutdown(struct spi_device *spi)
{
drm_atomic_helper_shutdown(spi_get_drvdata(spi));
}
static struct spi_driver hx8357d_spi_driver = {
.driver = {
.name = "hx8357d",
.of_match_table = hx8357d_of_match,
},
.id_table = hx8357d_id,
.probe = hx8357d_probe,
.remove = hx8357d_remove,
.shutdown = hx8357d_shutdown,
};
module_spi_driver(hx8357d_spi_driver);
MODULE_DESCRIPTION("HX8357D DRM driver");
MODULE_AUTHOR("Eric Anholt <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/tiny/hx8357d.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* DRM driver for Multi-Inno MI0283QT panels
*
* Copyright 2016 Noralf Trønnes
*/
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
#include <video/mipi_display.h>
#define ILI9341_FRMCTR1 0xb1
#define ILI9341_DISCTRL 0xb6
#define ILI9341_ETMOD 0xb7
#define ILI9341_PWCTRL1 0xc0
#define ILI9341_PWCTRL2 0xc1
#define ILI9341_VMCTRL1 0xc5
#define ILI9341_VMCTRL2 0xc7
#define ILI9341_PWCTRLA 0xcb
#define ILI9341_PWCTRLB 0xcf
#define ILI9341_PGAMCTRL 0xe0
#define ILI9341_NGAMCTRL 0xe1
#define ILI9341_DTCTRLA 0xe8
#define ILI9341_DTCTRLB 0xea
#define ILI9341_PWRSEQ 0xed
#define ILI9341_EN3GAM 0xf2
#define ILI9341_PUMPCTRL 0xf7
#define ILI9341_MADCTL_BGR BIT(3)
#define ILI9341_MADCTL_MV BIT(5)
#define ILI9341_MADCTL_MX BIT(6)
#define ILI9341_MADCTL_MY BIT(7)
static void mi0283qt_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
struct mipi_dbi *dbi = &dbidev->dbi;
u8 addr_mode;
int ret, idx;
if (!drm_dev_enter(pipe->crtc.dev, &idx))
return;
DRM_DEBUG_KMS("\n");
ret = mipi_dbi_poweron_conditional_reset(dbidev);
if (ret < 0)
goto out_exit;
if (ret == 1)
goto out_enable;
mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF);
mipi_dbi_command(dbi, ILI9341_PWCTRLB, 0x00, 0x83, 0x30);
mipi_dbi_command(dbi, ILI9341_PWRSEQ, 0x64, 0x03, 0x12, 0x81);
mipi_dbi_command(dbi, ILI9341_DTCTRLA, 0x85, 0x01, 0x79);
mipi_dbi_command(dbi, ILI9341_PWCTRLA, 0x39, 0x2c, 0x00, 0x34, 0x02);
mipi_dbi_command(dbi, ILI9341_PUMPCTRL, 0x20);
mipi_dbi_command(dbi, ILI9341_DTCTRLB, 0x00, 0x00);
/* Power Control */
mipi_dbi_command(dbi, ILI9341_PWCTRL1, 0x26);
mipi_dbi_command(dbi, ILI9341_PWCTRL2, 0x11);
/* VCOM */
mipi_dbi_command(dbi, ILI9341_VMCTRL1, 0x35, 0x3e);
mipi_dbi_command(dbi, ILI9341_VMCTRL2, 0xbe);
/* Memory Access Control */
mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT);
/* Frame Rate */
mipi_dbi_command(dbi, ILI9341_FRMCTR1, 0x00, 0x1b);
/* Gamma */
mipi_dbi_command(dbi, ILI9341_EN3GAM, 0x08);
mipi_dbi_command(dbi, MIPI_DCS_SET_GAMMA_CURVE, 0x01);
mipi_dbi_command(dbi, ILI9341_PGAMCTRL,
0x1f, 0x1a, 0x18, 0x0a, 0x0f, 0x06, 0x45, 0x87,
0x32, 0x0a, 0x07, 0x02, 0x07, 0x05, 0x00);
mipi_dbi_command(dbi, ILI9341_NGAMCTRL,
0x00, 0x25, 0x27, 0x05, 0x10, 0x09, 0x3a, 0x78,
0x4d, 0x05, 0x18, 0x0d, 0x38, 0x3a, 0x1f);
/* DDRAM */
mipi_dbi_command(dbi, ILI9341_ETMOD, 0x07);
/* Display */
mipi_dbi_command(dbi, ILI9341_DISCTRL, 0x0a, 0x82, 0x27, 0x00);
mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
msleep(100);
mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
msleep(100);
out_enable:
/*
 * The PiTFT (ili9340) has a hardware reset circuit that
 * resets only on power-on and not on each reboot through
 * a gpio like the rpi-display does.
 * As a result, we need to always apply the rotation value
 * regardless of the display "on/off" state.
 */
switch (dbidev->rotation) {
default:
addr_mode = ILI9341_MADCTL_MV | ILI9341_MADCTL_MY |
ILI9341_MADCTL_MX;
break;
case 90:
addr_mode = ILI9341_MADCTL_MY;
break;
case 180:
addr_mode = ILI9341_MADCTL_MV;
break;
case 270:
addr_mode = ILI9341_MADCTL_MX;
break;
}
addr_mode |= ILI9341_MADCTL_BGR;
mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
mipi_dbi_enable_flush(dbidev, crtc_state, plane_state);
out_exit:
drm_dev_exit(idx);
}
static const struct drm_simple_display_pipe_funcs mi0283qt_pipe_funcs = {
DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(mi0283qt_enable),
};
static const struct drm_display_mode mi0283qt_mode = {
DRM_SIMPLE_MODE(320, 240, 58, 43),
};
DEFINE_DRM_GEM_DMA_FOPS(mi0283qt_fops);
static const struct drm_driver mi0283qt_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &mi0283qt_fops,
DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "mi0283qt",
.desc = "Multi-Inno MI0283QT",
.date = "20160614",
.major = 1,
.minor = 0,
};
static const struct of_device_id mi0283qt_of_match[] = {
{ .compatible = "multi-inno,mi0283qt" },
{},
};
MODULE_DEVICE_TABLE(of, mi0283qt_of_match);
static const struct spi_device_id mi0283qt_id[] = {
{ "mi0283qt", 0 },
{ },
};
MODULE_DEVICE_TABLE(spi, mi0283qt_id);
static int mi0283qt_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
struct mipi_dbi_dev *dbidev;
struct drm_device *drm;
struct mipi_dbi *dbi;
struct gpio_desc *dc;
u32 rotation = 0;
int ret;
dbidev = devm_drm_dev_alloc(dev, &mi0283qt_driver,
struct mipi_dbi_dev, drm);
if (IS_ERR(dbidev))
return PTR_ERR(dbidev);
dbi = &dbidev->dbi;
drm = &dbidev->drm;
dbi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(dbi->reset))
return dev_err_probe(dev, PTR_ERR(dbi->reset), "Failed to get GPIO 'reset'\n");
dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW);
if (IS_ERR(dc))
return dev_err_probe(dev, PTR_ERR(dc), "Failed to get GPIO 'dc'\n");
dbidev->regulator = devm_regulator_get(dev, "power");
if (IS_ERR(dbidev->regulator))
return PTR_ERR(dbidev->regulator);
dbidev->backlight = devm_of_find_backlight(dev);
if (IS_ERR(dbidev->backlight))
return PTR_ERR(dbidev->backlight);
device_property_read_u32(dev, "rotation", &rotation);
ret = mipi_dbi_spi_init(spi, dbi, dc);
if (ret)
return ret;
ret = mipi_dbi_dev_init(dbidev, &mi0283qt_pipe_funcs, &mi0283qt_mode, rotation);
if (ret)
return ret;
drm_mode_config_reset(drm);
ret = drm_dev_register(drm, 0);
if (ret)
return ret;
spi_set_drvdata(spi, drm);
drm_fbdev_generic_setup(drm, 0);
return 0;
}
static void mi0283qt_remove(struct spi_device *spi)
{
struct drm_device *drm = spi_get_drvdata(spi);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
}
static void mi0283qt_shutdown(struct spi_device *spi)
{
drm_atomic_helper_shutdown(spi_get_drvdata(spi));
}
static int __maybe_unused mi0283qt_pm_suspend(struct device *dev)
{
return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
}
static int __maybe_unused mi0283qt_pm_resume(struct device *dev)
{
drm_mode_config_helper_resume(dev_get_drvdata(dev));
return 0;
}
static const struct dev_pm_ops mi0283qt_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(mi0283qt_pm_suspend, mi0283qt_pm_resume)
};
static struct spi_driver mi0283qt_spi_driver = {
.driver = {
.name = "mi0283qt",
.owner = THIS_MODULE,
.of_match_table = mi0283qt_of_match,
.pm = &mi0283qt_pm_ops,
},
.id_table = mi0283qt_id,
.probe = mi0283qt_probe,
.remove = mi0283qt_remove,
.shutdown = mi0283qt_shutdown,
};
module_spi_driver(mi0283qt_spi_driver);
MODULE_DESCRIPTION("Multi-Inno MI0283QT DRM driver");
MODULE_AUTHOR("Noralf Trønnes");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/tiny/mi0283qt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* DRM driver for Ilitek ILI9225 panels
*
* Copyright 2017 David Lechner <[email protected]>
*
* Some code copied from mipi-dbi.c
* Copyright 2016 Noralf Trønnes
*/
#include <linux/delay.h>
#include <linux/dma-buf.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_rect.h>
#define ILI9225_DRIVER_READ_CODE 0x00
#define ILI9225_DRIVER_OUTPUT_CONTROL 0x01
#define ILI9225_LCD_AC_DRIVING_CONTROL 0x02
#define ILI9225_ENTRY_MODE 0x03
#define ILI9225_DISPLAY_CONTROL_1 0x07
#define ILI9225_BLANK_PERIOD_CONTROL_1 0x08
#define ILI9225_FRAME_CYCLE_CONTROL 0x0b
#define ILI9225_INTERFACE_CONTROL 0x0c
#define ILI9225_OSCILLATION_CONTROL 0x0f
#define ILI9225_POWER_CONTROL_1 0x10
#define ILI9225_POWER_CONTROL_2 0x11
#define ILI9225_POWER_CONTROL_3 0x12
#define ILI9225_POWER_CONTROL_4 0x13
#define ILI9225_POWER_CONTROL_5 0x14
#define ILI9225_VCI_RECYCLING 0x15
#define ILI9225_RAM_ADDRESS_SET_1 0x20
#define ILI9225_RAM_ADDRESS_SET_2 0x21
#define ILI9225_WRITE_DATA_TO_GRAM 0x22
#define ILI9225_SOFTWARE_RESET 0x28
#define ILI9225_GATE_SCAN_CONTROL 0x30
#define ILI9225_VERTICAL_SCROLL_1 0x31
#define ILI9225_VERTICAL_SCROLL_2 0x32
#define ILI9225_VERTICAL_SCROLL_3 0x33
#define ILI9225_PARTIAL_DRIVING_POS_1 0x34
#define ILI9225_PARTIAL_DRIVING_POS_2 0x35
#define ILI9225_HORIZ_WINDOW_ADDR_1 0x36
#define ILI9225_HORIZ_WINDOW_ADDR_2 0x37
#define ILI9225_VERT_WINDOW_ADDR_1 0x38
#define ILI9225_VERT_WINDOW_ADDR_2 0x39
#define ILI9225_GAMMA_CONTROL_1 0x50
#define ILI9225_GAMMA_CONTROL_2 0x51
#define ILI9225_GAMMA_CONTROL_3 0x52
#define ILI9225_GAMMA_CONTROL_4 0x53
#define ILI9225_GAMMA_CONTROL_5 0x54
#define ILI9225_GAMMA_CONTROL_6 0x55
#define ILI9225_GAMMA_CONTROL_7 0x56
#define ILI9225_GAMMA_CONTROL_8 0x57
#define ILI9225_GAMMA_CONTROL_9 0x58
#define ILI9225_GAMMA_CONTROL_10 0x59
static inline int ili9225_command(struct mipi_dbi *dbi, u8 cmd, u16 data)
{
u8 par[2] = { data >> 8, data & 0xff };
return mipi_dbi_command_buf(dbi, cmd, par, 2);
}
static void ili9225_fb_dirty(struct iosys_map *src, struct drm_framebuffer *fb,
struct drm_rect *rect)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev);
unsigned int height = rect->y2 - rect->y1;
unsigned int width = rect->x2 - rect->x1;
struct mipi_dbi *dbi = &dbidev->dbi;
bool swap = dbi->swap_bytes;
u16 x_start, y_start;
u16 x1, x2, y1, y2;
int ret = 0;
bool full;
void *tr;
full = width == fb->width && height == fb->height;
DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
if (!dbi->dc || !full || swap ||
fb->format->format == DRM_FORMAT_XRGB8888) {
tr = dbidev->tx_buf;
ret = mipi_dbi_buf_copy(tr, src, fb, rect, swap);
if (ret)
goto err_msg;
} else {
tr = src->vaddr; /* TODO: Use mapping abstraction properly */
}
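/* Translate the damage rectangle into controller coordinates for the configured rotation */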
switch (dbidev->rotation) {
default:
x1 = rect->x1;
x2 = rect->x2 - 1;
y1 = rect->y1;
y2 = rect->y2 - 1;
x_start = x1;
y_start = y1;
break;
case 90:
x1 = rect->y1;
x2 = rect->y2 - 1;
y1 = fb->width - rect->x2;
y2 = fb->width - rect->x1 - 1;
x_start = x1;
y_start = y2;
break;
case 180:
x1 = fb->width - rect->x2;
x2 = fb->width - rect->x1 - 1;
y1 = fb->height - rect->y2;
y2 = fb->height - rect->y1 - 1;
x_start = x2;
y_start = y2;
break;
case 270:
x1 = fb->height - rect->y2;
x2 = fb->height - rect->y1 - 1;
y1 = rect->x1;
y2 = rect->x2 - 1;
x_start = x2;
y_start = y1;
break;
}
ili9225_command(dbi, ILI9225_HORIZ_WINDOW_ADDR_1, x2);
ili9225_command(dbi, ILI9225_HORIZ_WINDOW_ADDR_2, x1);
ili9225_command(dbi, ILI9225_VERT_WINDOW_ADDR_1, y2);
ili9225_command(dbi, ILI9225_VERT_WINDOW_ADDR_2, y1);
ili9225_command(dbi, ILI9225_RAM_ADDRESS_SET_1, x_start);
ili9225_command(dbi, ILI9225_RAM_ADDRESS_SET_2, y_start);
ret = mipi_dbi_command_buf(dbi, ILI9225_WRITE_DATA_TO_GRAM, tr,
width * height * 2);
err_msg:
if (ret)
dev_err_once(fb->dev->dev, "Failed to update display %d\n", ret);
}
static void ili9225_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
struct drm_framebuffer *fb = state->fb;
struct drm_rect rect;
int idx;
if (!pipe->crtc.state->active)
return;
if (!drm_dev_enter(fb->dev, &idx))
return;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
ili9225_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
drm_dev_exit(idx);
}
static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
struct device *dev = pipe->crtc.dev->dev;
struct mipi_dbi *dbi = &dbidev->dbi;
struct drm_rect rect = {
.x1 = 0,
.x2 = fb->width,
.y1 = 0,
.y2 = fb->height,
};
int ret, idx;
u8 am_id;
if (!drm_dev_enter(pipe->crtc.dev, &idx))
return;
DRM_DEBUG_KMS("\n");
mipi_dbi_hw_reset(dbi);
/*
 * No two of the available example init sequences seem to match, so use
 * the one from the popular Arduino library for this display:
 * https://github.com/Nkawu/TFT_22_ILI9225/blob/master/src/TFT_22_ILI9225.cpp
 */
ret = ili9225_command(dbi, ILI9225_POWER_CONTROL_1, 0x0000);
if (ret) {
DRM_DEV_ERROR(dev, "Error sending command %d\n", ret);
goto out_exit;
}
ili9225_command(dbi, ILI9225_POWER_CONTROL_2, 0x0000);
ili9225_command(dbi, ILI9225_POWER_CONTROL_3, 0x0000);
ili9225_command(dbi, ILI9225_POWER_CONTROL_4, 0x0000);
ili9225_command(dbi, ILI9225_POWER_CONTROL_5, 0x0000);
msleep(40);
ili9225_command(dbi, ILI9225_POWER_CONTROL_2, 0x0018);
ili9225_command(dbi, ILI9225_POWER_CONTROL_3, 0x6121);
ili9225_command(dbi, ILI9225_POWER_CONTROL_4, 0x006f);
ili9225_command(dbi, ILI9225_POWER_CONTROL_5, 0x495f);
ili9225_command(dbi, ILI9225_POWER_CONTROL_1, 0x0800);
msleep(10);
ili9225_command(dbi, ILI9225_POWER_CONTROL_2, 0x103b);
msleep(50);
switch (dbidev->rotation) {
default:
am_id = 0x30;
break;
case 90:
am_id = 0x18;
break;
case 180:
am_id = 0x00;
break;
case 270:
am_id = 0x28;
break;
}
ili9225_command(dbi, ILI9225_DRIVER_OUTPUT_CONTROL, 0x011c);
ili9225_command(dbi, ILI9225_LCD_AC_DRIVING_CONTROL, 0x0100);
ili9225_command(dbi, ILI9225_ENTRY_MODE, 0x1000 | am_id);
ili9225_command(dbi, ILI9225_DISPLAY_CONTROL_1, 0x0000);
ili9225_command(dbi, ILI9225_BLANK_PERIOD_CONTROL_1, 0x0808);
ili9225_command(dbi, ILI9225_FRAME_CYCLE_CONTROL, 0x1100);
ili9225_command(dbi, ILI9225_INTERFACE_CONTROL, 0x0000);
ili9225_command(dbi, ILI9225_OSCILLATION_CONTROL, 0x0d01);
ili9225_command(dbi, ILI9225_VCI_RECYCLING, 0x0020);
ili9225_command(dbi, ILI9225_RAM_ADDRESS_SET_1, 0x0000);
ili9225_command(dbi, ILI9225_RAM_ADDRESS_SET_2, 0x0000);
ili9225_command(dbi, ILI9225_GATE_SCAN_CONTROL, 0x0000);
ili9225_command(dbi, ILI9225_VERTICAL_SCROLL_1, 0x00db);
ili9225_command(dbi, ILI9225_VERTICAL_SCROLL_2, 0x0000);
ili9225_command(dbi, ILI9225_VERTICAL_SCROLL_3, 0x0000);
ili9225_command(dbi, ILI9225_PARTIAL_DRIVING_POS_1, 0x00db);
ili9225_command(dbi, ILI9225_PARTIAL_DRIVING_POS_2, 0x0000);
ili9225_command(dbi, ILI9225_GAMMA_CONTROL_1, 0x0000);
ili9225_command(dbi, ILI9225_GAMMA_CONTROL_2, 0x0808);
ili9225_command(dbi, ILI9225_GAMMA_CONTROL_3, 0x080a);
ili9225_command(dbi, ILI9225_GAMMA_CONTROL_4, 0x000a);
ili9225_command(dbi, ILI9225_GAMMA_CONTROL_5, 0x0a08);
ili9225_command(dbi, ILI9225_GAMMA_CONTROL_6, 0x0808);
ili9225_command(dbi, ILI9225_GAMMA_CONTROL_7, 0x0000);
ili9225_command(dbi, ILI9225_GAMMA_CONTROL_8, 0x0a00);
ili9225_command(dbi, ILI9225_GAMMA_CONTROL_9, 0x0710);
ili9225_command(dbi, ILI9225_GAMMA_CONTROL_10, 0x0710);
ili9225_command(dbi, ILI9225_DISPLAY_CONTROL_1, 0x0012);
msleep(50);
ili9225_command(dbi, ILI9225_DISPLAY_CONTROL_1, 0x1017);
ili9225_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
out_exit:
drm_dev_exit(idx);
}
static void ili9225_pipe_disable(struct drm_simple_display_pipe *pipe)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
struct mipi_dbi *dbi = &dbidev->dbi;
DRM_DEBUG_KMS("\n");
/*
* This callback is not protected by drm_dev_enter/exit since we want to
* turn off the display on regular driver unload. It's highly unlikely
* that the underlying SPI controller is gone should this be called after
* unplug.
*/
ili9225_command(dbi, ILI9225_DISPLAY_CONTROL_1, 0x0000);
msleep(50);
ili9225_command(dbi, ILI9225_POWER_CONTROL_2, 0x0007);
msleep(50);
ili9225_command(dbi, ILI9225_POWER_CONTROL_1, 0x0a02);
}
static int ili9225_dbi_command(struct mipi_dbi *dbi, u8 *cmd, u8 *par,
size_t num)
{
struct spi_device *spi = dbi->spi;
unsigned int bpw = 8;
u32 speed_hz;
int ret;
spi_bus_lock(spi->controller);
gpiod_set_value_cansleep(dbi->dc, 0);
speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1);
ret = mipi_dbi_spi_transfer(spi, speed_hz, 8, cmd, 1);
spi_bus_unlock(spi->controller);
if (ret || !num)
return ret;
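/* Pixel data written to GRAM goes out as 16-bit words unless byte swapping is done on the CPU */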
if (*cmd == ILI9225_WRITE_DATA_TO_GRAM && !dbi->swap_bytes)
bpw = 16;
spi_bus_lock(spi->controller);
gpiod_set_value_cansleep(dbi->dc, 1);
speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num);
ret = mipi_dbi_spi_transfer(spi, speed_hz, bpw, par, num);
spi_bus_unlock(spi->controller);
return ret;
}
static const struct drm_simple_display_pipe_funcs ili9225_pipe_funcs = {
.mode_valid = mipi_dbi_pipe_mode_valid,
.enable = ili9225_pipe_enable,
.disable = ili9225_pipe_disable,
.update = ili9225_pipe_update,
.begin_fb_access = mipi_dbi_pipe_begin_fb_access,
.end_fb_access = mipi_dbi_pipe_end_fb_access,
.reset_plane = mipi_dbi_pipe_reset_plane,
.duplicate_plane_state = mipi_dbi_pipe_duplicate_plane_state,
.destroy_plane_state = mipi_dbi_pipe_destroy_plane_state,
};
static const struct drm_display_mode ili9225_mode = {
DRM_SIMPLE_MODE(176, 220, 35, 44),
};
DEFINE_DRM_GEM_DMA_FOPS(ili9225_fops);
static const struct drm_driver ili9225_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9225_fops,
DRM_GEM_DMA_DRIVER_OPS_VMAP,
.name = "ili9225",
.desc = "Ilitek ILI9225",
.date = "20171106",
.major = 1,
.minor = 0,
};
static const struct of_device_id ili9225_of_match[] = {
{ .compatible = "vot,v220hf01a-t" },
{},
};
MODULE_DEVICE_TABLE(of, ili9225_of_match);
static const struct spi_device_id ili9225_id[] = {
{ "v220hf01a-t", 0 },
{ },
};
MODULE_DEVICE_TABLE(spi, ili9225_id);
static int ili9225_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
struct mipi_dbi_dev *dbidev;
struct drm_device *drm;
struct mipi_dbi *dbi;
struct gpio_desc *rs;
u32 rotation = 0;
int ret;
dbidev = devm_drm_dev_alloc(dev, &ili9225_driver,
struct mipi_dbi_dev, drm);
if (IS_ERR(dbidev))
return PTR_ERR(dbidev);
dbi = &dbidev->dbi;
drm = &dbidev->drm;
dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(dbi->reset))
return dev_err_probe(dev, PTR_ERR(dbi->reset), "Failed to get GPIO 'reset'\n");
rs = devm_gpiod_get(dev, "rs", GPIOD_OUT_LOW);
if (IS_ERR(rs))
return dev_err_probe(dev, PTR_ERR(rs), "Failed to get GPIO 'rs'\n");
device_property_read_u32(dev, "rotation", &rotation);
ret = mipi_dbi_spi_init(spi, dbi, rs);
if (ret)
return ret;
/* override the command function set in mipi_dbi_spi_init() */
dbi->command = ili9225_dbi_command;
ret = mipi_dbi_dev_init(dbidev, &ili9225_pipe_funcs, &ili9225_mode, rotation);
if (ret)
return ret;
drm_mode_config_reset(drm);
ret = drm_dev_register(drm, 0);
if (ret)
return ret;
spi_set_drvdata(spi, drm);
drm_fbdev_generic_setup(drm, 0);
return 0;
}
static void ili9225_remove(struct spi_device *spi)
{
struct drm_device *drm = spi_get_drvdata(spi);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
}
static void ili9225_shutdown(struct spi_device *spi)
{
drm_atomic_helper_shutdown(spi_get_drvdata(spi));
}
static struct spi_driver ili9225_spi_driver = {
.driver = {
.name = "ili9225",
.owner = THIS_MODULE,
.of_match_table = ili9225_of_match,
},
.id_table = ili9225_id,
.probe = ili9225_probe,
.remove = ili9225_remove,
.shutdown = ili9225_shutdown,
};
module_spi_driver(ili9225_spi_driver);
MODULE_DESCRIPTION("Ilitek ILI9225 DRM driver");
MODULE_AUTHOR("David Lechner <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/tiny/ili9225.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* DRM driver for display panels connected to a Sitronix ST7715R or ST7735R
* display controller in SPI mode.
*
* Copyright 2017 David Lechner <[email protected]>
* Copyright (C) 2019 Glider bvba
*/
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/dma-buf.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#define ST7735R_FRMCTR1 0xb1
#define ST7735R_FRMCTR2 0xb2
#define ST7735R_FRMCTR3 0xb3
#define ST7735R_INVCTR 0xb4
#define ST7735R_PWCTR1 0xc0
#define ST7735R_PWCTR2 0xc1
#define ST7735R_PWCTR3 0xc2
#define ST7735R_PWCTR4 0xc3
#define ST7735R_PWCTR5 0xc4
#define ST7735R_VMCTR1 0xc5
#define ST7735R_GAMCTRP1 0xe0
#define ST7735R_GAMCTRN1 0xe1
#define ST7735R_MY BIT(7)
#define ST7735R_MX BIT(6)
#define ST7735R_MV BIT(5)
#define ST7735R_RGB BIT(3)
struct st7735r_cfg {
const struct drm_display_mode mode;
unsigned int left_offset;
unsigned int top_offset;
unsigned int write_only:1;
unsigned int rgb:1; /* RGB (vs. BGR) */
};
struct st7735r_priv {
struct mipi_dbi_dev dbidev; /* Must be first for .release() */
const struct st7735r_cfg *cfg;
};
static void st7735r_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
struct st7735r_priv *priv = container_of(dbidev, struct st7735r_priv,
dbidev);
struct mipi_dbi *dbi = &dbidev->dbi;
int ret, idx;
u8 addr_mode;
if (!drm_dev_enter(pipe->crtc.dev, &idx))
return;
DRM_DEBUG_KMS("\n");
ret = mipi_dbi_poweron_reset(dbidev);
if (ret)
goto out_exit;
msleep(150);
mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
msleep(500);
mipi_dbi_command(dbi, ST7735R_FRMCTR1, 0x01, 0x2c, 0x2d);
mipi_dbi_command(dbi, ST7735R_FRMCTR2, 0x01, 0x2c, 0x2d);
mipi_dbi_command(dbi, ST7735R_FRMCTR3, 0x01, 0x2c, 0x2d, 0x01, 0x2c,
0x2d);
mipi_dbi_command(dbi, ST7735R_INVCTR, 0x07);
mipi_dbi_command(dbi, ST7735R_PWCTR1, 0xa2, 0x02, 0x84);
mipi_dbi_command(dbi, ST7735R_PWCTR2, 0xc5);
mipi_dbi_command(dbi, ST7735R_PWCTR3, 0x0a, 0x00);
mipi_dbi_command(dbi, ST7735R_PWCTR4, 0x8a, 0x2a);
mipi_dbi_command(dbi, ST7735R_PWCTR5, 0x8a, 0xee);
mipi_dbi_command(dbi, ST7735R_VMCTR1, 0x0e);
mipi_dbi_command(dbi, MIPI_DCS_EXIT_INVERT_MODE);
switch (dbidev->rotation) {
default:
addr_mode = ST7735R_MX | ST7735R_MY;
break;
case 90:
addr_mode = ST7735R_MX | ST7735R_MV;
break;
case 180:
addr_mode = 0;
break;
case 270:
addr_mode = ST7735R_MY | ST7735R_MV;
break;
}
if (priv->cfg->rgb)
addr_mode |= ST7735R_RGB;
mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT,
MIPI_DCS_PIXEL_FMT_16BIT);
mipi_dbi_command(dbi, ST7735R_GAMCTRP1, 0x02, 0x1c, 0x07, 0x12, 0x37,
0x32, 0x29, 0x2d, 0x29, 0x25, 0x2b, 0x39, 0x00, 0x01,
0x03, 0x10);
mipi_dbi_command(dbi, ST7735R_GAMCTRN1, 0x03, 0x1d, 0x07, 0x06, 0x2e,
0x2c, 0x29, 0x2d, 0x2e, 0x2e, 0x37, 0x3f, 0x00, 0x00,
0x02, 0x10);
mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
msleep(100);
mipi_dbi_command(dbi, MIPI_DCS_ENTER_NORMAL_MODE);
msleep(20);
mipi_dbi_enable_flush(dbidev, crtc_state, plane_state);
out_exit:
drm_dev_exit(idx);
}
static const struct drm_simple_display_pipe_funcs st7735r_pipe_funcs = {
DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(st7735r_pipe_enable),
};
static const struct st7735r_cfg jd_t18003_t01_cfg = {
.mode = { DRM_SIMPLE_MODE(128, 160, 28, 35) },
/* Cannot read from Adafruit 1.8" display via SPI */
.write_only = true,
};
static const struct st7735r_cfg rh128128t_cfg = {
.mode = { DRM_SIMPLE_MODE(128, 128, 25, 26) },
.left_offset = 2,
.top_offset = 3,
.rgb = true,
};
DEFINE_DRM_GEM_DMA_FOPS(st7735r_fops);
static const struct drm_driver st7735r_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &st7735r_fops,
DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7735r",
.desc = "Sitronix ST7735R",
.date = "20171128",
.major = 1,
.minor = 0,
};
static const struct of_device_id st7735r_of_match[] = {
{ .compatible = "jianda,jd-t18003-t01", .data = &jd_t18003_t01_cfg },
{ .compatible = "okaya,rh128128t", .data = &rh128128t_cfg },
{ },
};
MODULE_DEVICE_TABLE(of, st7735r_of_match);
static const struct spi_device_id st7735r_id[] = {
{ "jd-t18003-t01", (uintptr_t)&jd_t18003_t01_cfg },
{ "rh128128t", (uintptr_t)&rh128128t_cfg },
{ },
};
MODULE_DEVICE_TABLE(spi, st7735r_id);
static int st7735r_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
const struct st7735r_cfg *cfg;
struct mipi_dbi_dev *dbidev;
struct st7735r_priv *priv;
struct drm_device *drm;
struct mipi_dbi *dbi;
struct gpio_desc *dc;
u32 rotation = 0;
int ret;
cfg = device_get_match_data(&spi->dev);
if (!cfg)
cfg = (void *)spi_get_device_id(spi)->driver_data;
priv = devm_drm_dev_alloc(dev, &st7735r_driver,
struct st7735r_priv, dbidev.drm);
if (IS_ERR(priv))
return PTR_ERR(priv);
dbidev = &priv->dbidev;
priv->cfg = cfg;
dbi = &dbidev->dbi;
drm = &dbidev->drm;
dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(dbi->reset))
return dev_err_probe(dev, PTR_ERR(dbi->reset), "Failed to get GPIO 'reset'\n");
dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW);
if (IS_ERR(dc))
return dev_err_probe(dev, PTR_ERR(dc), "Failed to get GPIO 'dc'\n");
dbidev->backlight = devm_of_find_backlight(dev);
if (IS_ERR(dbidev->backlight))
return PTR_ERR(dbidev->backlight);
device_property_read_u32(dev, "rotation", &rotation);
ret = mipi_dbi_spi_init(spi, dbi, dc);
if (ret)
return ret;
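/* Panels that cannot be read over SPI have their read commands disabled */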
if (cfg->write_only)
dbi->read_commands = NULL;
dbidev->left_offset = cfg->left_offset;
dbidev->top_offset = cfg->top_offset;
ret = mipi_dbi_dev_init(dbidev, &st7735r_pipe_funcs, &cfg->mode,
rotation);
if (ret)
return ret;
drm_mode_config_reset(drm);
ret = drm_dev_register(drm, 0);
if (ret)
return ret;
spi_set_drvdata(spi, drm);
drm_fbdev_generic_setup(drm, 0);
return 0;
}
static void st7735r_remove(struct spi_device *spi)
{
struct drm_device *drm = spi_get_drvdata(spi);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
}
static void st7735r_shutdown(struct spi_device *spi)
{
drm_atomic_helper_shutdown(spi_get_drvdata(spi));
}
static struct spi_driver st7735r_spi_driver = {
.driver = {
.name = "st7735r",
.of_match_table = st7735r_of_match,
},
.id_table = st7735r_id,
.probe = st7735r_probe,
.remove = st7735r_remove,
.shutdown = st7735r_shutdown,
};
module_spi_driver(st7735r_spi_driver);
MODULE_DESCRIPTION("Sitronix ST7735R DRM driver");
MODULE_AUTHOR("David Lechner <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/tiny/st7735r.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* ARC PGU DRM driver.
*
* Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
*/
#include <linux/clk.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#define ARCPGU_REG_CTRL 0x00
#define ARCPGU_REG_STAT 0x04
#define ARCPGU_REG_FMT 0x10
#define ARCPGU_REG_HSYNC 0x14
#define ARCPGU_REG_VSYNC 0x18
#define ARCPGU_REG_ACTIVE 0x1c
#define ARCPGU_REG_BUF0_ADDR 0x40
#define ARCPGU_REG_STRIDE 0x50
#define ARCPGU_REG_START_SET 0x84
#define ARCPGU_REG_ID 0x3FC
#define ARCPGU_CTRL_ENABLE_MASK 0x02
#define ARCPGU_CTRL_VS_POL_MASK 0x1
#define ARCPGU_CTRL_VS_POL_OFST 0x3
#define ARCPGU_CTRL_HS_POL_MASK 0x1
#define ARCPGU_CTRL_HS_POL_OFST 0x4
#define ARCPGU_MODE_XRGB8888 BIT(2)
#define ARCPGU_STAT_BUSY_MASK 0x02
struct arcpgu_drm_private {
struct drm_device drm;
void __iomem *regs;
struct clk *clk;
struct drm_simple_display_pipe pipe;
struct drm_connector sim_conn;
};
#define dev_to_arcpgu(x) container_of(x, struct arcpgu_drm_private, drm)
#define pipe_to_arcpgu_priv(x) container_of(x, struct arcpgu_drm_private, pipe)
static inline void arc_pgu_write(struct arcpgu_drm_private *arcpgu,
unsigned int reg, u32 value)
{
iowrite32(value, arcpgu->regs + reg);
}
static inline u32 arc_pgu_read(struct arcpgu_drm_private *arcpgu,
unsigned int reg)
{
return ioread32(arcpgu->regs + reg);
}
#define XRES_DEF 640
#define YRES_DEF 480
#define XRES_MAX 8192
#define YRES_MAX 8192
static int arcpgu_drm_connector_get_modes(struct drm_connector *connector)
{
int count;
count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
return count;
}
static const struct drm_connector_helper_funcs
arcpgu_drm_connector_helper_funcs = {
.get_modes = arcpgu_drm_connector_get_modes,
};
static const struct drm_connector_funcs arcpgu_drm_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int arcpgu_drm_sim_init(struct drm_device *drm, struct drm_connector *connector)
{
drm_connector_helper_add(connector, &arcpgu_drm_connector_helper_funcs);
return drm_connector_init(drm, connector, &arcpgu_drm_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
}
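/* Registers take the dimension minus one, with X packed in the upper and Y in the lower 16 bits */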
#define ENCODE_PGU_XY(x, y) ((((x) - 1) << 16) | ((y) - 1))
static const u32 arc_pgu_supported_formats[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
};
static void arc_pgu_set_pxl_fmt(struct arcpgu_drm_private *arcpgu)
{
const struct drm_framebuffer *fb = arcpgu->pipe.plane.state->fb;
uint32_t pixel_format = fb->format->format;
u32 format = DRM_FORMAT_INVALID;
int i;
u32 reg_ctrl;
for (i = 0; i < ARRAY_SIZE(arc_pgu_supported_formats); i++) {
if (arc_pgu_supported_formats[i] == pixel_format)
format = arc_pgu_supported_formats[i];
}
if (WARN_ON(format == DRM_FORMAT_INVALID))
return;
reg_ctrl = arc_pgu_read(arcpgu, ARCPGU_REG_CTRL);
if (format == DRM_FORMAT_RGB565)
reg_ctrl &= ~ARCPGU_MODE_XRGB8888;
else
reg_ctrl |= ARCPGU_MODE_XRGB8888;
arc_pgu_write(arcpgu, ARCPGU_REG_CTRL, reg_ctrl);
}
static enum drm_mode_status arc_pgu_mode_valid(struct drm_simple_display_pipe *pipe,
const struct drm_display_mode *mode)
{
struct arcpgu_drm_private *arcpgu = pipe_to_arcpgu_priv(pipe);
long rate, clk_rate = mode->clock * 1000;
long diff = clk_rate / 200; /* +-0.5% allowed by HDMI spec */
rate = clk_round_rate(arcpgu->clk, clk_rate);
if ((max(rate, clk_rate) - min(rate, clk_rate) < diff) && (rate > 0))
return MODE_OK;
return MODE_NOCLOCK;
}
static void arc_pgu_mode_set(struct arcpgu_drm_private *arcpgu)
{
struct drm_display_mode *m = &arcpgu->pipe.crtc.state->adjusted_mode;
u32 val;
arc_pgu_write(arcpgu, ARCPGU_REG_FMT,
ENCODE_PGU_XY(m->crtc_htotal, m->crtc_vtotal));
arc_pgu_write(arcpgu, ARCPGU_REG_HSYNC,
ENCODE_PGU_XY(m->crtc_hsync_start - m->crtc_hdisplay,
m->crtc_hsync_end - m->crtc_hdisplay));
arc_pgu_write(arcpgu, ARCPGU_REG_VSYNC,
ENCODE_PGU_XY(m->crtc_vsync_start - m->crtc_vdisplay,
m->crtc_vsync_end - m->crtc_vdisplay));
arc_pgu_write(arcpgu, ARCPGU_REG_ACTIVE,
ENCODE_PGU_XY(m->crtc_hblank_end - m->crtc_hblank_start,
m->crtc_vblank_end - m->crtc_vblank_start));
val = arc_pgu_read(arcpgu, ARCPGU_REG_CTRL);
if (m->flags & DRM_MODE_FLAG_PVSYNC)
val |= ARCPGU_CTRL_VS_POL_MASK << ARCPGU_CTRL_VS_POL_OFST;
else
val &= ~(ARCPGU_CTRL_VS_POL_MASK << ARCPGU_CTRL_VS_POL_OFST);
if (m->flags & DRM_MODE_FLAG_PHSYNC)
val |= ARCPGU_CTRL_HS_POL_MASK << ARCPGU_CTRL_HS_POL_OFST;
else
val &= ~(ARCPGU_CTRL_HS_POL_MASK << ARCPGU_CTRL_HS_POL_OFST);
arc_pgu_write(arcpgu, ARCPGU_REG_CTRL, val);
arc_pgu_write(arcpgu, ARCPGU_REG_STRIDE, 0);
arc_pgu_write(arcpgu, ARCPGU_REG_START_SET, 1);
arc_pgu_set_pxl_fmt(arcpgu);
clk_set_rate(arcpgu->clk, m->crtc_clock * 1000);
}
static void arc_pgu_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
struct arcpgu_drm_private *arcpgu = pipe_to_arcpgu_priv(pipe);
arc_pgu_mode_set(arcpgu);
clk_prepare_enable(arcpgu->clk);
arc_pgu_write(arcpgu, ARCPGU_REG_CTRL,
arc_pgu_read(arcpgu, ARCPGU_REG_CTRL) |
ARCPGU_CTRL_ENABLE_MASK);
}
static void arc_pgu_disable(struct drm_simple_display_pipe *pipe)
{
struct arcpgu_drm_private *arcpgu = pipe_to_arcpgu_priv(pipe);
clk_disable_unprepare(arcpgu->clk);
arc_pgu_write(arcpgu, ARCPGU_REG_CTRL,
arc_pgu_read(arcpgu, ARCPGU_REG_CTRL) &
~ARCPGU_CTRL_ENABLE_MASK);
}
static void arc_pgu_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *state)
{
struct arcpgu_drm_private *arcpgu;
struct drm_gem_dma_object *gem;
if (!pipe->plane.state->fb)
return;
arcpgu = pipe_to_arcpgu_priv(pipe);
gem = drm_fb_dma_get_gem_obj(pipe->plane.state->fb, 0);
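/* Point the controller at the new framebuffer's DMA address */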
arc_pgu_write(arcpgu, ARCPGU_REG_BUF0_ADDR, gem->dma_addr);
}
static const struct drm_simple_display_pipe_funcs arc_pgu_pipe_funcs = {
.update = arc_pgu_update,
.mode_valid = arc_pgu_mode_valid,
.enable = arc_pgu_enable,
.disable = arc_pgu_disable,
};
static const struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = {
.fb_create = drm_gem_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
DEFINE_DRM_GEM_DMA_FOPS(arcpgu_drm_ops);
static int arcpgu_load(struct arcpgu_drm_private *arcpgu)
{
struct platform_device *pdev = to_platform_device(arcpgu->drm.dev);
struct device_node *encoder_node = NULL, *endpoint_node = NULL;
struct drm_connector *connector = NULL;
struct drm_device *drm = &arcpgu->drm;
struct resource *res;
int ret;
arcpgu->clk = devm_clk_get(drm->dev, "pxlclk");
if (IS_ERR(arcpgu->clk))
return PTR_ERR(arcpgu->clk);
ret = drmm_mode_config_init(drm);
if (ret)
return ret;
drm->mode_config.min_width = 0;
drm->mode_config.min_height = 0;
drm->mode_config.max_width = 1920;
drm->mode_config.max_height = 1080;
drm->mode_config.funcs = &arcpgu_drm_modecfg_funcs;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
arcpgu->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(arcpgu->regs))
return PTR_ERR(arcpgu->regs);
dev_info(drm->dev, "arc_pgu ID: 0x%x\n",
arc_pgu_read(arcpgu, ARCPGU_REG_ID));
/* Get the optional framebuffer memory resource */
ret = of_reserved_mem_device_init(drm->dev);
if (ret && ret != -ENODEV)
return ret;
if (dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32)))
return -ENODEV;
/*
* There is only one output port inside each device. It is linked with
	 * the encoder endpoint.
*/
endpoint_node = of_graph_get_next_endpoint(pdev->dev.of_node, NULL);
if (endpoint_node) {
encoder_node = of_graph_get_remote_port_parent(endpoint_node);
of_node_put(endpoint_node);
} else {
connector = &arcpgu->sim_conn;
dev_info(drm->dev, "no encoder found. Assumed virtual LCD on simulation platform\n");
ret = arcpgu_drm_sim_init(drm, connector);
if (ret < 0)
return ret;
}
ret = drm_simple_display_pipe_init(drm, &arcpgu->pipe, &arc_pgu_pipe_funcs,
arc_pgu_supported_formats,
ARRAY_SIZE(arc_pgu_supported_formats),
NULL, connector);
if (ret)
return ret;
if (encoder_node) {
struct drm_bridge *bridge;
/* Locate drm bridge from the hdmi encoder DT node */
bridge = of_drm_find_bridge(encoder_node);
if (!bridge)
return -EPROBE_DEFER;
ret = drm_simple_display_pipe_attach_bridge(&arcpgu->pipe, bridge);
if (ret)
return ret;
}
drm_mode_config_reset(drm);
drm_kms_helper_poll_init(drm);
platform_set_drvdata(pdev, drm);
return 0;
}
static int arcpgu_unload(struct drm_device *drm)
{
drm_kms_helper_poll_fini(drm);
drm_atomic_helper_shutdown(drm);
return 0;
}
#ifdef CONFIG_DEBUG_FS
static int arcpgu_show_pxlclock(struct seq_file *m, void *arg)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *drm = node->minor->dev;
struct arcpgu_drm_private *arcpgu = dev_to_arcpgu(drm);
unsigned long clkrate = clk_get_rate(arcpgu->clk);
unsigned long mode_clock = arcpgu->pipe.crtc.mode.crtc_clock * 1000;
seq_printf(m, "hw : %lu\n", clkrate);
seq_printf(m, "mode: %lu\n", mode_clock);
return 0;
}
static struct drm_info_list arcpgu_debugfs_list[] = {
{ "clocks", arcpgu_show_pxlclock, 0 },
};
static void arcpgu_debugfs_init(struct drm_minor *minor)
{
drm_debugfs_create_files(arcpgu_debugfs_list,
ARRAY_SIZE(arcpgu_debugfs_list),
minor->debugfs_root, minor);
}
#endif
static const struct drm_driver arcpgu_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.name = "arcpgu",
.desc = "ARC PGU Controller",
.date = "20160219",
.major = 1,
.minor = 0,
.patchlevel = 0,
.fops = &arcpgu_drm_ops,
DRM_GEM_DMA_DRIVER_OPS,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = arcpgu_debugfs_init,
#endif
};
static int arcpgu_probe(struct platform_device *pdev)
{
struct arcpgu_drm_private *arcpgu;
int ret;
arcpgu = devm_drm_dev_alloc(&pdev->dev, &arcpgu_drm_driver,
struct arcpgu_drm_private, drm);
if (IS_ERR(arcpgu))
return PTR_ERR(arcpgu);
ret = arcpgu_load(arcpgu);
if (ret)
return ret;
ret = drm_dev_register(&arcpgu->drm, 0);
if (ret)
goto err_unload;
drm_fbdev_dma_setup(&arcpgu->drm, 16);
return 0;
err_unload:
arcpgu_unload(&arcpgu->drm);
return ret;
}
static int arcpgu_remove(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
drm_dev_unregister(drm);
arcpgu_unload(drm);
return 0;
}
static const struct of_device_id arcpgu_of_table[] = {
{.compatible = "snps,arcpgu"},
{}
};
MODULE_DEVICE_TABLE(of, arcpgu_of_table);
static struct platform_driver arcpgu_platform_driver = {
.probe = arcpgu_probe,
.remove = arcpgu_remove,
.driver = {
.name = "arcpgu",
.of_match_table = arcpgu_of_table,
},
};
drm_module_platform_driver(arcpgu_platform_driver);
MODULE_AUTHOR("Carlos Palminha <[email protected]>");
MODULE_DESCRIPTION("ARC PGU DRM driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/tiny/arcpgu.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* DRM driver for Ilitek ILI9486 panels
*
* Copyright 2020 Kamlesh Gurudasani <[email protected]>
*/
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
#define ILI9486_ITFCTR1 0xb0
#define ILI9486_PWCTRL1 0xc2
#define ILI9486_VMCTRL1 0xc5
#define ILI9486_PGAMCTRL 0xe0
#define ILI9486_NGAMCTRL 0xe1
#define ILI9486_DGAMCTRL 0xe2
#define ILI9486_MADCTL_BGR BIT(3)
#define ILI9486_MADCTL_MV BIT(5)
#define ILI9486_MADCTL_MX BIT(6)
#define ILI9486_MADCTL_MY BIT(7)
/*
* The PiScreen/waveshare rpi-lcd-35 has a SPI to 16-bit parallel bus converter
* in front of the display controller. This means that 8-bit values have to be
* transferred as 16-bit.
*/
static int waveshare_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
size_t num)
{
struct spi_device *spi = mipi->spi;
unsigned int bpw = 8;
void *data = par;
u32 speed_hz;
int i, ret;
__be16 *buf;
buf = kmalloc(32 * sizeof(u16), GFP_KERNEL);
if (!buf)
return -ENOMEM;
/*
* The displays are Raspberry Pi HATs and connected to the 8-bit only
* SPI controller, so 16-bit command and parameters need byte swapping
* before being transferred as 8-bit on the big endian SPI bus.
*/
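	/*
	 * Illustrative example (not taken from the datasheet): the 8-bit
	 * command 0x2C (memory write) is stored as the big-endian 16-bit
	 * word 0x002C, so the bytes 0x00, 0x2C go out on the wire.
	 */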
buf[0] = cpu_to_be16(*cmd);
spi_bus_lock(spi->controller);
gpiod_set_value_cansleep(mipi->dc, 0);
speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 2);
ret = mipi_dbi_spi_transfer(spi, speed_hz, 8, buf, 2);
spi_bus_unlock(spi->controller);
if (ret || !num)
goto free;
/* 8-bit configuration data, not 16-bit pixel data */
if (num <= 32) {
for (i = 0; i < num; i++)
buf[i] = cpu_to_be16(par[i]);
num *= 2;
data = buf;
}
/*
	 * Check whether the pixel data bytes need to be swapped or not
*/
if (*cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes)
bpw = 16;
spi_bus_lock(spi->controller);
gpiod_set_value_cansleep(mipi->dc, 1);
speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num);
ret = mipi_dbi_spi_transfer(spi, speed_hz, bpw, data, num);
spi_bus_unlock(spi->controller);
free:
kfree(buf);
return ret;
}
static void waveshare_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
struct mipi_dbi *dbi = &dbidev->dbi;
u8 addr_mode;
int ret, idx;
if (!drm_dev_enter(pipe->crtc.dev, &idx))
return;
DRM_DEBUG_KMS("\n");
ret = mipi_dbi_poweron_conditional_reset(dbidev);
if (ret < 0)
goto out_exit;
if (ret == 1)
goto out_enable;
mipi_dbi_command(dbi, ILI9486_ITFCTR1);
mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
msleep(250);
mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
mipi_dbi_command(dbi, ILI9486_PWCTRL1, 0x44);
mipi_dbi_command(dbi, ILI9486_VMCTRL1, 0x00, 0x00, 0x00, 0x00);
mipi_dbi_command(dbi, ILI9486_PGAMCTRL,
0x0F, 0x1F, 0x1C, 0x0C, 0x0F, 0x08, 0x48, 0x98,
0x37, 0x0A, 0x13, 0x04, 0x11, 0x0D, 0x0);
mipi_dbi_command(dbi, ILI9486_NGAMCTRL,
0x0F, 0x32, 0x2E, 0x0B, 0x0D, 0x05, 0x47, 0x75,
0x37, 0x06, 0x10, 0x03, 0x24, 0x20, 0x00);
mipi_dbi_command(dbi, ILI9486_DGAMCTRL,
0x0F, 0x32, 0x2E, 0x0B, 0x0D, 0x05, 0x47, 0x75,
0x37, 0x06, 0x10, 0x03, 0x24, 0x20, 0x00);
mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
msleep(100);
out_enable:
switch (dbidev->rotation) {
case 90:
addr_mode = ILI9486_MADCTL_MY;
break;
case 180:
addr_mode = ILI9486_MADCTL_MV;
break;
case 270:
addr_mode = ILI9486_MADCTL_MX;
break;
default:
addr_mode = ILI9486_MADCTL_MV | ILI9486_MADCTL_MY |
ILI9486_MADCTL_MX;
break;
}
addr_mode |= ILI9486_MADCTL_BGR;
mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
mipi_dbi_enable_flush(dbidev, crtc_state, plane_state);
out_exit:
drm_dev_exit(idx);
}
static const struct drm_simple_display_pipe_funcs waveshare_pipe_funcs = {
DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(waveshare_enable),
};
static const struct drm_display_mode waveshare_mode = {
DRM_SIMPLE_MODE(480, 320, 73, 49),
};
DEFINE_DRM_GEM_DMA_FOPS(ili9486_fops);
static const struct drm_driver ili9486_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9486_fops,
DRM_GEM_DMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9486",
.desc = "Ilitek ILI9486",
.date = "20200118",
.major = 1,
.minor = 0,
};
static const struct of_device_id ili9486_of_match[] = {
{ .compatible = "waveshare,rpi-lcd-35" },
{ .compatible = "ozzmaker,piscreen" },
{},
};
MODULE_DEVICE_TABLE(of, ili9486_of_match);
static const struct spi_device_id ili9486_id[] = {
{ "ili9486", 0 },
{ "rpi-lcd-35", 0 },
{ "piscreen", 0 },
{ }
};
MODULE_DEVICE_TABLE(spi, ili9486_id);
static int ili9486_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
struct mipi_dbi_dev *dbidev;
struct drm_device *drm;
struct mipi_dbi *dbi;
struct gpio_desc *dc;
u32 rotation = 0;
int ret;
dbidev = devm_drm_dev_alloc(dev, &ili9486_driver,
struct mipi_dbi_dev, drm);
if (IS_ERR(dbidev))
return PTR_ERR(dbidev);
dbi = &dbidev->dbi;
drm = &dbidev->drm;
dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(dbi->reset))
return dev_err_probe(dev, PTR_ERR(dbi->reset), "Failed to get GPIO 'reset'\n");
dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW);
if (IS_ERR(dc))
return dev_err_probe(dev, PTR_ERR(dc), "Failed to get GPIO 'dc'\n");
dbidev->backlight = devm_of_find_backlight(dev);
if (IS_ERR(dbidev->backlight))
return PTR_ERR(dbidev->backlight);
device_property_read_u32(dev, "rotation", &rotation);
ret = mipi_dbi_spi_init(spi, dbi, dc);
if (ret)
return ret;
dbi->command = waveshare_command;
dbi->read_commands = NULL;
ret = mipi_dbi_dev_init(dbidev, &waveshare_pipe_funcs,
&waveshare_mode, rotation);
if (ret)
return ret;
drm_mode_config_reset(drm);
ret = drm_dev_register(drm, 0);
if (ret)
return ret;
spi_set_drvdata(spi, drm);
drm_fbdev_generic_setup(drm, 0);
return 0;
}
static void ili9486_remove(struct spi_device *spi)
{
struct drm_device *drm = spi_get_drvdata(spi);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
}
static void ili9486_shutdown(struct spi_device *spi)
{
drm_atomic_helper_shutdown(spi_get_drvdata(spi));
}
static struct spi_driver ili9486_spi_driver = {
.driver = {
.name = "ili9486",
.of_match_table = ili9486_of_match,
},
.id_table = ili9486_id,
.probe = ili9486_probe,
.remove = ili9486_remove,
.shutdown = ili9486_shutdown,
};
module_spi_driver(ili9486_spi_driver);
MODULE_DESCRIPTION("Ilitek ILI9486 DRM driver");
MODULE_AUTHOR("Kamlesh Gurudasani <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/tiny/ili9486.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Linus Walleij <[email protected]>
* Parts of this file were based on the MCDE driver by Marcus Lorentzon
* (C) ST-Ericsson SA 2013
*/
/**
* DOC: ST-Ericsson MCDE Driver
*
* The MCDE (short for multi-channel display engine) is a graphics
* controller found in the Ux500 chipsets, such as NovaThor U8500.
* It was initially conceptualized by ST Microelectronics for the
 * successor of the Nomadik line, the STn8500, but was productized in the
 * ST-Ericsson U8500, where it was used for mass-market deployments
* in Android phones from Samsung and Sony Ericsson.
*
* It can do 1080p30 on SDTV CCIR656, DPI-2, DBI-2 or DSI for
* panels with or without frame buffering and can convert most
* input formats including most variants of RGB and YUV.
*
* The hardware has four display pipes, and the layout is a little
* bit like this::
*
* Memory -> Overlay -> Channel -> FIFO -> 8 formatters -> DSI/DPI
* External 0..5 0..3 A,B, 6 x DSI bridge
* source 0..9 C0,C1 2 x DPI
*
 * FIFOs A and B are for LCD and HDMI while FIFOs C0/C1 are for
 * panels with an embedded buffer.
* 6 of the formatters are for DSI, 3 pairs for VID/CMD respectively.
* 2 of the formatters are for DPI.
*
* Behind the formatters are the DSI or DPI ports that route to
* the external pins of the chip. As there are 3 DSI ports and one
* DPI port, it is possible to configure up to 4 display pipelines
* (effectively using channels 0..3) for concurrent use.
*
* In the current DRM/KMS setup, we use one external source, one overlay,
 * one FIFO and one formatter, which we connect to the simple DMA framebuffer
 * helpers. We then provide a bridge to the DSI port, and on the DSI port
 * bridge we hang a panel bridge or another bridge. This may be subject
* to change as we exploit more of the hardware capabilities.
*
* TODO:
*
 * - Enable damaged rectangles using drm_plane_enable_fb_damage_clips()
* so we can selectively just transmit the damaged area to a
* command-only display.
* - Enable mixing of more planes, possibly at the cost of moving away
* from using the simple framebuffer pipeline.
* - Enable output to bridges such as the AV8100 HDMI encoder from
* the DSI bridge.
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/dma-buf.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_panel.h>
#include <drm/drm_vblank.h>
#include "mcde_drm.h"
#define DRIVER_DESC "DRM module for MCDE"
#define MCDE_PID 0x000001FC
#define MCDE_PID_METALFIX_VERSION_SHIFT 0
#define MCDE_PID_METALFIX_VERSION_MASK 0x000000FF
#define MCDE_PID_DEVELOPMENT_VERSION_SHIFT 8
#define MCDE_PID_DEVELOPMENT_VERSION_MASK 0x0000FF00
#define MCDE_PID_MINOR_VERSION_SHIFT 16
#define MCDE_PID_MINOR_VERSION_MASK 0x00FF0000
#define MCDE_PID_MAJOR_VERSION_SHIFT 24
#define MCDE_PID_MAJOR_VERSION_MASK 0xFF000000
static const struct drm_mode_config_funcs mcde_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
static const struct drm_mode_config_helper_funcs mcde_mode_config_helpers = {
/*
* Using this function is necessary to commit atomic updates
* that need the CRTC to be enabled before a commit, as is
* the case with e.g. DSI displays.
*/
.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
static irqreturn_t mcde_irq(int irq, void *data)
{
struct mcde *mcde = data;
u32 val;
val = readl(mcde->regs + MCDE_MISERR);
mcde_display_irq(mcde);
if (val)
dev_info(mcde->dev, "some error IRQ\n");
writel(val, mcde->regs + MCDE_RISERR);
return IRQ_HANDLED;
}
static int mcde_modeset_init(struct drm_device *drm)
{
struct drm_mode_config *mode_config;
struct mcde *mcde = to_mcde(drm);
int ret;
/*
* If no other bridge was found, check if we have a DPI panel or
* any other bridge connected directly to the MCDE DPI output.
* If a DSI bridge is found, DSI will take precedence.
*
* TODO: more elaborate bridge selection if we have more than one
* thing attached to the system.
*/
if (!mcde->bridge) {
struct drm_panel *panel;
struct drm_bridge *bridge;
ret = drm_of_find_panel_or_bridge(drm->dev->of_node,
0, 0, &panel, &bridge);
if (ret) {
dev_err(drm->dev,
"Could not locate any output bridge or panel\n");
return ret;
}
if (panel) {
bridge = drm_panel_bridge_add_typed(panel,
DRM_MODE_CONNECTOR_DPI);
if (IS_ERR(bridge)) {
dev_err(drm->dev,
"Could not connect panel bridge\n");
return PTR_ERR(bridge);
}
}
mcde->dpi_output = true;
mcde->bridge = bridge;
mcde->flow_mode = MCDE_DPI_FORMATTER_FLOW;
}
mode_config = &drm->mode_config;
mode_config->funcs = &mcde_mode_config_funcs;
mode_config->helper_private = &mcde_mode_config_helpers;
/* This hardware can do 1080p */
mode_config->min_width = 1;
mode_config->max_width = 1920;
mode_config->min_height = 1;
mode_config->max_height = 1080;
ret = drm_vblank_init(drm, 1);
if (ret) {
dev_err(drm->dev, "failed to init vblank\n");
return ret;
}
ret = mcde_display_init(drm);
if (ret) {
dev_err(drm->dev, "failed to init display\n");
return ret;
}
/* Attach the bridge. */
ret = drm_simple_display_pipe_attach_bridge(&mcde->pipe,
mcde->bridge);
if (ret) {
dev_err(drm->dev, "failed to attach display output bridge\n");
return ret;
}
drm_mode_config_reset(drm);
drm_kms_helper_poll_init(drm);
return 0;
}
DEFINE_DRM_GEM_DMA_FOPS(drm_fops);
static const struct drm_driver mcde_drm_driver = {
.driver_features =
DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.ioctls = NULL,
.fops = &drm_fops,
.name = "mcde",
.desc = DRIVER_DESC,
.date = "20180529",
.major = 1,
.minor = 0,
.patchlevel = 0,
DRM_GEM_DMA_DRIVER_OPS,
};
static int mcde_drm_bind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
int ret;
ret = drmm_mode_config_init(drm);
if (ret)
return ret;
ret = component_bind_all(drm->dev, drm);
if (ret) {
dev_err(dev, "can't bind component devices\n");
return ret;
}
ret = mcde_modeset_init(drm);
if (ret)
goto unbind;
ret = drm_dev_register(drm, 0);
if (ret < 0)
goto unbind;
drm_fbdev_dma_setup(drm, 32);
return 0;
unbind:
component_unbind_all(drm->dev, drm);
return ret;
}
static void mcde_drm_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
drm_dev_unregister(drm);
drm_atomic_helper_shutdown(drm);
component_unbind_all(drm->dev, drm);
}
static const struct component_master_ops mcde_drm_comp_ops = {
.bind = mcde_drm_bind,
.unbind = mcde_drm_unbind,
};
static struct platform_driver *const mcde_component_drivers[] = {
&mcde_dsi_driver,
};
static int mcde_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct drm_device *drm;
struct mcde *mcde;
struct component_match *match = NULL;
u32 pid;
int irq;
int ret;
int i;
mcde = devm_drm_dev_alloc(dev, &mcde_drm_driver, struct mcde, drm);
if (IS_ERR(mcde))
return PTR_ERR(mcde);
drm = &mcde->drm;
mcde->dev = dev;
platform_set_drvdata(pdev, drm);
/* First obtain and turn on the main power */
mcde->epod = devm_regulator_get(dev, "epod");
if (IS_ERR(mcde->epod)) {
ret = PTR_ERR(mcde->epod);
dev_err(dev, "can't get EPOD regulator\n");
return ret;
}
ret = regulator_enable(mcde->epod);
if (ret) {
dev_err(dev, "can't enable EPOD regulator\n");
return ret;
}
mcde->vana = devm_regulator_get(dev, "vana");
if (IS_ERR(mcde->vana)) {
ret = PTR_ERR(mcde->vana);
dev_err(dev, "can't get VANA regulator\n");
goto regulator_epod_off;
}
ret = regulator_enable(mcde->vana);
if (ret) {
dev_err(dev, "can't enable VANA regulator\n");
goto regulator_epod_off;
}
/*
	 * The vendor code uses ESRAM (on-chip RAM) and needs to activate
* the v-esram34 regulator, but we don't use that yet
*/
/* Clock the silicon so we can access the registers */
mcde->mcde_clk = devm_clk_get(dev, "mcde");
if (IS_ERR(mcde->mcde_clk)) {
dev_err(dev, "unable to get MCDE main clock\n");
ret = PTR_ERR(mcde->mcde_clk);
goto regulator_off;
}
ret = clk_prepare_enable(mcde->mcde_clk);
if (ret) {
dev_err(dev, "failed to enable MCDE main clock\n");
goto regulator_off;
}
dev_info(dev, "MCDE clk rate %lu Hz\n", clk_get_rate(mcde->mcde_clk));
mcde->lcd_clk = devm_clk_get(dev, "lcd");
if (IS_ERR(mcde->lcd_clk)) {
dev_err(dev, "unable to get LCD clock\n");
ret = PTR_ERR(mcde->lcd_clk);
goto clk_disable;
}
mcde->hdmi_clk = devm_clk_get(dev, "hdmi");
if (IS_ERR(mcde->hdmi_clk)) {
dev_err(dev, "unable to get HDMI clock\n");
ret = PTR_ERR(mcde->hdmi_clk);
goto clk_disable;
}
mcde->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mcde->regs)) {
dev_err(dev, "no MCDE regs\n");
ret = -EINVAL;
goto clk_disable;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
goto clk_disable;
}
ret = devm_request_irq(dev, irq, mcde_irq, 0, "mcde", mcde);
if (ret) {
dev_err(dev, "failed to request irq %d\n", ret);
goto clk_disable;
}
/*
	 * Check the hardware revision: we only support the U8500v2 version,
	 * as this was the only version used for mass-market deployment,
* but surely you can add more versions if you have them and
* need them.
*/
pid = readl(mcde->regs + MCDE_PID);
dev_info(dev, "found MCDE HW revision %d.%d (dev %d, metal fix %d)\n",
(pid & MCDE_PID_MAJOR_VERSION_MASK)
>> MCDE_PID_MAJOR_VERSION_SHIFT,
(pid & MCDE_PID_MINOR_VERSION_MASK)
>> MCDE_PID_MINOR_VERSION_SHIFT,
(pid & MCDE_PID_DEVELOPMENT_VERSION_MASK)
>> MCDE_PID_DEVELOPMENT_VERSION_SHIFT,
(pid & MCDE_PID_METALFIX_VERSION_MASK)
>> MCDE_PID_METALFIX_VERSION_SHIFT);
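	/*
	 * Using the masks above, 0x03000800 decodes as major version 3,
	 * minor version 0, development version 8, metal fix 0, i.e. the
	 * U8500v2 revision supported by this driver.
	 */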
if (pid != 0x03000800) {
dev_err(dev, "unsupported hardware revision\n");
ret = -ENODEV;
goto clk_disable;
}
/* Disable and clear any pending interrupts */
mcde_display_disable_irqs(mcde);
writel(0, mcde->regs + MCDE_IMSCERR);
writel(0xFFFFFFFF, mcde->regs + MCDE_RISERR);
/* Spawn child devices for the DSI ports */
devm_of_platform_populate(dev);
/* Create something that will match the subdrivers when we bind */
for (i = 0; i < ARRAY_SIZE(mcde_component_drivers); i++) {
struct device_driver *drv = &mcde_component_drivers[i]->driver;
struct device *p = NULL, *d;
while ((d = platform_find_device_by_driver(p, drv))) {
put_device(p);
component_match_add(dev, &match, component_compare_dev, d);
p = d;
}
put_device(p);
}
if (!match) {
dev_err(dev, "no matching components\n");
ret = -ENODEV;
goto clk_disable;
}
if (IS_ERR(match)) {
dev_err(dev, "could not create component match\n");
ret = PTR_ERR(match);
goto clk_disable;
}
/*
* Perform an invasive reset of the MCDE and all blocks by
* cutting the power to the subsystem, then bring it back up
* later when we enable the display as a result of
* component_master_add_with_match().
*/
ret = regulator_disable(mcde->epod);
if (ret) {
dev_err(dev, "can't disable EPOD regulator\n");
return ret;
}
/* Wait 50 ms so we are sure we cut the power */
usleep_range(50000, 70000);
ret = component_master_add_with_match(&pdev->dev, &mcde_drm_comp_ops,
match);
if (ret) {
dev_err(dev, "failed to add component master\n");
/*
* The EPOD regulator is already disabled at this point so some
		 * special error-path code is needed
*/
clk_disable_unprepare(mcde->mcde_clk);
regulator_disable(mcde->vana);
return ret;
}
return 0;
clk_disable:
clk_disable_unprepare(mcde->mcde_clk);
regulator_off:
regulator_disable(mcde->vana);
regulator_epod_off:
regulator_disable(mcde->epod);
return ret;
}
static void mcde_remove(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
struct mcde *mcde = to_mcde(drm);
component_master_del(&pdev->dev, &mcde_drm_comp_ops);
clk_disable_unprepare(mcde->mcde_clk);
regulator_disable(mcde->vana);
regulator_disable(mcde->epod);
}
static const struct of_device_id mcde_of_match[] = {
{
.compatible = "ste,mcde",
},
{},
};
static struct platform_driver mcde_driver = {
.driver = {
.name = "mcde",
.of_match_table = mcde_of_match,
},
.probe = mcde_probe,
.remove_new = mcde_remove,
};
static struct platform_driver *const component_drivers[] = {
&mcde_dsi_driver,
};
static int __init mcde_drm_register(void)
{
int ret;
if (drm_firmware_drivers_only())
return -ENODEV;
ret = platform_register_drivers(component_drivers,
ARRAY_SIZE(component_drivers));
if (ret)
return ret;
return platform_driver_register(&mcde_driver);
}
static void __exit mcde_drm_unregister(void)
{
platform_unregister_drivers(component_drivers,
ARRAY_SIZE(component_drivers));
platform_driver_unregister(&mcde_driver);
}
module_init(mcde_drm_register);
module_exit(mcde_drm_unregister);
MODULE_ALIAS("platform:mcde-drm");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Linus Walleij <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/mcde/mcde_drv.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/regulator/consumer.h>
#include "mcde_drm.h"
#include "mcde_display_regs.h"
/* The MCDE internal clock dividers for FIFO A and B */
struct mcde_clk_div {
struct clk_hw hw;
struct mcde *mcde;
u32 cr;
u32 cr_div;
};
static int mcde_clk_div_enable(struct clk_hw *hw)
{
struct mcde_clk_div *cdiv = container_of(hw, struct mcde_clk_div, hw);
struct mcde *mcde = cdiv->mcde;
u32 val;
spin_lock(&mcde->fifo_crx1_lock);
val = readl(mcde->regs + cdiv->cr);
/*
* Select the PLL72 (LCD) clock as parent
* FIXME: implement other parents.
*/
val &= ~MCDE_CRX1_CLKSEL_MASK;
val |= MCDE_CRX1_CLKSEL_CLKPLL72 << MCDE_CRX1_CLKSEL_SHIFT;
/* Internal clock */
val |= MCDE_CRA1_CLKTYPE_TVXCLKSEL1;
/* Clear then set the divider */
val &= ~(MCDE_CRX1_BCD | MCDE_CRX1_PCD_MASK);
val |= cdiv->cr_div;
writel(val, mcde->regs + cdiv->cr);
spin_unlock(&mcde->fifo_crx1_lock);
return 0;
}
static int mcde_clk_div_choose_div(struct clk_hw *hw, unsigned long rate,
unsigned long *prate, bool set_parent)
{
int best_div = 1, div;
struct clk_hw *parent = clk_hw_get_parent(hw);
unsigned long best_prate = 0;
unsigned long best_diff = ~0ul;
int max_div = (1 << MCDE_CRX1_PCD_BITS) - 1;
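	/*
	 * Exhaustively try each candidate divider and keep the one whose
	 * resulting rate is closest to the requested rate; when set_parent
	 * is true the parent clock is also re-rounded for each candidate.
	 */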
for (div = 1; div < max_div; div++) {
unsigned long this_prate, div_rate, diff;
if (set_parent)
this_prate = clk_hw_round_rate(parent, rate * div);
else
this_prate = *prate;
div_rate = DIV_ROUND_UP_ULL(this_prate, div);
diff = abs(rate - div_rate);
if (diff < best_diff) {
best_div = div;
best_diff = diff;
best_prate = this_prate;
}
}
*prate = best_prate;
return best_div;
}
static long mcde_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
int div = mcde_clk_div_choose_div(hw, rate, prate, true);
return DIV_ROUND_UP_ULL(*prate, div);
}
static unsigned long mcde_clk_div_recalc_rate(struct clk_hw *hw,
unsigned long prate)
{
struct mcde_clk_div *cdiv = container_of(hw, struct mcde_clk_div, hw);
struct mcde *mcde = cdiv->mcde;
u32 cr;
int div;
/*
* If the MCDE is not powered we can't access registers.
* It will come up with 0 in the divider register bits, which
* means "divide by 2".
*/
if (!regulator_is_enabled(mcde->epod))
return DIV_ROUND_UP_ULL(prate, 2);
cr = readl(mcde->regs + cdiv->cr);
if (cr & MCDE_CRX1_BCD)
return prate;
/* 0 in the PCD means "divide by 2", 1 means "divide by 3" etc */
div = cr & MCDE_CRX1_PCD_MASK;
div += 2;
return DIV_ROUND_UP_ULL(prate, div);
}
static int mcde_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long prate)
{
struct mcde_clk_div *cdiv = container_of(hw, struct mcde_clk_div, hw);
int div = mcde_clk_div_choose_div(hw, rate, &prate, false);
u32 cr = 0;
/*
	 * We cache the CR bits that set the divider in the state so that
* we can call this before we can even write to the hardware.
*/
if (div == 1) {
/* Bypass clock divider */
cr |= MCDE_CRX1_BCD;
} else {
div -= 2;
cr |= div & MCDE_CRX1_PCD_MASK;
}
cdiv->cr_div = cr;
return 0;
}
static const struct clk_ops mcde_clk_div_ops = {
.enable = mcde_clk_div_enable,
.recalc_rate = mcde_clk_div_recalc_rate,
.round_rate = mcde_clk_div_round_rate,
.set_rate = mcde_clk_div_set_rate,
};
int mcde_init_clock_divider(struct mcde *mcde)
{
struct device *dev = mcde->dev;
struct mcde_clk_div *fifoa;
struct mcde_clk_div *fifob;
const char *parent_name;
struct clk_init_data fifoa_init = {
.name = "fifoa",
.ops = &mcde_clk_div_ops,
.parent_names = &parent_name,
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
};
struct clk_init_data fifob_init = {
.name = "fifob",
.ops = &mcde_clk_div_ops,
.parent_names = &parent_name,
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
};
int ret;
spin_lock_init(&mcde->fifo_crx1_lock);
parent_name = __clk_get_name(mcde->lcd_clk);
/* Allocate 2 clocks */
fifoa = devm_kzalloc(dev, sizeof(*fifoa), GFP_KERNEL);
if (!fifoa)
return -ENOMEM;
fifob = devm_kzalloc(dev, sizeof(*fifob), GFP_KERNEL);
if (!fifob)
return -ENOMEM;
fifoa->mcde = mcde;
fifoa->cr = MCDE_CRA1;
fifoa->hw.init = &fifoa_init;
ret = devm_clk_hw_register(dev, &fifoa->hw);
if (ret) {
dev_err(dev, "error registering FIFO A clock divider\n");
return ret;
}
mcde->fifoa_clk = fifoa->hw.clk;
fifob->mcde = mcde;
fifob->cr = MCDE_CRB1;
fifob->hw.init = &fifob_init;
ret = devm_clk_hw_register(dev, &fifob->hw);
if (ret) {
dev_err(dev, "error registering FIFO B clock divider\n");
return ret;
}
mcde->fifob_clk = fifob->hw.clk;
return 0;
}
| linux-master | drivers/gpu/drm/mcde/mcde_clk_div.c |
// SPDX-License-Identifier: GPL-2.0+
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "mcde_drm.h"
#include "mcde_dsi_regs.h"
#define DSI_DEFAULT_LP_FREQ_HZ 19200000
#define DSI_DEFAULT_HS_FREQ_HZ 420160000
/* PRCMU DSI reset registers */
#define PRCM_DSI_SW_RESET 0x324
#define PRCM_DSI_SW_RESET_DSI0_SW_RESETN BIT(0)
#define PRCM_DSI_SW_RESET_DSI1_SW_RESETN BIT(1)
#define PRCM_DSI_SW_RESET_DSI2_SW_RESETN BIT(2)
struct mcde_dsi {
struct device *dev;
struct mcde *mcde;
struct drm_bridge bridge;
struct drm_panel *panel;
struct drm_bridge *bridge_out;
struct mipi_dsi_host dsi_host;
struct mipi_dsi_device *mdsi;
const struct drm_display_mode *mode;
struct clk *hs_clk;
struct clk *lp_clk;
unsigned long hs_freq;
unsigned long lp_freq;
bool unused;
void __iomem *regs;
struct regmap *prcmu;
};
static inline struct mcde_dsi *bridge_to_mcde_dsi(struct drm_bridge *bridge)
{
return container_of(bridge, struct mcde_dsi, bridge);
}
static inline struct mcde_dsi *host_to_mcde_dsi(struct mipi_dsi_host *h)
{
return container_of(h, struct mcde_dsi, dsi_host);
}
bool mcde_dsi_irq(struct mipi_dsi_device *mdsi)
{
struct mcde_dsi *d;
u32 val;
bool te_received = false;
d = host_to_mcde_dsi(mdsi->host);
dev_dbg(d->dev, "%s called\n", __func__);
val = readl(d->regs + DSI_DIRECT_CMD_STS_FLAG);
if (val)
dev_dbg(d->dev, "DSI_DIRECT_CMD_STS_FLAG = %08x\n", val);
if (val & DSI_DIRECT_CMD_STS_WRITE_COMPLETED)
dev_dbg(d->dev, "direct command write completed\n");
if (val & DSI_DIRECT_CMD_STS_TE_RECEIVED) {
te_received = true;
dev_dbg(d->dev, "direct command TE received\n");
}
if (val & DSI_DIRECT_CMD_STS_ACKNOWLEDGE_WITH_ERR_RECEIVED)
dev_err(d->dev, "direct command ACK ERR received\n");
if (val & DSI_DIRECT_CMD_STS_READ_COMPLETED_WITH_ERR)
dev_err(d->dev, "direct command read ERR received\n");
/* Mask off the ACK value and clear status */
writel(val, d->regs + DSI_DIRECT_CMD_STS_CLR);
val = readl(d->regs + DSI_CMD_MODE_STS_FLAG);
if (val)
dev_dbg(d->dev, "DSI_CMD_MODE_STS_FLAG = %08x\n", val);
if (val & DSI_CMD_MODE_STS_ERR_NO_TE)
/* This happens all the time (safe to ignore) */
dev_dbg(d->dev, "CMD mode no TE\n");
if (val & DSI_CMD_MODE_STS_ERR_TE_MISS)
/* This happens all the time (safe to ignore) */
dev_dbg(d->dev, "CMD mode TE miss\n");
if (val & DSI_CMD_MODE_STS_ERR_SDI1_UNDERRUN)
dev_err(d->dev, "CMD mode SD1 underrun\n");
if (val & DSI_CMD_MODE_STS_ERR_SDI2_UNDERRUN)
dev_err(d->dev, "CMD mode SD2 underrun\n");
if (val & DSI_CMD_MODE_STS_ERR_UNWANTED_RD)
dev_err(d->dev, "CMD mode unwanted RD\n");
writel(val, d->regs + DSI_CMD_MODE_STS_CLR);
val = readl(d->regs + DSI_DIRECT_CMD_RD_STS_FLAG);
if (val)
dev_dbg(d->dev, "DSI_DIRECT_CMD_RD_STS_FLAG = %08x\n", val);
writel(val, d->regs + DSI_DIRECT_CMD_RD_STS_CLR);
val = readl(d->regs + DSI_TG_STS_FLAG);
if (val)
dev_dbg(d->dev, "DSI_TG_STS_FLAG = %08x\n", val);
writel(val, d->regs + DSI_TG_STS_CLR);
val = readl(d->regs + DSI_VID_MODE_STS_FLAG);
if (val)
dev_dbg(d->dev, "DSI_VID_MODE_STS_FLAG = %08x\n", val);
if (val & DSI_VID_MODE_STS_VSG_RUNNING)
dev_dbg(d->dev, "VID mode VSG running\n");
if (val & DSI_VID_MODE_STS_ERR_MISSING_DATA)
dev_err(d->dev, "VID mode missing data\n");
if (val & DSI_VID_MODE_STS_ERR_MISSING_HSYNC)
dev_err(d->dev, "VID mode missing HSYNC\n");
if (val & DSI_VID_MODE_STS_ERR_MISSING_VSYNC)
dev_err(d->dev, "VID mode missing VSYNC\n");
if (val & DSI_VID_MODE_STS_REG_ERR_SMALL_LENGTH)
dev_err(d->dev, "VID mode less bytes than expected between two HSYNC\n");
if (val & DSI_VID_MODE_STS_REG_ERR_SMALL_HEIGHT)
dev_err(d->dev, "VID mode less lines than expected between two VSYNC\n");
if (val & (DSI_VID_MODE_STS_ERR_BURSTWRITE |
DSI_VID_MODE_STS_ERR_LINEWRITE |
DSI_VID_MODE_STS_ERR_LONGREAD))
dev_err(d->dev, "VID mode read/write error\n");
if (val & DSI_VID_MODE_STS_ERR_VRS_WRONG_LENGTH)
dev_err(d->dev, "VID mode received packets differ from expected size\n");
if (val & DSI_VID_MODE_STS_VSG_RECOVERY)
dev_err(d->dev, "VID mode VSG in recovery mode\n");
writel(val, d->regs + DSI_VID_MODE_STS_CLR);
return te_received;
}
static void mcde_dsi_attach_to_mcde(struct mcde_dsi *d)
{
d->mcde->mdsi = d->mdsi;
/*
	 * Select the way the DSI data flow is pushed to the display:
* currently we just support video or command mode depending
* on the type of display. Video mode defaults to using the
* formatter itself for synchronization (stateless video panel).
*
* FIXME: add flags to struct mipi_dsi_device .flags to indicate
* displays that require BTA (bus turn around) so we can handle
* such displays as well. Figure out how to properly handle
* single frame on-demand updates with DRM for command mode
* displays (MCDE_COMMAND_ONESHOT_FLOW).
*/
if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO)
d->mcde->flow_mode = MCDE_VIDEO_FORMATTER_FLOW;
else
d->mcde->flow_mode = MCDE_COMMAND_TE_FLOW;
}
static int mcde_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *mdsi)
{
struct mcde_dsi *d = host_to_mcde_dsi(host);
if (mdsi->lanes < 1 || mdsi->lanes > 2) {
DRM_ERROR("dsi device params invalid, 1 or 2 lanes supported\n");
return -EINVAL;
}
dev_info(d->dev, "attached DSI device with %d lanes\n", mdsi->lanes);
	/* MIPI_DSI_FMT_RGB888 etc */
dev_info(d->dev, "format %08x, %dbpp\n", mdsi->format,
mipi_dsi_pixel_format_to_bpp(mdsi->format));
dev_info(d->dev, "mode flags: %08lx\n", mdsi->mode_flags);
d->mdsi = mdsi;
if (d->mcde)
mcde_dsi_attach_to_mcde(d);
return 0;
}
static int mcde_dsi_host_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *mdsi)
{
struct mcde_dsi *d = host_to_mcde_dsi(host);
d->mdsi = NULL;
if (d->mcde)
d->mcde->mdsi = NULL;
return 0;
}
#define MCDE_DSI_HOST_IS_READ(type) \
((type == MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM) || \
(type == MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM) || \
(type == MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM) || \
(type == MIPI_DSI_DCS_READ))
static int mcde_dsi_execute_transfer(struct mcde_dsi *d,
const struct mipi_dsi_msg *msg)
{
const u32 loop_delay_us = 10; /* us */
u32 loop_counter;
size_t txlen = msg->tx_len;
size_t rxlen = msg->rx_len;
int i;
u32 val;
int ret;
writel(~0, d->regs + DSI_DIRECT_CMD_STS_CLR);
writel(~0, d->regs + DSI_CMD_MODE_STS_CLR);
/* Send command */
writel(1, d->regs + DSI_DIRECT_CMD_SEND);
loop_counter = 1000 * 1000 / loop_delay_us;
if (MCDE_DSI_HOST_IS_READ(msg->type)) {
/* Read command */
while (!(readl(d->regs + DSI_DIRECT_CMD_STS) &
(DSI_DIRECT_CMD_STS_READ_COMPLETED |
DSI_DIRECT_CMD_STS_READ_COMPLETED_WITH_ERR))
&& --loop_counter)
usleep_range(loop_delay_us, (loop_delay_us * 3) / 2);
if (!loop_counter) {
dev_err(d->dev, "DSI read timeout!\n");
/* Set exit code and retry */
return -ETIME;
}
} else {
/* Writing only */
while (!(readl(d->regs + DSI_DIRECT_CMD_STS) &
DSI_DIRECT_CMD_STS_WRITE_COMPLETED)
&& --loop_counter)
usleep_range(loop_delay_us, (loop_delay_us * 3) / 2);
if (!loop_counter) {
/* Set exit code and retry */
dev_err(d->dev, "DSI write timeout!\n");
return -ETIME;
}
}
val = readl(d->regs + DSI_DIRECT_CMD_STS);
if (val & DSI_DIRECT_CMD_STS_READ_COMPLETED_WITH_ERR) {
dev_err(d->dev, "read completed with error\n");
writel(1, d->regs + DSI_DIRECT_CMD_RD_INIT);
return -EIO;
}
if (val & DSI_DIRECT_CMD_STS_ACKNOWLEDGE_WITH_ERR_RECEIVED) {
val >>= DSI_DIRECT_CMD_STS_ACK_VAL_SHIFT;
dev_err(d->dev, "error during transmission: %04x\n",
val);
return -EIO;
}
if (!MCDE_DSI_HOST_IS_READ(msg->type)) {
/* Return number of bytes written */
ret = txlen;
} else {
/* OK this is a read command, get the response */
u32 rdsz;
u32 rddat;
u8 *rx = msg->rx_buf;
rdsz = readl(d->regs + DSI_DIRECT_CMD_RD_PROPERTY);
rdsz &= DSI_DIRECT_CMD_RD_PROPERTY_RD_SIZE_MASK;
rddat = readl(d->regs + DSI_DIRECT_CMD_RDDAT);
if (rdsz < rxlen) {
dev_err(d->dev, "read error, requested %zd got %d\n",
rxlen, rdsz);
return -EIO;
}
/* FIXME: read more than 4 bytes */
for (i = 0; i < 4 && i < rxlen; i++)
rx[i] = (rddat >> (i * 8)) & 0xff;
ret = rdsz;
}
/* Successful transmission */
return ret;
}
static ssize_t mcde_dsi_host_transfer(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct mcde_dsi *d = host_to_mcde_dsi(host);
const u8 *tx = msg->tx_buf;
size_t txlen = msg->tx_len;
size_t rxlen = msg->rx_len;
unsigned int retries = 0;
u32 val;
int ret;
int i;
if (txlen > 16) {
dev_err(d->dev,
"dunno how to write more than 16 bytes yet\n");
return -EIO;
}
if (rxlen > 4) {
dev_err(d->dev,
"dunno how to read more than 4 bytes yet\n");
return -EIO;
}
dev_dbg(d->dev,
"message to channel %d, write %zd bytes read %zd bytes\n",
msg->channel, txlen, rxlen);
/* Command "nature" */
if (MCDE_DSI_HOST_IS_READ(msg->type))
/* MCTL_MAIN_DATA_CTL already set up */
val = DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_READ;
else
val = DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_WRITE;
/*
* More than 2 bytes will not fit in a single packet, so it's
* time to set the "long not short" bit. One byte is used by
* the MIPI DCS command leaving just one byte for the payload
* in a short package.
*/
if (mipi_dsi_packet_format_is_long(msg->type))
val |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LONGNOTSHORT;
val |= 0 << DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID_SHIFT;
val |= txlen << DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE_SHIFT;
val |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN;
val |= msg->type << DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_SHIFT;
writel(val, d->regs + DSI_DIRECT_CMD_MAIN_SETTINGS);
/* MIPI DCS command is part of the data */
if (txlen > 0) {
val = 0;
for (i = 0; i < 4 && i < txlen; i++)
val |= tx[i] << (i * 8);
}
writel(val, d->regs + DSI_DIRECT_CMD_WRDAT0);
if (txlen > 4) {
val = 0;
for (i = 0; i < 4 && (i + 4) < txlen; i++)
val |= tx[i + 4] << (i * 8);
writel(val, d->regs + DSI_DIRECT_CMD_WRDAT1);
}
if (txlen > 8) {
val = 0;
for (i = 0; i < 4 && (i + 8) < txlen; i++)
val |= tx[i + 8] << (i * 8);
writel(val, d->regs + DSI_DIRECT_CMD_WRDAT2);
}
if (txlen > 12) {
val = 0;
for (i = 0; i < 4 && (i + 12) < txlen; i++)
val |= tx[i + 12] << (i * 8);
writel(val, d->regs + DSI_DIRECT_CMD_WRDAT3);
}
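	/*
	 * The payload (including the DCS command byte) is packed
	 * little-endian into the WRDAT0..3 registers, four bytes per
	 * register. As an illustrative example, a two-byte DCS write
	 * { 0x51, 0xff } ends up as WRDAT0 = 0x0000ff51.
	 */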
while (retries < 3) {
ret = mcde_dsi_execute_transfer(d, msg);
if (ret >= 0)
break;
retries++;
}
if (ret < 0 && retries)
dev_err(d->dev, "gave up after %d retries\n", retries);
/* Clear any errors */
writel(~0, d->regs + DSI_DIRECT_CMD_STS_CLR);
writel(~0, d->regs + DSI_CMD_MODE_STS_CLR);
return ret;
}
static const struct mipi_dsi_host_ops mcde_dsi_host_ops = {
.attach = mcde_dsi_host_attach,
.detach = mcde_dsi_host_detach,
.transfer = mcde_dsi_host_transfer,
};
/* This sends a direct (short) command to request TE */
void mcde_dsi_te_request(struct mipi_dsi_device *mdsi)
{
struct mcde_dsi *d;
u32 val;
d = host_to_mcde_dsi(mdsi->host);
/* Command "nature" TE request */
val = DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_NAT_TE_REQ;
val |= 0 << DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_ID_SHIFT;
val |= 2 << DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_SIZE_SHIFT;
val |= DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_LP_EN;
val |= MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM <<
DSI_DIRECT_CMD_MAIN_SETTINGS_CMD_HEAD_SHIFT;
writel(val, d->regs + DSI_DIRECT_CMD_MAIN_SETTINGS);
	/* Clear TE received and error status bits and enable them */
writel(DSI_DIRECT_CMD_STS_CLR_TE_RECEIVED_CLR |
DSI_DIRECT_CMD_STS_CLR_ACKNOWLEDGE_WITH_ERR_RECEIVED_CLR,
d->regs + DSI_DIRECT_CMD_STS_CLR);
val = readl(d->regs + DSI_DIRECT_CMD_STS_CTL);
val |= DSI_DIRECT_CMD_STS_CTL_TE_RECEIVED_EN;
val |= DSI_DIRECT_CMD_STS_CTL_ACKNOWLEDGE_WITH_ERR_EN;
writel(val, d->regs + DSI_DIRECT_CMD_STS_CTL);
/* Clear and enable no TE or TE missing status */
writel(DSI_CMD_MODE_STS_CLR_ERR_NO_TE_CLR |
DSI_CMD_MODE_STS_CLR_ERR_TE_MISS_CLR,
d->regs + DSI_CMD_MODE_STS_CLR);
val = readl(d->regs + DSI_CMD_MODE_STS_CTL);
val |= DSI_CMD_MODE_STS_CTL_ERR_NO_TE_EN;
val |= DSI_CMD_MODE_STS_CTL_ERR_TE_MISS_EN;
writel(val, d->regs + DSI_CMD_MODE_STS_CTL);
/* Send this TE request command */
writel(1, d->regs + DSI_DIRECT_CMD_SEND);
}
static void mcde_dsi_setup_video_mode(struct mcde_dsi *d,
const struct drm_display_mode *mode)
{
/* cpp, characters per pixel, number of bytes per pixel */
u8 cpp = mipi_dsi_pixel_format_to_bpp(d->mdsi->format) / 8;
u64 pclk;
u64 bpl;
int hfp;
int hbp;
int hsa;
u32 blkline_pck, line_duration;
u32 val;
val = 0;
if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
val |= DSI_VID_MAIN_CTL_BURST_MODE;
if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
val |= DSI_VID_MAIN_CTL_SYNC_PULSE_ACTIVE;
val |= DSI_VID_MAIN_CTL_SYNC_PULSE_HORIZONTAL;
}
/* RGB header and pixel mode */
switch (d->mdsi->format) {
case MIPI_DSI_FMT_RGB565:
val |= MIPI_DSI_PACKED_PIXEL_STREAM_16 <<
DSI_VID_MAIN_CTL_HEADER_SHIFT;
val |= DSI_VID_MAIN_CTL_VID_PIXEL_MODE_16BITS;
break;
case MIPI_DSI_FMT_RGB666_PACKED:
val |= MIPI_DSI_PACKED_PIXEL_STREAM_18 <<
DSI_VID_MAIN_CTL_HEADER_SHIFT;
val |= DSI_VID_MAIN_CTL_VID_PIXEL_MODE_18BITS;
break;
case MIPI_DSI_FMT_RGB666:
val |= MIPI_DSI_PIXEL_STREAM_3BYTE_18
<< DSI_VID_MAIN_CTL_HEADER_SHIFT;
val |= DSI_VID_MAIN_CTL_VID_PIXEL_MODE_18BITS_LOOSE;
break;
case MIPI_DSI_FMT_RGB888:
val |= MIPI_DSI_PACKED_PIXEL_STREAM_24 <<
DSI_VID_MAIN_CTL_HEADER_SHIFT;
val |= DSI_VID_MAIN_CTL_VID_PIXEL_MODE_24BITS;
break;
default:
dev_err(d->dev, "unknown pixel mode\n");
return;
}
/* TODO: TVG (test video generator) could be enabled here */
/*
* During vertical blanking: go to LP mode
* Like with the EOL setting, if this is not set, the EOL area will be
* filled with NULL or blanking packets in the vblank area.
* FIXME: some Samsung phones and display panels such as s6e63m0 use
* DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_BLANKING here instead,
* figure out how to properly configure that from the panel.
*/
val |= DSI_VID_MAIN_CTL_REG_BLKLINE_MODE_LP_0;
/*
* During EOL: go to LP mode. If this is not set, the EOL area will be
* filled with NULL or blanking packets.
*/
val |= DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_LP_0;
/* Recovery mode 1 */
val |= 1 << DSI_VID_MAIN_CTL_RECOVERY_MODE_SHIFT;
/* All other fields zero */
writel(val, d->regs + DSI_VID_MAIN_CTL);
/* Vertical frame parameters are pretty straight-forward */
val = mode->vdisplay << DSI_VID_VSIZE_VACT_LENGTH_SHIFT;
/* vertical front porch */
val |= (mode->vsync_start - mode->vdisplay)
<< DSI_VID_VSIZE_VFP_LENGTH_SHIFT;
/* vertical sync active */
val |= (mode->vsync_end - mode->vsync_start)
<< DSI_VID_VSIZE_VSA_LENGTH_SHIFT;
/* vertical back porch */
val |= (mode->vtotal - mode->vsync_end)
<< DSI_VID_VSIZE_VBP_LENGTH_SHIFT;
writel(val, d->regs + DSI_VID_VSIZE);
/*
* Horizontal frame parameters:
* horizontal resolution is given in pixels but must be re-calculated
* into bytes since this is what the hardware expects, these registers
* define the payload size of the packet.
*
* hfp = horizontal front porch in bytes
* hbp = horizontal back porch in bytes
* hsa = horizontal sync active in bytes
*
* 6 + 2 is HFP header + checksum
*/
hfp = (mode->hsync_start - mode->hdisplay) * cpp - 6 - 2;
if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
/*
* Use sync pulse for sync: explicit HSA time
* 6 is HBP header + checksum
* 4 is RGB header + checksum
*/
hbp = (mode->htotal - mode->hsync_end) * cpp - 4 - 6;
/*
* 6 is HBP header + checksum
* 4 is HSW packet bytes
* 4 is RGB header + checksum
*/
hsa = (mode->hsync_end - mode->hsync_start) * cpp - 4 - 4 - 6;
} else {
/*
* Use event for sync: HBP includes both back porch and sync
* 6 is HBP header + checksum
* 4 is HSW packet bytes
* 4 is RGB header + checksum
*/
hbp = (mode->htotal - mode->hsync_start) * cpp - 4 - 4 - 6;
/* HSA is not present in this mode and set to 0 */
hsa = 0;
}
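	/*
	 * Illustrative example (numbers not from any specific panel): with
	 * RGB888 (cpp = 3), hdisplay = 480, hsync_start = 490,
	 * hsync_end = 496, htotal = 520 and sync pulses enabled, this
	 * yields hfp = 22, hbp = 62 and hsa = 4 bytes.
	 */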
if (hfp < 0) {
dev_info(d->dev, "hfp negative, set to 0\n");
hfp = 0;
}
if (hbp < 0) {
dev_info(d->dev, "hbp negative, set to 0\n");
hbp = 0;
}
if (hsa < 0) {
dev_info(d->dev, "hsa negative, set to 0\n");
hsa = 0;
}
dev_dbg(d->dev, "hfp: %u, hbp: %u, hsa: %u bytes\n",
hfp, hbp, hsa);
/* Frame parameters: horizontal sync active */
val = hsa << DSI_VID_HSIZE1_HSA_LENGTH_SHIFT;
/* horizontal back porch */
val |= hbp << DSI_VID_HSIZE1_HBP_LENGTH_SHIFT;
/* horizontal front porch */
val |= hfp << DSI_VID_HSIZE1_HFP_LENGTH_SHIFT;
writel(val, d->regs + DSI_VID_HSIZE1);
/* RGB data length (visible bytes on one scanline) */
val = mode->hdisplay * cpp;
writel(val, d->regs + DSI_VID_HSIZE2);
dev_dbg(d->dev, "RGB length, visible area on a line: %u bytes\n", val);
/*
* Calculate the time between two pixels in picoseconds using
* the supplied refresh rate and total resolution including
* porches and sync.
*/
/* (ps/s) / (pixels/s) = ps/pixels */
pclk = DIV_ROUND_UP_ULL(1000000000000, (mode->clock * 1000));
dev_dbg(d->dev, "picoseconds between two pixels: %llu\n",
pclk);
/*
* How many bytes per line will this update frequency yield?
*
* Calculate the number of picoseconds for one scanline (1), then
* divide by 1000000000000 (2) to get in pixels per second we
* want to output.
*
* Multiply with number of bytes per second at this video display
* frequency (3) to get number of bytes transferred during this
* time. Notice that we use the frequency the display wants,
* not what we actually get from the DSI PLL, which is hs_freq.
*
	 * The arithmetic is done in a different order to avoid
* overflow.
*/
bpl = pclk * mode->htotal; /* (1) picoseconds per line */
dev_dbg(d->dev, "picoseconds per line: %llu\n", bpl);
/* Multiply with bytes per second (3) */
bpl *= (d->mdsi->hs_rate / 8);
/* Pixels per second (2) */
bpl = DIV_ROUND_DOWN_ULL(bpl, 1000000); /* microseconds */
bpl = DIV_ROUND_DOWN_ULL(bpl, 1000000); /* seconds */
/* parallel transactions in all lanes */
bpl *= d->mdsi->lanes;
dev_dbg(d->dev,
"calculated bytes per line: %llu @ %d Hz with HS %lu Hz\n",
bpl, drm_mode_vrefresh(mode), d->mdsi->hs_rate);
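	/*
	 * Illustrative example (numbers not from any specific panel): a
	 * 25 MHz pixel clock gives pclk = 40000 ps; with htotal = 520 that
	 * is 20800000 ps per line, and at a 420.16 MHz HS rate on two
	 * lanes the integer math above works out to about 2184 bytes per
	 * line.
	 */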
/*
* 6 is header + checksum, header = 4 bytes, checksum = 2 bytes
* 4 is short packet for vsync/hsync
*/
if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
/* Set the event packet size to 0 (not used) */
writel(0, d->regs + DSI_VID_BLKSIZE1);
/*
* FIXME: isn't the hsync width in pixels? The porch and
* sync area size is in pixels here, but this -6
* seems to be for bytes. It looks like this in the vendor
* code though. Is it completely untested?
*/
blkline_pck = bpl - (mode->hsync_end - mode->hsync_start) - 6;
val = blkline_pck << DSI_VID_BLKSIZE2_BLKLINE_PULSE_PCK_SHIFT;
writel(val, d->regs + DSI_VID_BLKSIZE2);
} else {
/* Set the sync pulse packet size to 0 (not used) */
writel(0, d->regs + DSI_VID_BLKSIZE2);
/* Specifying payload size in bytes (-4-6 from manual) */
blkline_pck = bpl - 4 - 6;
if (blkline_pck > 0x1FFF)
dev_err(d->dev, "blkline_pck too big %d bytes\n",
blkline_pck);
val = blkline_pck << DSI_VID_BLKSIZE1_BLKLINE_EVENT_PCK_SHIFT;
val &= DSI_VID_BLKSIZE1_BLKLINE_EVENT_PCK_MASK;
writel(val, d->regs + DSI_VID_BLKSIZE1);
}
/*
* The line duration is used to scale back the frequency from
* the max frequency supported by the HS clock to the desired
* update frequency in vrefresh.
*/
line_duration = blkline_pck + 6;
/*
	 * The datasheet contains this complex condition for decreasing
* the line duration by 1 under very specific circumstances.
* Here we also imply that LP is used during burst EOL.
*/
if (d->mdsi->lanes == 2 && (hsa & 0x01) && (hfp & 0x01)
&& (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST))
line_duration--;
line_duration = DIV_ROUND_CLOSEST(line_duration, d->mdsi->lanes);
dev_dbg(d->dev, "line duration %u bytes\n", line_duration);
val = line_duration << DSI_VID_DPHY_TIME_REG_LINE_DURATION_SHIFT;
/*
* This is the time to perform LP->HS on D-PHY
* FIXME: nowhere to get this from: DT property on the DSI?
* The manual says this is "system dependent".
	 * Values like 48 and 72 are seen in the vendor code.
*/
val |= 48 << DSI_VID_DPHY_TIME_REG_WAKEUP_TIME_SHIFT;
writel(val, d->regs + DSI_VID_DPHY_TIME);
/*
* See the manual figure 657 page 2203 for understanding the impact
* of the different burst mode settings.
*/
if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
int blkeol_pck, blkeol_duration;
/*
* Packet size at EOL for burst mode, this is only used
* if DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_LP_0 is NOT set,
* but we instead send NULL or blanking packets at EOL.
* This is given in number of bytes.
*
* See the manual page 2198 for the 13 reg_blkeol_pck bits.
*/
blkeol_pck = bpl - (mode->htotal * cpp) - 6;
if (blkeol_pck < 0) {
dev_err(d->dev, "video block does not fit on line!\n");
dev_err(d->dev,
"calculated bytes per line: %llu @ %d Hz\n",
bpl, drm_mode_vrefresh(mode));
dev_err(d->dev,
"bytes per line (blkline_pck) %u bytes\n",
blkline_pck);
dev_err(d->dev,
"blkeol_pck becomes %d bytes\n", blkeol_pck);
return;
}
dev_dbg(d->dev, "BLKEOL packet: %d bytes\n", blkeol_pck);
val = readl(d->regs + DSI_VID_BLKSIZE1);
val &= ~DSI_VID_BLKSIZE1_BLKEOL_PCK_MASK;
val |= blkeol_pck << DSI_VID_BLKSIZE1_BLKEOL_PCK_SHIFT;
writel(val, d->regs + DSI_VID_BLKSIZE1);
/* Use the same value for exact burst limit */
val = blkeol_pck <<
DSI_VID_VCA_SETTING2_EXACT_BURST_LIMIT_SHIFT;
val &= DSI_VID_VCA_SETTING2_EXACT_BURST_LIMIT_MASK;
writel(val, d->regs + DSI_VID_VCA_SETTING2);
/*
* This BLKEOL duration is claimed to be the duration in clock
* cycles of the BLLP end-of-line (EOL) period for each line if
* DSI_VID_MAIN_CTL_REG_BLKEOL_MODE_LP_0 is set.
*
	 * It is hard to trust the manual's claim that this is in clock
* cycles as we mimic the behaviour of the vendor code, which
* appears to write a number of bytes that would have been
* transferred on a single lane.
*
* See the manual figure 657 page 2203 and page 2198 for the 13
* reg_blkeol_duration bits.
*
	 * FIXME: should this also be set up for non-burst mode
* according to figure 565 page 2202?
*/
blkeol_duration = DIV_ROUND_CLOSEST(blkeol_pck + 6,
d->mdsi->lanes);
dev_dbg(d->dev, "BLKEOL duration: %d clock cycles\n",
blkeol_duration);
val = readl(d->regs + DSI_VID_PCK_TIME);
val &= ~DSI_VID_PCK_TIME_BLKEOL_DURATION_MASK;
val |= blkeol_duration <<
DSI_VID_PCK_TIME_BLKEOL_DURATION_SHIFT;
writel(val, d->regs + DSI_VID_PCK_TIME);
/* Max burst limit, this is given in bytes */
val = readl(d->regs + DSI_VID_VCA_SETTING1);
val &= ~DSI_VID_VCA_SETTING1_MAX_BURST_LIMIT_MASK;
val |= (blkeol_pck - 6) <<
DSI_VID_VCA_SETTING1_MAX_BURST_LIMIT_SHIFT;
writel(val, d->regs + DSI_VID_VCA_SETTING1);
}
/* Maximum line limit */
val = readl(d->regs + DSI_VID_VCA_SETTING2);
val &= ~DSI_VID_VCA_SETTING2_MAX_LINE_LIMIT_MASK;
val |= (blkline_pck - 6) <<
DSI_VID_VCA_SETTING2_MAX_LINE_LIMIT_SHIFT;
writel(val, d->regs + DSI_VID_VCA_SETTING2);
dev_dbg(d->dev, "blkline pck: %d bytes\n", blkline_pck - 6);
}
static void mcde_dsi_start(struct mcde_dsi *d)
{
unsigned long hs_freq;
u32 val;
int i;
/* No integration mode */
writel(0, d->regs + DSI_MCTL_INTEGRATION_MODE);
/* Enable the DSI port, from drivers/video/mcde/dsilink_v2.c */
val = DSI_MCTL_MAIN_DATA_CTL_LINK_EN |
DSI_MCTL_MAIN_DATA_CTL_BTA_EN |
DSI_MCTL_MAIN_DATA_CTL_READ_EN |
DSI_MCTL_MAIN_DATA_CTL_REG_TE_EN;
if (!(d->mdsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET))
val |= DSI_MCTL_MAIN_DATA_CTL_HOST_EOT_GEN;
writel(val, d->regs + DSI_MCTL_MAIN_DATA_CTL);
/* Set a high command timeout, clear other fields */
val = 0x3ff << DSI_CMD_MODE_CTL_TE_TIMEOUT_SHIFT;
writel(val, d->regs + DSI_CMD_MODE_CTL);
/*
	 * UI_X4 is described as "unit interval times four".
	 * I guess that since DSI packets are 4 bytes wide, one unit
	 * is one byte.
*/
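	/*
	 * For example, with the default 420.16 MHz HS clock the integer
	 * math below yields 4000 / 420 = 9.
	 */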
hs_freq = clk_get_rate(d->hs_clk);
hs_freq /= 1000000; /* MHz */
val = 4000 / hs_freq;
dev_dbg(d->dev, "UI value: %d\n", val);
val <<= DSI_MCTL_DPHY_STATIC_UI_X4_SHIFT;
val &= DSI_MCTL_DPHY_STATIC_UI_X4_MASK;
writel(val, d->regs + DSI_MCTL_DPHY_STATIC);
/*
* Enable clocking: 0x0f (something?) between each burst,
* enable the second lane if needed, enable continuous clock if
* needed, enable switch into ULPM (ultra-low power mode) on
* all the lines.
*/
val = 0x0f << DSI_MCTL_MAIN_PHY_CTL_WAIT_BURST_TIME_SHIFT;
if (d->mdsi->lanes == 2)
val |= DSI_MCTL_MAIN_PHY_CTL_LANE2_EN;
if (!(d->mdsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
val |= DSI_MCTL_MAIN_PHY_CTL_CLK_CONTINUOUS;
val |= DSI_MCTL_MAIN_PHY_CTL_CLK_ULPM_EN |
DSI_MCTL_MAIN_PHY_CTL_DAT1_ULPM_EN |
DSI_MCTL_MAIN_PHY_CTL_DAT2_ULPM_EN;
writel(val, d->regs + DSI_MCTL_MAIN_PHY_CTL);
val = (1 << DSI_MCTL_ULPOUT_TIME_CKLANE_ULPOUT_TIME_SHIFT) |
(1 << DSI_MCTL_ULPOUT_TIME_DATA_ULPOUT_TIME_SHIFT);
writel(val, d->regs + DSI_MCTL_ULPOUT_TIME);
writel(DSI_DPHY_LANES_TRIM_DPHY_SPECS_90_81B_0_90,
d->regs + DSI_DPHY_LANES_TRIM);
/* High PHY timeout */
val = (0x0f << DSI_MCTL_DPHY_TIMEOUT_CLK_DIV_SHIFT) |
(0x3fff << DSI_MCTL_DPHY_TIMEOUT_HSTX_TO_VAL_SHIFT) |
(0x3fff << DSI_MCTL_DPHY_TIMEOUT_LPRX_TO_VAL_SHIFT);
writel(val, d->regs + DSI_MCTL_DPHY_TIMEOUT);
val = DSI_MCTL_MAIN_EN_PLL_START |
DSI_MCTL_MAIN_EN_CKLANE_EN |
DSI_MCTL_MAIN_EN_DAT1_EN |
DSI_MCTL_MAIN_EN_IF1_EN;
if (d->mdsi->lanes == 2)
val |= DSI_MCTL_MAIN_EN_DAT2_EN;
writel(val, d->regs + DSI_MCTL_MAIN_EN);
/* Wait for the PLL to lock and the clock and data lines to come up */
i = 0;
val = DSI_MCTL_MAIN_STS_PLL_LOCK |
DSI_MCTL_MAIN_STS_CLKLANE_READY |
DSI_MCTL_MAIN_STS_DAT1_READY;
if (d->mdsi->lanes == 2)
val |= DSI_MCTL_MAIN_STS_DAT2_READY;
while ((readl(d->regs + DSI_MCTL_MAIN_STS) & val) != val) {
/* Sleep for a millisecond */
usleep_range(1000, 1500);
if (i++ == 100) {
dev_warn(d->dev, "DSI lanes did not start up\n");
return;
}
}
/* TODO needed? */
/* Command mode, clear IF1 ID */
val = readl(d->regs + DSI_CMD_MODE_CTL);
/*
* If we enable low-power mode here,
* then display updates become really slow.
*/
if (d->mdsi->mode_flags & MIPI_DSI_MODE_LPM)
val |= DSI_CMD_MODE_CTL_IF1_LP_EN;
val &= ~DSI_CMD_MODE_CTL_IF1_ID_MASK;
writel(val, d->regs + DSI_CMD_MODE_CTL);
/* Wait for DSI PHY to initialize */
usleep_range(100, 200);
dev_info(d->dev, "DSI link enabled\n");
}
/*
* Notice that this is called from inside the display controller
* and not from the bridge callbacks.
*/
void mcde_dsi_enable(struct drm_bridge *bridge)
{
struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
unsigned long hs_freq, lp_freq;
u32 val;
int ret;
/* Copy maximum clock frequencies */
if (d->mdsi->lp_rate)
lp_freq = d->mdsi->lp_rate;
else
lp_freq = DSI_DEFAULT_LP_FREQ_HZ;
if (d->mdsi->hs_rate)
hs_freq = d->mdsi->hs_rate;
else
hs_freq = DSI_DEFAULT_HS_FREQ_HZ;
/* Enable LP (Low Power, Energy Save, ES) and HS (High Speed) clocks */
d->lp_freq = clk_round_rate(d->lp_clk, lp_freq);
ret = clk_set_rate(d->lp_clk, d->lp_freq);
if (ret)
dev_err(d->dev, "failed to set LP clock rate %lu Hz\n",
d->lp_freq);
d->hs_freq = clk_round_rate(d->hs_clk, hs_freq);
ret = clk_set_rate(d->hs_clk, d->hs_freq);
if (ret)
dev_err(d->dev, "failed to set HS clock rate %lu Hz\n",
d->hs_freq);
/* Start clocks */
ret = clk_prepare_enable(d->lp_clk);
if (ret)
dev_err(d->dev, "failed to enable LP clock\n");
else
dev_info(d->dev, "DSI LP clock rate %lu Hz\n",
d->lp_freq);
ret = clk_prepare_enable(d->hs_clk);
if (ret)
dev_err(d->dev, "failed to enable HS clock\n");
else
dev_info(d->dev, "DSI HS clock rate %lu Hz\n",
d->hs_freq);
/* Assert RESET through the PRCMU, active low */
/* FIXME: which DSI block? */
regmap_update_bits(d->prcmu, PRCM_DSI_SW_RESET,
PRCM_DSI_SW_RESET_DSI0_SW_RESETN, 0);
usleep_range(100, 200);
/* De-assert RESET again */
regmap_update_bits(d->prcmu, PRCM_DSI_SW_RESET,
PRCM_DSI_SW_RESET_DSI0_SW_RESETN,
PRCM_DSI_SW_RESET_DSI0_SW_RESETN);
/* Start up the hardware */
mcde_dsi_start(d);
if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
/* Set up the video mode from the DRM mode */
mcde_dsi_setup_video_mode(d, d->mode);
/* Put IF1 into video mode */
val = readl(d->regs + DSI_MCTL_MAIN_DATA_CTL);
val |= DSI_MCTL_MAIN_DATA_CTL_IF1_MODE;
writel(val, d->regs + DSI_MCTL_MAIN_DATA_CTL);
/* Disable command mode on IF1 */
val = readl(d->regs + DSI_CMD_MODE_CTL);
val &= ~DSI_CMD_MODE_CTL_IF1_LP_EN;
writel(val, d->regs + DSI_CMD_MODE_CTL);
/* Enable some error interrupts */
val = readl(d->regs + DSI_VID_MODE_STS_CTL);
val |= DSI_VID_MODE_STS_CTL_ERR_MISSING_VSYNC;
val |= DSI_VID_MODE_STS_CTL_ERR_MISSING_DATA;
writel(val, d->regs + DSI_VID_MODE_STS_CTL);
/* Enable video mode */
val = readl(d->regs + DSI_MCTL_MAIN_DATA_CTL);
val |= DSI_MCTL_MAIN_DATA_CTL_VID_EN;
writel(val, d->regs + DSI_MCTL_MAIN_DATA_CTL);
} else {
/* Command mode, clear IF1 ID */
val = readl(d->regs + DSI_CMD_MODE_CTL);
/*
* If we enable low-power mode here
* the display updates become really slow.
*/
if (d->mdsi->mode_flags & MIPI_DSI_MODE_LPM)
val |= DSI_CMD_MODE_CTL_IF1_LP_EN;
val &= ~DSI_CMD_MODE_CTL_IF1_ID_MASK;
writel(val, d->regs + DSI_CMD_MODE_CTL);
}
dev_info(d->dev, "enabled MCDE DSI master\n");
}
static void mcde_dsi_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adj)
{
struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
if (!d->mdsi) {
dev_err(d->dev, "no DSI device attached to encoder!\n");
return;
}
d->mode = mode;
dev_info(d->dev, "set DSI master to %dx%d %u Hz %s mode\n",
mode->hdisplay, mode->vdisplay, mode->clock * 1000,
(d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) ? "VIDEO" : "CMD"
);
}
static void mcde_dsi_wait_for_command_mode_stop(struct mcde_dsi *d)
{
u32 val;
int i;
/*
* Wait until we get out of command mode
* CSM = Command State Machine
*/
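/* Poll for roughly 100-200 ms at most: 100 iterations of a 1-2 ms sleep */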
i = 0;
val = DSI_CMD_MODE_STS_CSM_RUNNING;
while ((readl(d->regs + DSI_CMD_MODE_STS) & val) == val) {
/* Sleep for a millisecond */
usleep_range(1000, 2000);
if (i++ == 100) {
dev_warn(d->dev,
"could not get out of command mode\n");
return;
}
}
}
static void mcde_dsi_wait_for_video_mode_stop(struct mcde_dsi *d)
{
u32 val;
int i;
/* Wait until we get out of video mode */
i = 0;
val = DSI_VID_MODE_STS_VSG_RUNNING;
while ((readl(d->regs + DSI_VID_MODE_STS) & val) == val) {
/* Sleep for a millisecond */
usleep_range(1000, 2000);
if (i++ == 100) {
dev_warn(d->dev,
"could not get out of video mode\n");
return;
}
}
}
/*
* Notice that this is called from inside the display controller
* and not from the bridge callbacks.
*/
void mcde_dsi_disable(struct drm_bridge *bridge)
{
struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
u32 val;
if (d->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
/* Stop video mode */
val = readl(d->regs + DSI_MCTL_MAIN_DATA_CTL);
val &= ~DSI_MCTL_MAIN_DATA_CTL_VID_EN;
writel(val, d->regs + DSI_MCTL_MAIN_DATA_CTL);
mcde_dsi_wait_for_video_mode_stop(d);
} else {
/* Stop command mode */
mcde_dsi_wait_for_command_mode_stop(d);
}
/*
* Stop clocks and terminate any DSI traffic here so the panel can
* send commands to shut down the display using DSI direct write until
* this point.
*/
/* Disable all error interrupts */
writel(0, d->regs + DSI_VID_MODE_STS_CTL);
clk_disable_unprepare(d->hs_clk);
clk_disable_unprepare(d->lp_clk);
}
static int mcde_dsi_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
struct drm_device *drm = bridge->dev;
if (!drm_core_check_feature(drm, DRIVER_ATOMIC)) {
dev_err(d->dev, "we need atomic updates\n");
return -ENOTSUPP;
}
/* Attach the DSI bridge to the output (panel etc) bridge */
return drm_bridge_attach(bridge->encoder, d->bridge_out, bridge, flags);
}
static const struct drm_bridge_funcs mcde_dsi_bridge_funcs = {
.attach = mcde_dsi_bridge_attach,
.mode_set = mcde_dsi_bridge_mode_set,
};
static int mcde_dsi_bind(struct device *dev, struct device *master,
void *data)
{
struct drm_device *drm = data;
struct mcde *mcde = to_mcde(drm);
struct mcde_dsi *d = dev_get_drvdata(dev);
struct device_node *child;
struct drm_panel *panel = NULL;
struct drm_bridge *bridge = NULL;
if (!of_get_available_child_count(dev->of_node)) {
dev_info(dev, "unused DSI interface\n");
d->unused = true;
return 0;
}
d->mcde = mcde;
/* If the display was attached before binding, set it up now */
if (d->mdsi)
mcde_dsi_attach_to_mcde(d);
/* Obtain the clocks */
d->hs_clk = devm_clk_get(dev, "hs");
if (IS_ERR(d->hs_clk)) {
dev_err(dev, "unable to get HS clock\n");
return PTR_ERR(d->hs_clk);
}
d->lp_clk = devm_clk_get(dev, "lp");
if (IS_ERR(d->lp_clk)) {
dev_err(dev, "unable to get LP clock\n");
return PTR_ERR(d->lp_clk);
}
/* Look for a panel as a child to this node */
for_each_available_child_of_node(dev->of_node, child) {
panel = of_drm_find_panel(child);
if (IS_ERR(panel)) {
dev_err(dev, "failed to find panel try bridge (%ld)\n",
PTR_ERR(panel));
panel = NULL;
bridge = of_drm_find_bridge(child);
if (!bridge) {
dev_err(dev, "failed to find bridge\n");
of_node_put(child);
return -EINVAL;
}
}
}
if (panel) {
bridge = drm_panel_bridge_add_typed(panel,
DRM_MODE_CONNECTOR_DSI);
if (IS_ERR(bridge)) {
dev_err(dev, "error adding panel bridge\n");
return PTR_ERR(bridge);
}
dev_info(dev, "connected to panel\n");
d->panel = panel;
} else if (bridge) {
/* TODO: AV8100 HDMI encoder goes here for example */
dev_info(dev, "connected to non-panel bridge (unsupported)\n");
return -ENODEV;
} else {
dev_err(dev, "no panel or bridge\n");
return -ENODEV;
}
d->bridge_out = bridge;
/* Create a bridge for this DSI channel */
d->bridge.funcs = &mcde_dsi_bridge_funcs;
d->bridge.of_node = dev->of_node;
drm_bridge_add(&d->bridge);
/* TODO: first come first serve, use a list */
mcde->bridge = &d->bridge;
dev_info(dev, "initialized MCDE DSI bridge\n");
return 0;
}
static void mcde_dsi_unbind(struct device *dev, struct device *master,
void *data)
{
struct mcde_dsi *d = dev_get_drvdata(dev);
if (d->panel)
drm_panel_bridge_remove(d->bridge_out);
regmap_update_bits(d->prcmu, PRCM_DSI_SW_RESET,
PRCM_DSI_SW_RESET_DSI0_SW_RESETN, 0);
}
static const struct component_ops mcde_dsi_component_ops = {
.bind = mcde_dsi_bind,
.unbind = mcde_dsi_unbind,
};
static int mcde_dsi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mcde_dsi *d;
struct mipi_dsi_host *host;
u32 dsi_id;
int ret;
d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
if (!d)
return -ENOMEM;
d->dev = dev;
platform_set_drvdata(pdev, d);
/* Get a handle on the PRCMU so we can do reset */
d->prcmu =
syscon_regmap_lookup_by_compatible("stericsson,db8500-prcmu");
if (IS_ERR(d->prcmu)) {
dev_err(dev, "no PRCMU regmap\n");
return PTR_ERR(d->prcmu);
}
d->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(d->regs))
return PTR_ERR(d->regs);
dsi_id = readl(d->regs + DSI_ID_REG);
dev_info(dev, "HW revision 0x%08x\n", dsi_id);
host = &d->dsi_host;
host->dev = dev;
host->ops = &mcde_dsi_host_ops;
ret = mipi_dsi_host_register(host);
if (ret < 0) {
dev_err(dev, "failed to register DSI host: %d\n", ret);
return ret;
}
dev_info(dev, "registered DSI host\n");
platform_set_drvdata(pdev, d);
return component_add(dev, &mcde_dsi_component_ops);
}
static void mcde_dsi_remove(struct platform_device *pdev)
{
struct mcde_dsi *d = platform_get_drvdata(pdev);
component_del(&pdev->dev, &mcde_dsi_component_ops);
mipi_dsi_host_unregister(&d->dsi_host);
}
static const struct of_device_id mcde_dsi_of_match[] = {
{
.compatible = "ste,mcde-dsi",
},
{},
};
struct platform_driver mcde_dsi_driver = {
.driver = {
.name = "mcde-dsi",
.of_match_table = mcde_dsi_of_match,
},
.probe = mcde_dsi_probe,
.remove_new = mcde_dsi_remove,
};
| linux-master | drivers/gpu/drm/mcde/mcde_dsi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Linus Walleij <[email protected]>
* Parts of this file were based on the MCDE driver by Marcus Lorentzon
* (C) ST-Ericsson SA 2013
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-buf.h>
#include <linux/regulator/consumer.h>
#include <linux/media-bus-format.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_vblank.h>
#include <video/mipi_display.h>
#include "mcde_drm.h"
#include "mcde_display_regs.h"
enum mcde_fifo {
MCDE_FIFO_A,
MCDE_FIFO_B,
/* TODO: implement FIFO C0 and FIFO C1 */
};
enum mcde_channel {
MCDE_CHANNEL_0 = 0,
MCDE_CHANNEL_1,
MCDE_CHANNEL_2,
MCDE_CHANNEL_3,
};
enum mcde_extsrc {
MCDE_EXTSRC_0 = 0,
MCDE_EXTSRC_1,
MCDE_EXTSRC_2,
MCDE_EXTSRC_3,
MCDE_EXTSRC_4,
MCDE_EXTSRC_5,
MCDE_EXTSRC_6,
MCDE_EXTSRC_7,
MCDE_EXTSRC_8,
MCDE_EXTSRC_9,
};
enum mcde_overlay {
MCDE_OVERLAY_0 = 0,
MCDE_OVERLAY_1,
MCDE_OVERLAY_2,
MCDE_OVERLAY_3,
MCDE_OVERLAY_4,
MCDE_OVERLAY_5,
};
enum mcde_formatter {
MCDE_DSI_FORMATTER_0 = 0,
MCDE_DSI_FORMATTER_1,
MCDE_DSI_FORMATTER_2,
MCDE_DSI_FORMATTER_3,
MCDE_DSI_FORMATTER_4,
MCDE_DSI_FORMATTER_5,
MCDE_DPI_FORMATTER_0,
MCDE_DPI_FORMATTER_1,
};
void mcde_display_irq(struct mcde *mcde)
{
u32 mispp, misovl, mischnl;
bool vblank = false;
/* Handle display IRQs */
mispp = readl(mcde->regs + MCDE_MISPP);
misovl = readl(mcde->regs + MCDE_MISOVL);
mischnl = readl(mcde->regs + MCDE_MISCHNL);
/*
* Handle IRQs from the DSI link. All IRQs from the DSI links
* are just latched onto the MCDE IRQ line, so we need to traverse
* any active DSI masters and check if an IRQ is originating from
* them.
*
* TODO: Currently only one DSI link is supported.
*/
if (!mcde->dpi_output && mcde_dsi_irq(mcde->mdsi)) {
u32 val;
/*
* In oneshot mode we do not send continuous updates
* to the display; instead we only push out updates when
* the update function is called, and then we disable the
* flow on the channel once we get the TE IRQ.
*/
if (mcde->flow_mode == MCDE_COMMAND_ONESHOT_FLOW) {
spin_lock(&mcde->flow_lock);
if (--mcde->flow_active == 0) {
dev_dbg(mcde->dev, "TE0 IRQ\n");
/* Disable FIFO A flow */
val = readl(mcde->regs + MCDE_CRA0);
val &= ~MCDE_CRX0_FLOEN;
writel(val, mcde->regs + MCDE_CRA0);
}
spin_unlock(&mcde->flow_lock);
}
}
/* Vblank from one of the channels */
if (mispp & MCDE_PP_VCMPA) {
dev_dbg(mcde->dev, "chnl A vblank IRQ\n");
vblank = true;
}
if (mispp & MCDE_PP_VCMPB) {
dev_dbg(mcde->dev, "chnl B vblank IRQ\n");
vblank = true;
}
if (mispp & MCDE_PP_VCMPC0)
dev_dbg(mcde->dev, "chnl C0 vblank IRQ\n");
if (mispp & MCDE_PP_VCMPC1)
dev_dbg(mcde->dev, "chnl C1 vblank IRQ\n");
if (mispp & MCDE_PP_VSCC0)
dev_dbg(mcde->dev, "chnl C0 TE IRQ\n");
if (mispp & MCDE_PP_VSCC1)
dev_dbg(mcde->dev, "chnl C1 TE IRQ\n");
writel(mispp, mcde->regs + MCDE_RISPP);
if (vblank)
drm_crtc_handle_vblank(&mcde->pipe.crtc);
if (misovl)
dev_info(mcde->dev, "some stray overlay IRQ %08x\n", misovl);
writel(misovl, mcde->regs + MCDE_RISOVL);
if (mischnl)
dev_info(mcde->dev, "some stray channel error IRQ %08x\n",
mischnl);
writel(mischnl, mcde->regs + MCDE_RISCHNL);
}
void mcde_display_disable_irqs(struct mcde *mcde)
{
/* Disable all IRQs */
writel(0, mcde->regs + MCDE_IMSCPP);
writel(0, mcde->regs + MCDE_IMSCOVL);
writel(0, mcde->regs + MCDE_IMSCCHNL);
/* Clear any pending IRQs */
writel(0xFFFFFFFF, mcde->regs + MCDE_RISPP);
writel(0xFFFFFFFF, mcde->regs + MCDE_RISOVL);
writel(0xFFFFFFFF, mcde->regs + MCDE_RISCHNL);
}
static int mcde_display_check(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *pstate,
struct drm_crtc_state *cstate)
{
const struct drm_display_mode *mode = &cstate->mode;
struct drm_framebuffer *old_fb = pipe->plane.state->fb;
struct drm_framebuffer *fb = pstate->fb;
if (fb) {
u32 offset = drm_fb_dma_get_gem_addr(fb, pstate, 0);
/* FB base address must be dword aligned. */
if (offset & 3) {
DRM_DEBUG_KMS("FB not 32-bit aligned\n");
return -EINVAL;
}
/*
* There's no pitch register, the mode's hdisplay
* controls this.
*/
if (fb->pitches[0] != mode->hdisplay * fb->format->cpp[0]) {
DRM_DEBUG_KMS("can't handle pitches\n");
return -EINVAL;
}
/*
* We can't change the FB format in a flicker-free
* manner (and only update it during CRTC enable).
*/
if (old_fb && old_fb->format != fb->format)
cstate->mode_changed = true;
}
return 0;
}
static int mcde_configure_extsrc(struct mcde *mcde, enum mcde_extsrc src,
u32 format)
{
u32 val;
u32 conf;
u32 cr;
switch (src) {
case MCDE_EXTSRC_0:
conf = MCDE_EXTSRC0CONF;
cr = MCDE_EXTSRC0CR;
break;
case MCDE_EXTSRC_1:
conf = MCDE_EXTSRC1CONF;
cr = MCDE_EXTSRC1CR;
break;
case MCDE_EXTSRC_2:
conf = MCDE_EXTSRC2CONF;
cr = MCDE_EXTSRC2CR;
break;
case MCDE_EXTSRC_3:
conf = MCDE_EXTSRC3CONF;
cr = MCDE_EXTSRC3CR;
break;
case MCDE_EXTSRC_4:
conf = MCDE_EXTSRC4CONF;
cr = MCDE_EXTSRC4CR;
break;
case MCDE_EXTSRC_5:
conf = MCDE_EXTSRC5CONF;
cr = MCDE_EXTSRC5CR;
break;
case MCDE_EXTSRC_6:
conf = MCDE_EXTSRC6CONF;
cr = MCDE_EXTSRC6CR;
break;
case MCDE_EXTSRC_7:
conf = MCDE_EXTSRC7CONF;
cr = MCDE_EXTSRC7CR;
break;
case MCDE_EXTSRC_8:
conf = MCDE_EXTSRC8CONF;
cr = MCDE_EXTSRC8CR;
break;
case MCDE_EXTSRC_9:
conf = MCDE_EXTSRC9CONF;
cr = MCDE_EXTSRC9CR;
break;
}
/*
* Configure external source 0: one buffer (buffer 0),
* primary overlay ID 0.
* From mcde_hw.c ovly_update_registers() in the vendor tree
*/
val = 0 << MCDE_EXTSRCXCONF_BUF_ID_SHIFT;
val |= 1 << MCDE_EXTSRCXCONF_BUF_NB_SHIFT;
val |= 0 << MCDE_EXTSRCXCONF_PRI_OVLID_SHIFT;
switch (format) {
case DRM_FORMAT_ARGB8888:
val |= MCDE_EXTSRCXCONF_BPP_ARGB8888 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
break;
case DRM_FORMAT_ABGR8888:
val |= MCDE_EXTSRCXCONF_BPP_ARGB8888 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_XRGB8888:
val |= MCDE_EXTSRCXCONF_BPP_XRGB8888 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
break;
case DRM_FORMAT_XBGR8888:
val |= MCDE_EXTSRCXCONF_BPP_XRGB8888 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_RGB888:
val |= MCDE_EXTSRCXCONF_BPP_RGB888 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
break;
case DRM_FORMAT_BGR888:
val |= MCDE_EXTSRCXCONF_BPP_RGB888 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_ARGB4444:
val |= MCDE_EXTSRCXCONF_BPP_ARGB4444 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
break;
case DRM_FORMAT_ABGR4444:
val |= MCDE_EXTSRCXCONF_BPP_ARGB4444 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_XRGB4444:
val |= MCDE_EXTSRCXCONF_BPP_RGB444 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
break;
case DRM_FORMAT_XBGR4444:
val |= MCDE_EXTSRCXCONF_BPP_RGB444 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_XRGB1555:
val |= MCDE_EXTSRCXCONF_BPP_IRGB1555 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
break;
case DRM_FORMAT_XBGR1555:
val |= MCDE_EXTSRCXCONF_BPP_IRGB1555 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_RGB565:
val |= MCDE_EXTSRCXCONF_BPP_RGB565 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
break;
case DRM_FORMAT_BGR565:
val |= MCDE_EXTSRCXCONF_BPP_RGB565 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_YUV422:
val |= MCDE_EXTSRCXCONF_BPP_YCBCR422 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
break;
default:
dev_err(mcde->dev, "Unknown pixel format 0x%08x\n",
format);
return -EINVAL;
}
writel(val, mcde->regs + conf);
/* Software select, primary */
val = MCDE_EXTSRCXCR_SEL_MOD_SOFTWARE_SEL;
val |= MCDE_EXTSRCXCR_MULTIOVL_CTRL_PRIMARY;
writel(val, mcde->regs + cr);
return 0;
}
static void mcde_configure_overlay(struct mcde *mcde, enum mcde_overlay ovl,
enum mcde_extsrc src,
enum mcde_channel ch,
const struct drm_display_mode *mode,
u32 format, int cpp)
{
u32 val;
u32 conf1;
u32 conf2;
u32 crop;
u32 ljinc;
u32 cr;
u32 comp;
u32 pixel_fetcher_watermark;
switch (ovl) {
case MCDE_OVERLAY_0:
conf1 = MCDE_OVL0CONF;
conf2 = MCDE_OVL0CONF2;
crop = MCDE_OVL0CROP;
ljinc = MCDE_OVL0LJINC;
cr = MCDE_OVL0CR;
comp = MCDE_OVL0COMP;
break;
case MCDE_OVERLAY_1:
conf1 = MCDE_OVL1CONF;
conf2 = MCDE_OVL1CONF2;
crop = MCDE_OVL1CROP;
ljinc = MCDE_OVL1LJINC;
cr = MCDE_OVL1CR;
comp = MCDE_OVL1COMP;
break;
case MCDE_OVERLAY_2:
conf1 = MCDE_OVL2CONF;
conf2 = MCDE_OVL2CONF2;
crop = MCDE_OVL2CROP;
ljinc = MCDE_OVL2LJINC;
cr = MCDE_OVL2CR;
comp = MCDE_OVL2COMP;
break;
case MCDE_OVERLAY_3:
conf1 = MCDE_OVL3CONF;
conf2 = MCDE_OVL3CONF2;
crop = MCDE_OVL3CROP;
ljinc = MCDE_OVL3LJINC;
cr = MCDE_OVL3CR;
comp = MCDE_OVL3COMP;
break;
case MCDE_OVERLAY_4:
conf1 = MCDE_OVL4CONF;
conf2 = MCDE_OVL4CONF2;
crop = MCDE_OVL4CROP;
ljinc = MCDE_OVL4LJINC;
cr = MCDE_OVL4CR;
comp = MCDE_OVL4COMP;
break;
case MCDE_OVERLAY_5:
conf1 = MCDE_OVL5CONF;
conf2 = MCDE_OVL5CONF2;
crop = MCDE_OVL5CROP;
ljinc = MCDE_OVL5LJINC;
cr = MCDE_OVL5CR;
comp = MCDE_OVL5COMP;
break;
}
val = mode->hdisplay << MCDE_OVLXCONF_PPL_SHIFT;
val |= mode->vdisplay << MCDE_OVLXCONF_LPF_SHIFT;
/* Use external source 0 that we just configured */
val |= src << MCDE_OVLXCONF_EXTSRC_ID_SHIFT;
writel(val, mcde->regs + conf1);
val = MCDE_OVLXCONF2_BP_PER_PIXEL_ALPHA;
val |= 0xff << MCDE_OVLXCONF2_ALPHAVALUE_SHIFT;
/* OPQ: overlay is opaque */
switch (format) {
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_ARGB4444:
case DRM_FORMAT_ABGR4444:
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_XBGR1555:
/* No OPQ */
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_RGB888:
case DRM_FORMAT_BGR888:
case DRM_FORMAT_RGB565:
case DRM_FORMAT_BGR565:
case DRM_FORMAT_YUV422:
val |= MCDE_OVLXCONF2_OPQ;
break;
default:
dev_err(mcde->dev, "Unknown pixel format 0x%08x\n",
format);
break;
}
/*
* Pixel fetch watermark level is max 0x1FFF pixels.
* The following basic rules should be followed:
* 1. The value should be at least 256 bits.
* 2. The sum of all active overlays pixelfetch watermark level
* multiplied with bits per pixel, should be lower than the
* size of input_fifo_size in bits.
* 3. The value should be a multiple of a line (256 bits).
*/
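/*
 * For example, the 32 bpp (4 cpp) setting below of 48 pixels is
 * 48 * 32 = 1536 bits, which meets both the 256-bit minimum and the
 * multiple-of-256-bits rule; likewise 128 * 16 = 2048 bits for 2 cpp
 * and 96 * 24 = 2304 bits for 3 cpp.
 */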
switch (cpp) {
case 2:
pixel_fetcher_watermark = 128;
break;
case 3:
pixel_fetcher_watermark = 96;
break;
case 4:
pixel_fetcher_watermark = 48;
break;
default:
pixel_fetcher_watermark = 48;
break;
}
dev_dbg(mcde->dev, "pixel fetcher watermark level %d pixels\n",
pixel_fetcher_watermark);
val |= pixel_fetcher_watermark << MCDE_OVLXCONF2_PIXELFETCHERWATERMARKLEVEL_SHIFT;
writel(val, mcde->regs + conf2);
/* Number of bytes to fetch per line */
writel(mcde->stride, mcde->regs + ljinc);
/* No cropping */
writel(0, mcde->regs + crop);
/* Set up overlay control register */
val = MCDE_OVLXCR_OVLEN;
val |= MCDE_OVLXCR_COLCCTRL_DISABLED;
val |= MCDE_OVLXCR_BURSTSIZE_8W <<
MCDE_OVLXCR_BURSTSIZE_SHIFT;
val |= MCDE_OVLXCR_MAXOUTSTANDING_8_REQ <<
MCDE_OVLXCR_MAXOUTSTANDING_SHIFT;
/* Not using rotation but set it up anyway */
val |= MCDE_OVLXCR_ROTBURSTSIZE_8W <<
MCDE_OVLXCR_ROTBURSTSIZE_SHIFT;
writel(val, mcde->regs + cr);
/*
* Set up the overlay compositor to route the overlay out to
* the desired channel
*/
val = ch << MCDE_OVLXCOMP_CH_ID_SHIFT;
writel(val, mcde->regs + comp);
}
static void mcde_configure_channel(struct mcde *mcde, enum mcde_channel ch,
enum mcde_fifo fifo,
const struct drm_display_mode *mode)
{
u32 val;
u32 conf;
u32 sync;
u32 stat;
u32 bgcol;
u32 mux;
switch (ch) {
case MCDE_CHANNEL_0:
conf = MCDE_CHNL0CONF;
sync = MCDE_CHNL0SYNCHMOD;
stat = MCDE_CHNL0STAT;
bgcol = MCDE_CHNL0BCKGNDCOL;
mux = MCDE_CHNL0MUXING;
break;
case MCDE_CHANNEL_1:
conf = MCDE_CHNL1CONF;
sync = MCDE_CHNL1SYNCHMOD;
stat = MCDE_CHNL1STAT;
bgcol = MCDE_CHNL1BCKGNDCOL;
mux = MCDE_CHNL1MUXING;
break;
case MCDE_CHANNEL_2:
conf = MCDE_CHNL2CONF;
sync = MCDE_CHNL2SYNCHMOD;
stat = MCDE_CHNL2STAT;
bgcol = MCDE_CHNL2BCKGNDCOL;
mux = MCDE_CHNL2MUXING;
break;
case MCDE_CHANNEL_3:
conf = MCDE_CHNL3CONF;
sync = MCDE_CHNL3SYNCHMOD;
stat = MCDE_CHNL3STAT;
bgcol = MCDE_CHNL3BCKGNDCOL;
mux = MCDE_CHNL3MUXING;
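/* NOTE: channel 3 is not configured; the function returns early here */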
return;
}
/* Set up channel 0 sync (based on chnl_update_registers()) */
switch (mcde->flow_mode) {
case MCDE_COMMAND_ONESHOT_FLOW:
/* Oneshot is achieved with software sync */
val = MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SOFTWARE
<< MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SHIFT;
break;
case MCDE_COMMAND_TE_FLOW:
val = MCDE_CHNLXSYNCHMOD_SRC_SYNCH_HARDWARE
<< MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SHIFT;
val |= MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_TE0
<< MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_SHIFT;
break;
case MCDE_COMMAND_BTA_TE_FLOW:
val = MCDE_CHNLXSYNCHMOD_SRC_SYNCH_HARDWARE
<< MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SHIFT;
/*
* TODO:
* The vendor driver uses the formatter as sync source
* for BTA TE mode. Test to use TE if you have a panel
* that uses this mode.
*/
val |= MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_FORMATTER
<< MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_SHIFT;
break;
case MCDE_VIDEO_TE_FLOW:
val = MCDE_CHNLXSYNCHMOD_SRC_SYNCH_HARDWARE
<< MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SHIFT;
val |= MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_TE0
<< MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_SHIFT;
break;
case MCDE_VIDEO_FORMATTER_FLOW:
case MCDE_DPI_FORMATTER_FLOW:
val = MCDE_CHNLXSYNCHMOD_SRC_SYNCH_HARDWARE
<< MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SHIFT;
val |= MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_FORMATTER
<< MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_SHIFT;
break;
default:
dev_err(mcde->dev, "unknown flow mode %d\n",
mcde->flow_mode);
return;
}
writel(val, mcde->regs + sync);
/* Set up pixels per line and lines per frame */
val = (mode->hdisplay - 1) << MCDE_CHNLXCONF_PPL_SHIFT;
val |= (mode->vdisplay - 1) << MCDE_CHNLXCONF_LPF_SHIFT;
writel(val, mcde->regs + conf);
/*
* Normalize color conversion:
* black background, OLED conversion disable on channel
*/
val = MCDE_CHNLXSTAT_CHNLBLBCKGND_EN |
MCDE_CHNLXSTAT_CHNLRD;
writel(val, mcde->regs + stat);
writel(0, mcde->regs + bgcol);
/* Set up muxing: connect the channel to the desired FIFO */
switch (fifo) {
case MCDE_FIFO_A:
writel(MCDE_CHNLXMUXING_FIFO_ID_FIFO_A,
mcde->regs + mux);
break;
case MCDE_FIFO_B:
writel(MCDE_CHNLXMUXING_FIFO_ID_FIFO_B,
mcde->regs + mux);
break;
}
/*
* If using DPI configure the sync event.
* TODO: this is for LCD only, it does not cover TV out.
*/
if (mcde->dpi_output) {
u32 stripwidth;
stripwidth = 0xF000 / (mode->vdisplay * 4);
dev_info(mcde->dev, "stripwidth: %d\n", stripwidth);
val = MCDE_SYNCHCONF_HWREQVEVENT_ACTIVE_VIDEO |
(mode->hdisplay - 1 - stripwidth) << MCDE_SYNCHCONF_HWREQVCNT_SHIFT |
MCDE_SYNCHCONF_SWINTVEVENT_ACTIVE_VIDEO |
(mode->hdisplay - 1 - stripwidth) << MCDE_SYNCHCONF_SWINTVCNT_SHIFT;
switch (fifo) {
case MCDE_FIFO_A:
writel(val, mcde->regs + MCDE_SYNCHCONFA);
break;
case MCDE_FIFO_B:
writel(val, mcde->regs + MCDE_SYNCHCONFB);
break;
}
}
}
static void mcde_configure_fifo(struct mcde *mcde, enum mcde_fifo fifo,
enum mcde_formatter fmt,
int fifo_wtrmrk)
{
u32 val;
u32 ctrl;
u32 cr0, cr1;
switch (fifo) {
case MCDE_FIFO_A:
ctrl = MCDE_CTRLA;
cr0 = MCDE_CRA0;
cr1 = MCDE_CRA1;
break;
case MCDE_FIFO_B:
ctrl = MCDE_CTRLB;
cr0 = MCDE_CRB0;
cr1 = MCDE_CRB1;
break;
}
val = fifo_wtrmrk << MCDE_CTRLX_FIFOWTRMRK_SHIFT;
/*
* Select the formatter to use for this FIFO
*
* The register definitions imply that different IDs should be used
* by the DSI formatters depending on whether they are in VID or CMD
* mode, and the manual says they are dedicated but identical.
* The vendor code uses them as it sees fit.
*/
switch (fmt) {
case MCDE_DSI_FORMATTER_0:
val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT;
val |= MCDE_CTRLX_FORMID_DSI0VID << MCDE_CTRLX_FORMID_SHIFT;
break;
case MCDE_DSI_FORMATTER_1:
val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT;
val |= MCDE_CTRLX_FORMID_DSI0CMD << MCDE_CTRLX_FORMID_SHIFT;
break;
case MCDE_DSI_FORMATTER_2:
val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT;
val |= MCDE_CTRLX_FORMID_DSI1VID << MCDE_CTRLX_FORMID_SHIFT;
break;
case MCDE_DSI_FORMATTER_3:
val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT;
val |= MCDE_CTRLX_FORMID_DSI1CMD << MCDE_CTRLX_FORMID_SHIFT;
break;
case MCDE_DSI_FORMATTER_4:
val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT;
val |= MCDE_CTRLX_FORMID_DSI2VID << MCDE_CTRLX_FORMID_SHIFT;
break;
case MCDE_DSI_FORMATTER_5:
val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT;
val |= MCDE_CTRLX_FORMID_DSI2CMD << MCDE_CTRLX_FORMID_SHIFT;
break;
case MCDE_DPI_FORMATTER_0:
val |= MCDE_CTRLX_FORMTYPE_DPITV << MCDE_CTRLX_FORMTYPE_SHIFT;
val |= MCDE_CTRLX_FORMID_DPIA << MCDE_CTRLX_FORMID_SHIFT;
break;
case MCDE_DPI_FORMATTER_1:
val |= MCDE_CTRLX_FORMTYPE_DPITV << MCDE_CTRLX_FORMTYPE_SHIFT;
val |= MCDE_CTRLX_FORMID_DPIB << MCDE_CTRLX_FORMID_SHIFT;
break;
}
writel(val, mcde->regs + ctrl);
/* Blend source with Alpha 0xff on FIFO */
val = MCDE_CRX0_BLENDEN |
0xff << MCDE_CRX0_ALPHABLEND_SHIFT;
writel(val, mcde->regs + cr0);
spin_lock(&mcde->fifo_crx1_lock);
val = readl(mcde->regs + cr1);
/*
* Set-up from mcde_fmtr_dsi.c, fmtr_dsi_enable_video()
* FIXME: a different clock needs to be selected for TV out.
*/
if (mcde->dpi_output) {
struct drm_connector *connector = drm_panel_bridge_connector(mcde->bridge);
u32 bus_format;
/* Assume RGB888 24 bit if we have no further info */
if (!connector->display_info.num_bus_formats) {
dev_info(mcde->dev, "panel does not specify bus format, assume RGB888\n");
bus_format = MEDIA_BUS_FMT_RGB888_1X24;
} else {
bus_format = connector->display_info.bus_formats[0];
}
/*
* Set up the CDWIN and OUTBPP for the LCD
*
* FIXME: fill this in if you know the correspondence between the MIPI
* DPI specification and the media bus formats.
*/
val &= ~MCDE_CRX1_CDWIN_MASK;
val &= ~MCDE_CRX1_OUTBPP_MASK;
switch (bus_format) {
case MEDIA_BUS_FMT_RGB888_1X24:
val |= MCDE_CRX1_CDWIN_24BPP << MCDE_CRX1_CDWIN_SHIFT;
val |= MCDE_CRX1_OUTBPP_24BPP << MCDE_CRX1_OUTBPP_SHIFT;
break;
default:
dev_err(mcde->dev, "unknown bus format, assume RGB888\n");
val |= MCDE_CRX1_CDWIN_24BPP << MCDE_CRX1_CDWIN_SHIFT;
val |= MCDE_CRX1_OUTBPP_24BPP << MCDE_CRX1_OUTBPP_SHIFT;
break;
}
} else {
/* Use the MCDE clock for DSI */
val &= ~MCDE_CRX1_CLKSEL_MASK;
val |= MCDE_CRX1_CLKSEL_MCDECLK << MCDE_CRX1_CLKSEL_SHIFT;
}
writel(val, mcde->regs + cr1);
spin_unlock(&mcde->fifo_crx1_lock);
}
static void mcde_configure_dsi_formatter(struct mcde *mcde,
enum mcde_formatter fmt,
u32 formatter_frame,
int pkt_size)
{
u32 val;
u32 conf0;
u32 frame;
u32 pkt;
u32 sync;
u32 cmdw;
u32 delay0, delay1;
switch (fmt) {
case MCDE_DSI_FORMATTER_0:
conf0 = MCDE_DSIVID0CONF0;
frame = MCDE_DSIVID0FRAME;
pkt = MCDE_DSIVID0PKT;
sync = MCDE_DSIVID0SYNC;
cmdw = MCDE_DSIVID0CMDW;
delay0 = MCDE_DSIVID0DELAY0;
delay1 = MCDE_DSIVID0DELAY1;
break;
case MCDE_DSI_FORMATTER_1:
conf0 = MCDE_DSIVID1CONF0;
frame = MCDE_DSIVID1FRAME;
pkt = MCDE_DSIVID1PKT;
sync = MCDE_DSIVID1SYNC;
cmdw = MCDE_DSIVID1CMDW;
delay0 = MCDE_DSIVID1DELAY0;
delay1 = MCDE_DSIVID1DELAY1;
break;
case MCDE_DSI_FORMATTER_2:
conf0 = MCDE_DSIVID2CONF0;
frame = MCDE_DSIVID2FRAME;
pkt = MCDE_DSIVID2PKT;
sync = MCDE_DSIVID2SYNC;
cmdw = MCDE_DSIVID2CMDW;
delay0 = MCDE_DSIVID2DELAY0;
delay1 = MCDE_DSIVID2DELAY1;
break;
default:
dev_err(mcde->dev, "tried to configure a non-DSI formatter as DSI\n");
return;
}
/*
* Enable formatter
* 8 bit commands and DCS commands (notgen = not generic)
*/
val = MCDE_DSICONF0_CMD8 | MCDE_DSICONF0_DCSVID_NOTGEN;
if (mcde->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO)
val |= MCDE_DSICONF0_VID_MODE_VID;
switch (mcde->mdsi->format) {
case MIPI_DSI_FMT_RGB888:
val |= MCDE_DSICONF0_PACKING_RGB888 <<
MCDE_DSICONF0_PACKING_SHIFT;
break;
case MIPI_DSI_FMT_RGB666:
val |= MCDE_DSICONF0_PACKING_RGB666 <<
MCDE_DSICONF0_PACKING_SHIFT;
break;
case MIPI_DSI_FMT_RGB666_PACKED:
dev_err(mcde->dev,
"we cannot handle the packed RGB666 format\n");
val |= MCDE_DSICONF0_PACKING_RGB666 <<
MCDE_DSICONF0_PACKING_SHIFT;
break;
case MIPI_DSI_FMT_RGB565:
val |= MCDE_DSICONF0_PACKING_RGB565 <<
MCDE_DSICONF0_PACKING_SHIFT;
break;
default:
dev_err(mcde->dev, "unknown DSI format\n");
return;
}
writel(val, mcde->regs + conf0);
writel(formatter_frame, mcde->regs + frame);
writel(pkt_size, mcde->regs + pkt);
writel(0, mcde->regs + sync);
/* Define the MIPI command: we want to write into display memory */
val = MIPI_DCS_WRITE_MEMORY_CONTINUE <<
MCDE_DSIVIDXCMDW_CMDW_CONTINUE_SHIFT;
val |= MIPI_DCS_WRITE_MEMORY_START <<
MCDE_DSIVIDXCMDW_CMDW_START_SHIFT;
writel(val, mcde->regs + cmdw);
/*
* FIXME: the vendor driver has some hack around this value in
* CMD mode with autotrig.
*/
writel(0, mcde->regs + delay0);
writel(0, mcde->regs + delay1);
}
static void mcde_enable_fifo(struct mcde *mcde, enum mcde_fifo fifo)
{
u32 val;
u32 cr;
switch (fifo) {
case MCDE_FIFO_A:
cr = MCDE_CRA0;
break;
case MCDE_FIFO_B:
cr = MCDE_CRB0;
break;
default:
dev_err(mcde->dev, "cannot enable FIFO %c\n",
'A' + fifo);
return;
}
spin_lock(&mcde->flow_lock);
val = readl(mcde->regs + cr);
val |= MCDE_CRX0_FLOEN;
writel(val, mcde->regs + cr);
mcde->flow_active++;
spin_unlock(&mcde->flow_lock);
}
static void mcde_disable_fifo(struct mcde *mcde, enum mcde_fifo fifo,
bool wait_for_drain)
{
int timeout = 100;
u32 val;
u32 cr;
switch (fifo) {
case MCDE_FIFO_A:
cr = MCDE_CRA0;
break;
case MCDE_FIFO_B:
cr = MCDE_CRB0;
break;
default:
dev_err(mcde->dev, "cannot disable FIFO %c\n",
'A' + fifo);
return;
}
spin_lock(&mcde->flow_lock);
val = readl(mcde->regs + cr);
val &= ~MCDE_CRX0_FLOEN;
writel(val, mcde->regs + cr);
mcde->flow_active = 0;
spin_unlock(&mcde->flow_lock);
if (!wait_for_drain)
return;
/* Check that we really drained and stopped the flow */
while (readl(mcde->regs + cr) & MCDE_CRX0_FLOEN) {
usleep_range(1000, 1500);
if (!--timeout) {
dev_err(mcde->dev,
"FIFO timeout while clearing FIFO %c\n",
'A' + fifo);
return;
}
}
}
/*
* This drains a pipe i.e. a FIFO connected to a certain channel
*/
static void mcde_drain_pipe(struct mcde *mcde, enum mcde_fifo fifo,
enum mcde_channel ch)
{
u32 val;
u32 ctrl;
u32 synsw;
switch (fifo) {
case MCDE_FIFO_A:
ctrl = MCDE_CTRLA;
break;
case MCDE_FIFO_B:
ctrl = MCDE_CTRLB;
break;
}
switch (ch) {
case MCDE_CHANNEL_0:
synsw = MCDE_CHNL0SYNCHSW;
break;
case MCDE_CHANNEL_1:
synsw = MCDE_CHNL1SYNCHSW;
break;
case MCDE_CHANNEL_2:
synsw = MCDE_CHNL2SYNCHSW;
break;
case MCDE_CHANNEL_3:
synsw = MCDE_CHNL3SYNCHSW;
return;
}
val = readl(mcde->regs + ctrl);
if (!(val & MCDE_CTRLX_FIFOEMPTY)) {
dev_err(mcde->dev, "Channel A FIFO not empty (handover)\n");
/* Attempt to clear the FIFO */
mcde_enable_fifo(mcde, fifo);
/* Trigger a software sync out on respective channel (0-3) */
writel(MCDE_CHNLXSYNCHSW_SW_TRIG, mcde->regs + synsw);
/* Disable FIFO A flow again */
mcde_disable_fifo(mcde, fifo, true);
}
}
static int mcde_dsi_get_pkt_div(int ppl, int fifo_size)
{
/*
* DSI command mode line packets should be split into an even number of
* packets smaller than or equal to the fifo size.
*/
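/*
 * Worked example (assuming MCDE_MAX_WIDTH permits a divisor of 2):
 * ppl = 800 with a 640-entry FIFO yields div = 2, i.e. two packets
 * of 400 pixels per line.
 */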
int div;
const int max_div = DIV_ROUND_UP(MCDE_MAX_WIDTH, fifo_size);
for (div = 1; div < max_div; div++)
if (ppl % div == 0 && ppl / div <= fifo_size)
return div;
return 1;
}
static void mcde_setup_dpi(struct mcde *mcde, const struct drm_display_mode *mode,
int *fifo_wtrmrk_lvl)
{
struct drm_connector *connector = drm_panel_bridge_connector(mcde->bridge);
u32 hsw, hfp, hbp;
u32 vsw, vfp, vbp;
u32 val;
/* FIXME: we only support LCD, implement TV out */
hsw = mode->hsync_end - mode->hsync_start;
hfp = mode->hsync_start - mode->hdisplay;
hbp = mode->htotal - mode->hsync_end;
vsw = mode->vsync_end - mode->vsync_start;
vfp = mode->vsync_start - mode->vdisplay;
vbp = mode->vtotal - mode->vsync_end;
dev_info(mcde->dev, "output on DPI LCD from channel A\n");
/* Display actual values */
dev_info(mcde->dev, "HSW: %d, HFP: %d, HBP: %d, VSW: %d, VFP: %d, VBP: %d\n",
hsw, hfp, hbp, vsw, vfp, vbp);
/*
* The pixel fetcher is 128 64-bit words deep = 1024 bytes.
* One overlay of 32bpp (4 cpp) assumed, fetch 160 pixels.
* 160 * 4 = 640 bytes.
*/
*fifo_wtrmrk_lvl = 640;
/* Set up the main control, watermark level at 7 */
val = 7 << MCDE_CONF0_IFIFOCTRLWTRMRKLVL_SHIFT;
/*
* This sets up the internal silicon muxing of the DPI
* lines. This is how the silicon connects out to the
* external pins, then the pins need to be further
* configured into "alternate functions" using pin control
* to actually get the signals out.
*
* FIXME: this is hardcoded to the only setting found in
* the wild. If we need to use different settings for
* different DPI displays, make this parameterizable from
* the device tree.
*/
/* 24 bits DPI: connect Ch A LSB to D[0:7] */
val |= 0 << MCDE_CONF0_OUTMUX0_SHIFT;
/* 24 bits DPI: connect Ch A MID to D[8:15] */
val |= 1 << MCDE_CONF0_OUTMUX1_SHIFT;
/* Don't care about this muxing */
val |= 0 << MCDE_CONF0_OUTMUX2_SHIFT;
/* Don't care about this muxing */
val |= 0 << MCDE_CONF0_OUTMUX3_SHIFT;
/* 24 bits DPI: connect Ch A MSB to D[32:39] */
val |= 2 << MCDE_CONF0_OUTMUX4_SHIFT;
/* Syncmux bits zero: DPI channel A */
writel(val, mcde->regs + MCDE_CONF0);
/* This hammers us into LCD mode */
writel(0, mcde->regs + MCDE_TVCRA);
/* Front porch and sync width */
val = (vsw << MCDE_TVBL1_BEL1_SHIFT);
val |= (vfp << MCDE_TVBL1_BSL1_SHIFT);
writel(val, mcde->regs + MCDE_TVBL1A);
/* The vendor driver sets the same value into TVBL2A */
writel(val, mcde->regs + MCDE_TVBL2A);
/* Vertical back porch */
val = (vbp << MCDE_TVDVO_DVO1_SHIFT);
/* The vendor driver sets the same value into TVDVOA */
val |= (vbp << MCDE_TVDVO_DVO2_SHIFT);
writel(val, mcde->regs + MCDE_TVDVOA);
/* Horizontal back porch, as 0 = 1 cycle we need to subtract 1 */
writel((hbp - 1), mcde->regs + MCDE_TVTIM1A);
/* Horizontal sync width and horizontal front porch, 0 = 1 cycle */
val = ((hsw - 1) << MCDE_TVLBALW_LBW_SHIFT);
val |= ((hfp - 1) << MCDE_TVLBALW_ALW_SHIFT);
writel(val, mcde->regs + MCDE_TVLBALWA);
/* Blank some TV registers we don't use */
writel(0, mcde->regs + MCDE_TVISLA);
writel(0, mcde->regs + MCDE_TVBLUA);
/* Set up sync inversion etc */
val = 0;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
val |= MCDE_LCDTIM1B_IHS;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
val |= MCDE_LCDTIM1B_IVS;
if (connector->display_info.bus_flags & DRM_BUS_FLAG_DE_LOW)
val |= MCDE_LCDTIM1B_IOE;
if (connector->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
val |= MCDE_LCDTIM1B_IPC;
writel(val, mcde->regs + MCDE_LCDTIM1A);
}
static void mcde_setup_dsi(struct mcde *mcde, const struct drm_display_mode *mode,
int cpp, int *fifo_wtrmrk_lvl, int *dsi_formatter_frame,
int *dsi_pkt_size)
{
u32 formatter_ppl = mode->hdisplay; /* pixels per line */
u32 formatter_lpf = mode->vdisplay; /* lines per frame */
int formatter_frame;
int formatter_cpp;
int fifo_wtrmrk;
u32 pkt_div;
int pkt_size;
u32 val;
dev_info(mcde->dev, "output in %s mode, format %dbpp\n",
(mcde->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) ?
"VIDEO" : "CMD",
mipi_dsi_pixel_format_to_bpp(mcde->mdsi->format));
formatter_cpp =
mipi_dsi_pixel_format_to_bpp(mcde->mdsi->format) / 8;
dev_info(mcde->dev, "Overlay CPP: %d bytes, DSI formatter CPP %d bytes\n",
cpp, formatter_cpp);
/* Set up the main control, watermark level at 7 */
val = 7 << MCDE_CONF0_IFIFOCTRLWTRMRKLVL_SHIFT;
/*
* This is the internal silicon muxing of the DPI
* (parallel display) lines. Since we are not using
* this at all (we are using DSI) these are just
* dummy values from the vendor tree.
*/
val |= 3 << MCDE_CONF0_OUTMUX0_SHIFT;
val |= 3 << MCDE_CONF0_OUTMUX1_SHIFT;
val |= 0 << MCDE_CONF0_OUTMUX2_SHIFT;
val |= 4 << MCDE_CONF0_OUTMUX3_SHIFT;
val |= 5 << MCDE_CONF0_OUTMUX4_SHIFT;
writel(val, mcde->regs + MCDE_CONF0);
/* Calculations from mcde_fmtr_dsi.c, fmtr_dsi_enable_video() */
/*
* Set up FIFO A watermark level:
* 128 for LCD 32bpp video mode
* 48 for LCD 32bpp command mode
* 128 for LCD 16bpp video mode
* 64 for LCD 16bpp command mode
* 128 for HDMI 32bpp
* 192 for HDMI 16bpp
*/
fifo_wtrmrk = mode->hdisplay;
if (mcde->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
fifo_wtrmrk = min(fifo_wtrmrk, 128);
pkt_div = 1;
} else {
fifo_wtrmrk = min(fifo_wtrmrk, 48);
/* The FIFO is 640 entries deep on this v3 hardware */
pkt_div = mcde_dsi_get_pkt_div(mode->hdisplay, 640);
}
dev_dbg(mcde->dev, "FIFO watermark after flooring: %d bytes\n",
fifo_wtrmrk);
dev_dbg(mcde->dev, "Packet divisor: %d bytes\n", pkt_div);
/* NOTE: pkt_div is 1 for video mode */
pkt_size = (formatter_ppl * formatter_cpp) / pkt_div;
/* Commands CMD8 need one extra byte */
if (!(mcde->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO))
pkt_size++;
dev_dbg(mcde->dev, "DSI packet size: %d * %d bytes per line\n",
pkt_size, pkt_div);
dev_dbg(mcde->dev, "Overlay frame size: %u bytes\n",
mode->hdisplay * mode->vdisplay * cpp);
/* NOTE: pkt_div is 1 for video mode */
formatter_frame = pkt_size * pkt_div * formatter_lpf;
dev_dbg(mcde->dev, "Formatter frame size: %u bytes\n", formatter_frame);
*fifo_wtrmrk_lvl = fifo_wtrmrk;
*dsi_pkt_size = pkt_size;
*dsi_formatter_frame = formatter_frame;
}
static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *cstate,
struct drm_plane_state *plane_state)
{
struct drm_crtc *crtc = &pipe->crtc;
struct drm_plane *plane = &pipe->plane;
struct drm_device *drm = crtc->dev;
struct mcde *mcde = to_mcde(drm);
const struct drm_display_mode *mode = &cstate->mode;
struct drm_framebuffer *fb = plane->state->fb;
u32 format = fb->format->format;
int dsi_pkt_size;
int fifo_wtrmrk;
int cpp = fb->format->cpp[0];
u32 dsi_formatter_frame;
u32 val;
int ret;
/* This powers up the entire MCDE block and the DSI hardware */
ret = regulator_enable(mcde->epod);
if (ret) {
dev_err(drm->dev, "can't re-enable EPOD regulator\n");
return;
}
dev_info(drm->dev, "enable MCDE, %d x %d format %p4cc\n",
mode->hdisplay, mode->vdisplay, &format);
/* Clear any pending interrupts */
mcde_display_disable_irqs(mcde);
writel(0, mcde->regs + MCDE_IMSCERR);
writel(0xFFFFFFFF, mcde->regs + MCDE_RISERR);
if (mcde->dpi_output)
mcde_setup_dpi(mcde, mode, &fifo_wtrmrk);
else
mcde_setup_dsi(mcde, mode, cpp, &fifo_wtrmrk,
&dsi_formatter_frame, &dsi_pkt_size);
mcde->stride = mode->hdisplay * cpp;
dev_dbg(drm->dev, "Overlay line stride: %u bytes\n",
mcde->stride);
/* Drain the FIFO A + channel 0 pipe so we have a clean slate */
mcde_drain_pipe(mcde, MCDE_FIFO_A, MCDE_CHANNEL_0);
/*
* We set up our display pipeline:
* EXTSRC 0 -> OVERLAY 0 -> CHANNEL 0 -> FIFO A -> DSI FORMATTER 0
*
* First configure the external source (memory) on external source 0
* using the desired bitstream/bitmap format
*/
mcde_configure_extsrc(mcde, MCDE_EXTSRC_0, format);
/*
* Configure overlay 0 according to format and mode and take input
* from external source 0 and route the output of this overlay to
* channel 0
*/
mcde_configure_overlay(mcde, MCDE_OVERLAY_0, MCDE_EXTSRC_0,
MCDE_CHANNEL_0, mode, format, cpp);
/*
* Configure pixel-per-line and line-per-frame for channel 0 and then
* route channel 0 to FIFO A
*/
mcde_configure_channel(mcde, MCDE_CHANNEL_0, MCDE_FIFO_A, mode);
if (mcde->dpi_output) {
unsigned long lcd_freq;
/* Configure FIFO A to use DPI formatter 0 */
mcde_configure_fifo(mcde, MCDE_FIFO_A, MCDE_DPI_FORMATTER_0,
fifo_wtrmrk);
/* Set up and enable the LCD clock */
lcd_freq = clk_round_rate(mcde->fifoa_clk, mode->clock * 1000);
ret = clk_set_rate(mcde->fifoa_clk, lcd_freq);
if (ret)
dev_err(mcde->dev, "failed to set LCD clock rate %lu Hz\n",
lcd_freq);
ret = clk_prepare_enable(mcde->fifoa_clk);
if (ret) {
dev_err(mcde->dev, "failed to enable FIFO A DPI clock\n");
return;
}
dev_info(mcde->dev, "LCD FIFO A clk rate %lu Hz\n",
clk_get_rate(mcde->fifoa_clk));
} else {
/* Configure FIFO A to use DSI formatter 0 */
mcde_configure_fifo(mcde, MCDE_FIFO_A, MCDE_DSI_FORMATTER_0,
fifo_wtrmrk);
/*
* This brings up the DSI bridge which is tightly connected
* to the MCDE DSI formatter.
*/
mcde_dsi_enable(mcde->bridge);
/* Configure the DSI formatter 0 for the DSI panel output */
mcde_configure_dsi_formatter(mcde, MCDE_DSI_FORMATTER_0,
dsi_formatter_frame, dsi_pkt_size);
}
switch (mcde->flow_mode) {
case MCDE_COMMAND_TE_FLOW:
case MCDE_COMMAND_BTA_TE_FLOW:
case MCDE_VIDEO_TE_FLOW:
/* We are using TE in some combination */
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
val = MCDE_VSCRC_VSPOL;
else
val = 0;
writel(val, mcde->regs + MCDE_VSCRC0);
/* Enable VSYNC capture on TE0 */
val = readl(mcde->regs + MCDE_CRC);
val |= MCDE_CRC_SYCEN0;
writel(val, mcde->regs + MCDE_CRC);
break;
default:
/* No TE capture */
break;
}
drm_crtc_vblank_on(crtc);
/*
* If we're using oneshot mode we don't start the flow
* until each time the display is given an update, and
* then we disable it immediately after. For all other
* modes (command or video) we start the FIFO flow
* right here. This is necessary for the hardware to
* behave right.
*/
if (mcde->flow_mode != MCDE_COMMAND_ONESHOT_FLOW) {
mcde_enable_fifo(mcde, MCDE_FIFO_A);
dev_dbg(mcde->dev, "started MCDE video FIFO flow\n");
}
/* Enable MCDE with automatic clock gating */
val = readl(mcde->regs + MCDE_CR);
val |= MCDE_CR_MCDEEN | MCDE_CR_AUTOCLKG_EN;
writel(val, mcde->regs + MCDE_CR);
dev_info(drm->dev, "MCDE display is enabled\n");
}
static void mcde_display_disable(struct drm_simple_display_pipe *pipe)
{
struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *drm = crtc->dev;
struct mcde *mcde = to_mcde(drm);
struct drm_pending_vblank_event *event;
int ret;
drm_crtc_vblank_off(crtc);
/* Disable FIFO A flow */
mcde_disable_fifo(mcde, MCDE_FIFO_A, true);
if (mcde->dpi_output) {
clk_disable_unprepare(mcde->fifoa_clk);
} else {
/* This disables the DSI bridge */
mcde_dsi_disable(mcde->bridge);
}
event = crtc->state->event;
if (event) {
crtc->state->event = NULL;
spin_lock_irq(&crtc->dev->event_lock);
drm_crtc_send_vblank_event(crtc, event);
spin_unlock_irq(&crtc->dev->event_lock);
}
ret = regulator_disable(mcde->epod);
if (ret)
dev_err(drm->dev, "can't disable EPOD regulator\n");
/* Make sure we are powered down (before we may power up again) */
usleep_range(50000, 70000);
dev_info(drm->dev, "MCDE display is disabled\n");
}
static void mcde_start_flow(struct mcde *mcde)
{
/* Request a TE ACK only in TE+BTA mode */
if (mcde->flow_mode == MCDE_COMMAND_BTA_TE_FLOW)
mcde_dsi_te_request(mcde->mdsi);
/* Enable FIFO A flow */
mcde_enable_fifo(mcde, MCDE_FIFO_A);
/*
* If oneshot mode is enabled, the flow will be disabled
* when the TE0 IRQ arrives in the interrupt handler. Otherwise
* updates are continuously streamed to the display after this
* point.
*/
if (mcde->flow_mode == MCDE_COMMAND_ONESHOT_FLOW) {
/* Trigger a software sync out on channel 0 */
writel(MCDE_CHNLXSYNCHSW_SW_TRIG,
mcde->regs + MCDE_CHNL0SYNCHSW);
/*
* Disable FIFO A flow again: since we are using TE sync we
* need to wait for the FIFO to drain before we continue
* so repeated calls to this function will not cause a mess
* in the hardware by pushing updates while updates are
* already going on.
*/
mcde_disable_fifo(mcde, MCDE_FIFO_A, true);
}
dev_dbg(mcde->dev, "started MCDE FIFO flow\n");
}
static void mcde_set_extsrc(struct mcde *mcde, u32 buffer_address)
{
/* Write bitmap base address to register */
writel(buffer_address, mcde->regs + MCDE_EXTSRCXA0);
/*
* Base address for the next line; this is probably only used
* in interlaced modes.
*/
writel(buffer_address + mcde->stride, mcde->regs + MCDE_EXTSRCXA1);
}
static void mcde_display_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_pstate)
{
struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *drm = crtc->dev;
struct mcde *mcde = to_mcde(drm);
struct drm_pending_vblank_event *event = crtc->state->event;
struct drm_plane *plane = &pipe->plane;
struct drm_plane_state *pstate = plane->state;
struct drm_framebuffer *fb = pstate->fb;
/*
* Handle any pending event first, we need to arm the vblank
* interrupt before sending any update to the display so we don't
* miss the interrupt.
*/
if (event) {
crtc->state->event = NULL;
spin_lock_irq(&crtc->dev->event_lock);
/*
* Hardware must be on before we can arm any vblank event.
* This is not a scanout controller where there is always
* some periodic update going on; it is completely frozen
* until we get an update. If MCDE output isn't yet enabled,
* we just send a dummy vblank event back.
*/
if (crtc->state->active && drm_crtc_vblank_get(crtc) == 0) {
dev_dbg(mcde->dev, "arm vblank event\n");
drm_crtc_arm_vblank_event(crtc, event);
} else {
dev_dbg(mcde->dev, "insert fake vblank event\n");
drm_crtc_send_vblank_event(crtc, event);
}
spin_unlock_irq(&crtc->dev->event_lock);
}
/*
* We do not start sending framebuffer updates before the
* display is enabled. Update events will however be dispatched
* from the DRM core before the display is enabled.
*/
if (fb) {
mcde_set_extsrc(mcde, drm_fb_dma_get_gem_addr(fb, pstate, 0));
dev_info_once(mcde->dev, "first update of display contents\n");
/*
* Usually the flow is already active, unless we are in
* oneshot mode, then we need to kick the flow right here.
*/
if (mcde->flow_active == 0)
mcde_start_flow(mcde);
} else {
/*
* If an update is received before the MCDE is enabled
* (before mcde_display_enable() is called) we can't really
* do much with that buffer.
*/
dev_info(mcde->dev, "ignored a display update\n");
}
}
static int mcde_display_enable_vblank(struct drm_simple_display_pipe *pipe)
{
struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *drm = crtc->dev;
struct mcde *mcde = to_mcde(drm);
u32 val;
/* Enable all VBLANK IRQs */
val = MCDE_PP_VCMPA |
MCDE_PP_VCMPB |
MCDE_PP_VSCC0 |
MCDE_PP_VSCC1 |
MCDE_PP_VCMPC0 |
MCDE_PP_VCMPC1;
writel(val, mcde->regs + MCDE_IMSCPP);
return 0;
}
static void mcde_display_disable_vblank(struct drm_simple_display_pipe *pipe)
{
struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *drm = crtc->dev;
struct mcde *mcde = to_mcde(drm);
/* Disable all VBLANK IRQs */
writel(0, mcde->regs + MCDE_IMSCPP);
/* Clear any pending IRQs */
writel(0xFFFFFFFF, mcde->regs + MCDE_RISPP);
}
static struct drm_simple_display_pipe_funcs mcde_display_funcs = {
.check = mcde_display_check,
.enable = mcde_display_enable,
.disable = mcde_display_disable,
.update = mcde_display_update,
.enable_vblank = mcde_display_enable_vblank,
.disable_vblank = mcde_display_disable_vblank,
};
int mcde_display_init(struct drm_device *drm)
{
struct mcde *mcde = to_mcde(drm);
int ret;
static const u32 formats[] = {
DRM_FORMAT_ARGB8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_RGB888,
DRM_FORMAT_BGR888,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_ABGR4444,
DRM_FORMAT_XRGB4444,
DRM_FORMAT_XBGR4444,
/* These are actually IRGB1555 so the intensity bit is lost */
DRM_FORMAT_XRGB1555,
DRM_FORMAT_XBGR1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_BGR565,
DRM_FORMAT_YUV422,
};
ret = mcde_init_clock_divider(mcde);
if (ret)
return ret;
ret = drm_simple_display_pipe_init(drm, &mcde->pipe,
&mcde_display_funcs,
formats, ARRAY_SIZE(formats),
NULL,
mcde->connector);
if (ret)
return ret;
return 0;
}
EXPORT_SYMBOL_GPL(mcde_display_init);
| linux-master | drivers/gpu/drm/mcde/mcde_display.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Hisilicon Hibmc SoC drm driver
*
* Based on the bochs drm driver.
*
* Copyright (c) 2016 Huawei Limited.
*
* Author:
* Rongrong Zou <[email protected]>
* Rongrong Zou <[email protected]>
* Jianhua Li <[email protected]>
*/
#include <linux/io.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_simple_kms_helper.h>
#include "hibmc_drm_drv.h"
#include "hibmc_drm_regs.h"
static int hibmc_connector_get_modes(struct drm_connector *connector)
{
int count;
void *edid;
struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector);
edid = drm_get_edid(connector, &hibmc_connector->adapter);
if (edid) {
drm_connector_update_edid_property(connector, edid);
count = drm_add_edid_modes(connector, edid);
if (count)
goto out;
}
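/* No usable EDID: fall back to standard modes and prefer 1024x768 */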
count = drm_add_modes_noedid(connector,
connector->dev->mode_config.max_width,
connector->dev->mode_config.max_height);
drm_set_preferred_mode(connector, 1024, 768);
out:
kfree(edid);
return count;
}
static void hibmc_connector_destroy(struct drm_connector *connector)
{
struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector);
i2c_del_adapter(&hibmc_connector->adapter);
drm_connector_cleanup(connector);
}
static const struct drm_connector_helper_funcs
hibmc_connector_helper_funcs = {
.get_modes = hibmc_connector_get_modes,
};
static const struct drm_connector_funcs hibmc_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = hibmc_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static void hibmc_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
u32 reg;
struct drm_device *dev = encoder->dev;
struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
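/*
 * Enable the flat panel output path; judging by the register field
 * names this turns on panel VDD, the panel data path, the FP output
 * and the V-bias.
 */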
reg = readl(priv->mmio + HIBMC_DISPLAY_CONTROL_HISILE);
reg |= HIBMC_DISPLAY_CONTROL_FPVDDEN(1);
reg |= HIBMC_DISPLAY_CONTROL_PANELDATE(1);
reg |= HIBMC_DISPLAY_CONTROL_FPEN(1);
reg |= HIBMC_DISPLAY_CONTROL_VBIASEN(1);
writel(reg, priv->mmio + HIBMC_DISPLAY_CONTROL_HISILE);
}
static const struct drm_encoder_helper_funcs hibmc_encoder_helper_funcs = {
.mode_set = hibmc_encoder_mode_set,
};
int hibmc_vdac_init(struct hibmc_drm_private *priv)
{
struct drm_device *dev = &priv->dev;
struct hibmc_connector *hibmc_connector = &priv->connector;
struct drm_encoder *encoder = &priv->encoder;
struct drm_crtc *crtc = &priv->crtc;
struct drm_connector *connector = &hibmc_connector->base;
int ret;
ret = hibmc_ddc_create(dev, hibmc_connector);
if (ret) {
drm_err(dev, "failed to create ddc: %d\n", ret);
return ret;
}
encoder->possible_crtcs = drm_crtc_mask(crtc);
ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
if (ret) {
drm_err(dev, "failed to init encoder: %d\n", ret);
return ret;
}
drm_encoder_helper_add(encoder, &hibmc_encoder_helper_funcs);
ret = drm_connector_init_with_ddc(dev, connector,
&hibmc_connector_funcs,
DRM_MODE_CONNECTOR_VGA,
&hibmc_connector->adapter);
if (ret) {
drm_err(dev, "failed to init connector: %d\n", ret);
return ret;
}
drm_connector_helper_add(connector, &hibmc_connector_helper_funcs);
drm_connector_attach_encoder(connector, encoder);
return 0;
}
| linux-master | drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Hisilicon Hibmc SoC drm driver
*
* Based on the bochs drm driver.
*
* Copyright (c) 2016 Huawei Limited.
*
* Author:
* Tian Tao <[email protected]>
*/
#include <linux/delay.h>
#include <linux/pci.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include "hibmc_drm_drv.h"
#define GPIO_DATA 0x0802A0
#define GPIO_DATA_DIRECTION 0x0802A4
#define I2C_SCL_MASK BIT(0)
#define I2C_SDA_MASK BIT(1)
static void hibmc_set_i2c_signal(void *data, u32 mask, int value)
{
struct hibmc_connector *hibmc_connector = data;
struct hibmc_drm_private *priv = to_hibmc_drm_private(hibmc_connector->base.dev);
u32 tmp_dir = readl(priv->mmio + GPIO_DATA_DIRECTION);
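/*
 * Open-drain style bit-banging: a logical 1 releases the line by
 * turning the GPIO into an input (the bus pull-up takes it high),
 * while a logical 0 actively drives the line low by switching the
 * GPIO to output with the data bit cleared.
 */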
if (value) {
tmp_dir &= ~mask;
writel(tmp_dir, priv->mmio + GPIO_DATA_DIRECTION);
} else {
u32 tmp_data = readl(priv->mmio + GPIO_DATA);
tmp_data &= ~mask;
writel(tmp_data, priv->mmio + GPIO_DATA);
tmp_dir |= mask;
writel(tmp_dir, priv->mmio + GPIO_DATA_DIRECTION);
}
}
static int hibmc_get_i2c_signal(void *data, u32 mask)
{
struct hibmc_connector *hibmc_connector = data;
struct hibmc_drm_private *priv = to_hibmc_drm_private(hibmc_connector->base.dev);
u32 tmp_dir = readl(priv->mmio + GPIO_DATA_DIRECTION);
if ((tmp_dir & mask) != mask) {
tmp_dir &= ~mask;
writel(tmp_dir, priv->mmio + GPIO_DATA_DIRECTION);
}
return (readl(priv->mmio + GPIO_DATA) & mask) ? 1 : 0;
}
static void hibmc_ddc_setsda(void *data, int state)
{
hibmc_set_i2c_signal(data, I2C_SDA_MASK, state);
}
static void hibmc_ddc_setscl(void *data, int state)
{
hibmc_set_i2c_signal(data, I2C_SCL_MASK, state);
}
static int hibmc_ddc_getsda(void *data)
{
return hibmc_get_i2c_signal(data, I2C_SDA_MASK);
}
static int hibmc_ddc_getscl(void *data)
{
return hibmc_get_i2c_signal(data, I2C_SCL_MASK);
}
int hibmc_ddc_create(struct drm_device *drm_dev,
struct hibmc_connector *connector)
{
connector->adapter.owner = THIS_MODULE;
connector->adapter.class = I2C_CLASS_DDC;
snprintf(connector->adapter.name, I2C_NAME_SIZE, "HIS i2c bit bus");
connector->adapter.dev.parent = drm_dev->dev;
i2c_set_adapdata(&connector->adapter, connector);
connector->adapter.algo_data = &connector->bit_data;
connector->bit_data.udelay = 20;
connector->bit_data.timeout = usecs_to_jiffies(2000);
connector->bit_data.data = connector;
connector->bit_data.setsda = hibmc_ddc_setsda;
connector->bit_data.setscl = hibmc_ddc_setscl;
connector->bit_data.getsda = hibmc_ddc_getsda;
connector->bit_data.getscl = hibmc_ddc_getscl;
return i2c_bit_add_bus(&connector->adapter);
}
| linux-master | drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Hisilicon Hibmc SoC drm driver
*
* Based on the bochs drm driver.
*
* Copyright (c) 2016 Huawei Limited.
*
* Author:
* Rongrong Zou <[email protected]>
* Rongrong Zou <[email protected]>
* Jianhua Li <[email protected]>
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_module.h>
#include <drm/drm_vblank.h>
#include "hibmc_drm_drv.h"
#include "hibmc_drm_regs.h"
DEFINE_DRM_GEM_FOPS(hibmc_fops);
static irqreturn_t hibmc_interrupt(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *)arg;
struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
u32 status;
status = readl(priv->mmio + HIBMC_RAW_INTERRUPT);
if (status & HIBMC_RAW_INTERRUPT_VBLANK(1)) {
writel(HIBMC_RAW_INTERRUPT_VBLANK(1),
priv->mmio + HIBMC_RAW_INTERRUPT);
drm_handle_vblank(dev, 0);
}
return IRQ_HANDLED;
}
static int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
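/* 0: no extra page alignment, 128: scanline pitch alignment in bytes */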
return drm_gem_vram_fill_create_dumb(file, dev, 0, 128, args);
}
static const struct drm_driver hibmc_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &hibmc_fops,
.name = "hibmc",
.date = "20160828",
.desc = "hibmc drm driver",
.major = 1,
.minor = 0,
.debugfs_init = drm_vram_mm_debugfs_init,
.dumb_create = hibmc_dumb_create,
.dumb_map_offset = drm_gem_ttm_dumb_map_offset,
};
static int __maybe_unused hibmc_pm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
return drm_mode_config_helper_suspend(drm_dev);
}
static int __maybe_unused hibmc_pm_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
return drm_mode_config_helper_resume(drm_dev);
}
static const struct dev_pm_ops hibmc_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(hibmc_pm_suspend,
hibmc_pm_resume)
};
static const struct drm_mode_config_funcs hibmc_mode_funcs = {
.mode_valid = drm_vram_helper_mode_valid,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
.fb_create = drm_gem_fb_create,
};
static int hibmc_kms_init(struct hibmc_drm_private *priv)
{
struct drm_device *dev = &priv->dev;
int ret;
ret = drmm_mode_config_init(dev);
if (ret)
return ret;
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
dev->mode_config.max_width = 1920;
dev->mode_config.max_height = 1200;
dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
dev->mode_config.funcs = (void *)&hibmc_mode_funcs;
ret = hibmc_de_init(priv);
if (ret) {
drm_err(dev, "failed to init de: %d\n", ret);
return ret;
}
ret = hibmc_vdac_init(priv);
if (ret) {
drm_err(dev, "failed to init vdac: %d\n", ret);
return ret;
}
return 0;
}
/*
* The power control can operate in one of three modes: 0, 1 or Sleep.
*/
void hibmc_set_power_mode(struct hibmc_drm_private *priv, u32 power_mode)
{
u32 control_value = 0;
void __iomem *mmio = priv->mmio;
u32 input = 1;
if (power_mode > HIBMC_PW_MODE_CTL_MODE_SLEEP)
return;
if (power_mode == HIBMC_PW_MODE_CTL_MODE_SLEEP)
input = 0;
control_value = readl(mmio + HIBMC_POWER_MODE_CTRL);
control_value &= ~(HIBMC_PW_MODE_CTL_MODE_MASK |
HIBMC_PW_MODE_CTL_OSC_INPUT_MASK);
control_value |= HIBMC_FIELD(HIBMC_PW_MODE_CTL_MODE, power_mode);
control_value |= HIBMC_FIELD(HIBMC_PW_MODE_CTL_OSC_INPUT, input);
writel(control_value, mmio + HIBMC_POWER_MODE_CTRL);
}
void hibmc_set_current_gate(struct hibmc_drm_private *priv, unsigned int gate)
{
u32 gate_reg;
u32 mode;
void __iomem *mmio = priv->mmio;
/* Get current power mode. */
mode = (readl(mmio + HIBMC_POWER_MODE_CTRL) &
HIBMC_PW_MODE_CTL_MODE_MASK) >> HIBMC_PW_MODE_CTL_MODE_SHIFT;
switch (mode) {
case HIBMC_PW_MODE_CTL_MODE_MODE0:
gate_reg = HIBMC_MODE0_GATE;
break;
case HIBMC_PW_MODE_CTL_MODE_MODE1:
gate_reg = HIBMC_MODE1_GATE;
break;
default:
gate_reg = HIBMC_MODE0_GATE;
break;
}
writel(gate, mmio + gate_reg);
}
static void hibmc_hw_config(struct hibmc_drm_private *priv)
{
u32 reg;
/* On hardware reset, power mode 0 is default. */
hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_MODE0);
/* Enable display power gate & LOCALMEM power gate*/
reg = readl(priv->mmio + HIBMC_CURRENT_GATE);
reg &= ~HIBMC_CURR_GATE_DISPLAY_MASK;
reg &= ~HIBMC_CURR_GATE_LOCALMEM_MASK;
reg |= HIBMC_CURR_GATE_DISPLAY(1);
reg |= HIBMC_CURR_GATE_LOCALMEM(1);
hibmc_set_current_gate(priv, reg);
/*
* Reset the memory controller. If the memory controller
* is not reset in the chip, the system might hang when
* software accesses the memory. The memory controller
* should be reset after changing the MXCLK.
*/
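/* Pulse the local memory reset: write 0 and then 1 to the reset field */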
reg = readl(priv->mmio + HIBMC_MISC_CTRL);
reg &= ~HIBMC_MSCCTL_LOCALMEM_RESET_MASK;
reg |= HIBMC_MSCCTL_LOCALMEM_RESET(0);
writel(reg, priv->mmio + HIBMC_MISC_CTRL);
reg &= ~HIBMC_MSCCTL_LOCALMEM_RESET_MASK;
reg |= HIBMC_MSCCTL_LOCALMEM_RESET(1);
writel(reg, priv->mmio + HIBMC_MISC_CTRL);
}
static int hibmc_hw_map(struct hibmc_drm_private *priv)
{
struct drm_device *dev = &priv->dev;
struct pci_dev *pdev = to_pci_dev(dev->dev);
resource_size_t ioaddr, iosize;
ioaddr = pci_resource_start(pdev, 1);
iosize = pci_resource_len(pdev, 1);
priv->mmio = devm_ioremap(dev->dev, ioaddr, iosize);
if (!priv->mmio) {
drm_err(dev, "Cannot map mmio region\n");
return -ENOMEM;
}
return 0;
}
static int hibmc_hw_init(struct hibmc_drm_private *priv)
{
int ret;
ret = hibmc_hw_map(priv);
if (ret)
return ret;
hibmc_hw_config(priv);
return 0;
}
static int hibmc_unload(struct drm_device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
drm_atomic_helper_shutdown(dev);
free_irq(pdev->irq, dev);
pci_disable_msi(to_pci_dev(dev->dev));
return 0;
}
static int hibmc_load(struct drm_device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
int ret;
ret = hibmc_hw_init(priv);
if (ret)
goto err;
ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
if (ret) {
drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
goto err;
}
ret = hibmc_kms_init(priv);
if (ret)
goto err;
ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
if (ret) {
drm_err(dev, "failed to initialize vblank: %d\n", ret);
goto err;
}
ret = pci_enable_msi(pdev);
if (ret) {
drm_warn(dev, "enabling MSI failed: %d\n", ret);
} else {
/* PCI devices require shared interrupts. */
ret = request_irq(pdev->irq, hibmc_interrupt, IRQF_SHARED,
dev->driver->name, dev);
if (ret)
drm_warn(dev, "install irq failed: %d\n", ret);
}
/* reset all the states of crtc/plane/encoder/connector */
drm_mode_config_reset(dev);
return 0;
err:
hibmc_unload(dev);
drm_err(dev, "failed to initialize drm driver: %d\n", ret);
return ret;
}
static int hibmc_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct hibmc_drm_private *priv;
struct drm_device *dev;
int ret;
ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &hibmc_driver);
if (ret)
return ret;
priv = devm_drm_dev_alloc(&pdev->dev, &hibmc_driver,
struct hibmc_drm_private, dev);
if (IS_ERR(priv)) {
DRM_ERROR("failed to allocate drm_device\n");
return PTR_ERR(priv);
}
dev = &priv->dev;
pci_set_drvdata(pdev, dev);
ret = pcim_enable_device(pdev);
if (ret) {
drm_err(dev, "failed to enable pci device: %d\n", ret);
goto err_return;
}
ret = hibmc_load(dev);
if (ret) {
drm_err(dev, "failed to load hibmc: %d\n", ret);
goto err_return;
}
ret = drm_dev_register(dev, 0);
if (ret) {
drm_err(dev, "failed to register drv for userspace access: %d\n",
ret);
goto err_unload;
}
drm_fbdev_generic_setup(dev, 32);
return 0;
err_unload:
hibmc_unload(dev);
err_return:
return ret;
}
static void hibmc_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
drm_dev_unregister(dev);
hibmc_unload(dev);
}
static const struct pci_device_id hibmc_pci_table[] = {
{ PCI_VDEVICE(HUAWEI, 0x1711) },
{0,}
};
static struct pci_driver hibmc_pci_driver = {
.name = "hibmc-drm",
.id_table = hibmc_pci_table,
.probe = hibmc_pci_probe,
.remove = hibmc_pci_remove,
.driver.pm = &hibmc_pm_ops,
};
drm_module_pci_driver(hibmc_pci_driver);
MODULE_DEVICE_TABLE(pci, hibmc_pci_table);
MODULE_AUTHOR("RongrongZou <[email protected]>");
MODULE_DESCRIPTION("DRM Driver for Hisilicon Hibmc");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Hisilicon Hibmc SoC drm driver
*
* Based on the bochs drm driver.
*
* Copyright (c) 2016 Huawei Limited.
*
* Author:
* Rongrong Zou <[email protected]>
* Rongrong Zou <[email protected]>
* Jianhua Li <[email protected]>
*/
#include <linux/delay.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_vblank.h>
#include "hibmc_drm_drv.h"
#include "hibmc_drm_regs.h"
struct hibmc_display_panel_pll {
u64 M;
u64 N;
u64 OD;
u64 POD;
};
struct hibmc_dislay_pll_config {
u64 hdisplay;
u64 vdisplay;
u32 pll1_config_value;
u32 pll2_config_value;
};
static const struct hibmc_dislay_pll_config hibmc_pll_table[] = {
{640, 480, CRT_PLL1_HS_25MHZ, CRT_PLL2_HS_25MHZ},
{800, 600, CRT_PLL1_HS_40MHZ, CRT_PLL2_HS_40MHZ},
{1024, 768, CRT_PLL1_HS_65MHZ, CRT_PLL2_HS_65MHZ},
{1152, 864, CRT_PLL1_HS_80MHZ_1152, CRT_PLL2_HS_80MHZ},
{1280, 768, CRT_PLL1_HS_80MHZ, CRT_PLL2_HS_80MHZ},
{1280, 720, CRT_PLL1_HS_74MHZ, CRT_PLL2_HS_74MHZ},
{1280, 960, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ},
{1280, 1024, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ},
{1440, 900, CRT_PLL1_HS_106MHZ, CRT_PLL2_HS_106MHZ},
{1600, 900, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ},
{1600, 1200, CRT_PLL1_HS_162MHZ, CRT_PLL2_HS_162MHZ},
{1920, 1080, CRT_PLL1_HS_148MHZ, CRT_PLL2_HS_148MHZ},
{1920, 1200, CRT_PLL1_HS_193MHZ, CRT_PLL2_HS_193MHZ},
};
static int hibmc_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_framebuffer *fb = new_plane_state->fb;
struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_crtc_state *crtc_state;
u32 src_w = new_plane_state->src_w >> 16;
u32 src_h = new_plane_state->src_h >> 16;
if (!crtc || !fb)
return 0;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
if (src_w != new_plane_state->crtc_w || src_h != new_plane_state->crtc_h) {
drm_dbg_atomic(plane->dev, "scale not support\n");
return -EINVAL;
}
if (new_plane_state->crtc_x < 0 || new_plane_state->crtc_y < 0) {
drm_dbg_atomic(plane->dev, "crtc_x/y of drm_plane state is invalid\n");
return -EINVAL;
}
if (!crtc_state->enable)
return 0;
if (new_plane_state->crtc_x + new_plane_state->crtc_w >
crtc_state->adjusted_mode.hdisplay ||
new_plane_state->crtc_y + new_plane_state->crtc_h >
crtc_state->adjusted_mode.vdisplay) {
drm_dbg_atomic(plane->dev, "visible portion of plane is invalid\n");
return -EINVAL;
}
if (new_plane_state->fb->pitches[0] % 128 != 0) {
drm_dbg_atomic(plane->dev, "wrong stride with 128-byte aligned\n");
return -EINVAL;
}
return 0;
}
static void hibmc_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
u32 reg;
s64 gpu_addr = 0;
u32 line_l;
struct hibmc_drm_private *priv = to_hibmc_drm_private(plane->dev);
struct drm_gem_vram_object *gbo;
if (!new_state->fb)
return;
gbo = drm_gem_vram_of_gem(new_state->fb->obj[0]);
gpu_addr = drm_gem_vram_offset(gbo);
if (WARN_ON_ONCE(gpu_addr < 0))
return; /* Bug: we didn't pin the BO to VRAM in prepare_fb. */
writel(gpu_addr, priv->mmio + HIBMC_CRT_FB_ADDRESS);
reg = new_state->fb->width * (new_state->fb->format->cpp[0]);
line_l = new_state->fb->pitches[0];
writel(HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_WIDTH, reg) |
HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_OFFS, line_l),
priv->mmio + HIBMC_CRT_FB_WIDTH);
/* SET PIXEL FORMAT */
reg = readl(priv->mmio + HIBMC_CRT_DISP_CTL);
reg &= ~HIBMC_CRT_DISP_CTL_FORMAT_MASK;
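/* cpp[0] * 8 / 16 maps the framebuffer depth to the format field, e.g. 16 bpp -> 1, 32 bpp -> 2. */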
reg |= HIBMC_FIELD(HIBMC_CRT_DISP_CTL_FORMAT,
new_state->fb->format->cpp[0] * 8 / 16);
writel(reg, priv->mmio + HIBMC_CRT_DISP_CTL);
}
static const u32 channel_formats1[] = {
DRM_FORMAT_RGB565, DRM_FORMAT_BGR565, DRM_FORMAT_RGB888,
DRM_FORMAT_BGR888, DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888,
DRM_FORMAT_RGBA8888, DRM_FORMAT_BGRA8888, DRM_FORMAT_ARGB8888,
DRM_FORMAT_ABGR8888
};
static const struct drm_plane_funcs hibmc_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
static const struct drm_plane_helper_funcs hibmc_plane_helper_funcs = {
DRM_GEM_VRAM_PLANE_HELPER_FUNCS,
.atomic_check = hibmc_plane_atomic_check,
.atomic_update = hibmc_plane_atomic_update,
};
static void hibmc_crtc_dpms(struct drm_crtc *crtc, u32 dpms)
{
struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev);
u32 reg;
reg = readl(priv->mmio + HIBMC_CRT_DISP_CTL);
reg &= ~HIBMC_CRT_DISP_CTL_DPMS_MASK;
reg |= HIBMC_FIELD(HIBMC_CRT_DISP_CTL_DPMS, dpms);
reg &= ~HIBMC_CRT_DISP_CTL_TIMING_MASK;
if (dpms == HIBMC_CRT_DPMS_ON)
reg |= HIBMC_CRT_DISP_CTL_TIMING(1);
writel(reg, priv->mmio + HIBMC_CRT_DISP_CTL);
}
static void hibmc_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
u32 reg;
struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev);
hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_MODE0);
/* Enable display power gate & LOCALMEM power gate */
reg = readl(priv->mmio + HIBMC_CURRENT_GATE);
reg &= ~HIBMC_CURR_GATE_LOCALMEM_MASK;
reg &= ~HIBMC_CURR_GATE_DISPLAY_MASK;
reg |= HIBMC_CURR_GATE_LOCALMEM(1);
reg |= HIBMC_CURR_GATE_DISPLAY(1);
hibmc_set_current_gate(priv, reg);
drm_crtc_vblank_on(crtc);
hibmc_crtc_dpms(crtc, HIBMC_CRT_DPMS_ON);
}
static void hibmc_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
u32 reg;
struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev);
hibmc_crtc_dpms(crtc, HIBMC_CRT_DPMS_OFF);
drm_crtc_vblank_off(crtc);
hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_SLEEP);
/* Disable display power gate & LOCALMEM power gate */
reg = readl(priv->mmio + HIBMC_CURRENT_GATE);
reg &= ~HIBMC_CURR_GATE_LOCALMEM_MASK;
reg &= ~HIBMC_CURR_GATE_DISPLAY_MASK;
reg |= HIBMC_CURR_GATE_LOCALMEM(0);
reg |= HIBMC_CURR_GATE_DISPLAY(0);
hibmc_set_current_gate(priv, reg);
}
static enum drm_mode_status
hibmc_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
size_t i = 0;
int vrefresh = drm_mode_vrefresh(mode);
if (vrefresh < 59 || vrefresh > 61)
return MODE_NOCLOCK;
for (i = 0; i < ARRAY_SIZE(hibmc_pll_table); i++) {
if (hibmc_pll_table[i].hdisplay == mode->hdisplay &&
hibmc_pll_table[i].vdisplay == mode->vdisplay)
return MODE_OK;
}
return MODE_BAD;
}
static u32 format_pll_reg(void)
{
u32 pllreg = 0;
struct hibmc_display_panel_pll pll = {0};
/*
* Note that all PLLs have the same format. Here we just use the
* panel PLL parameters to work out the bit fields in the register.
* The returned 32-bit value can then be applied to any PLL in the
* calling function.
*/
pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_BYPASS, 0);
pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_POWER, 1);
pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_INPUT, 0);
pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_POD, pll.POD);
pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_OD, pll.OD);
pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_N, pll.N);
pllreg |= HIBMC_FIELD(HIBMC_PLL_CTRL_M, pll.M);
return pllreg;
}
static void set_vclock_hisilicon(struct drm_device *dev, u64 pll)
{
u32 val;
struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
val = readl(priv->mmio + CRT_PLL1_HS);
val &= ~(CRT_PLL1_HS_OUTER_BYPASS(1));
writel(val, priv->mmio + CRT_PLL1_HS);
val = CRT_PLL1_HS_INTER_BYPASS(1) | CRT_PLL1_HS_POWERON(1);
writel(val, priv->mmio + CRT_PLL1_HS);
writel(pll, priv->mmio + CRT_PLL1_HS);
usleep_range(1000, 2000);
val = pll & ~(CRT_PLL1_HS_POWERON(1));
writel(val, priv->mmio + CRT_PLL1_HS);
usleep_range(1000, 2000);
val &= ~(CRT_PLL1_HS_INTER_BYPASS(1));
writel(val, priv->mmio + CRT_PLL1_HS);
usleep_range(1000, 2000);
val |= CRT_PLL1_HS_OUTER_BYPASS(1);
writel(val, priv->mmio + CRT_PLL1_HS);
}
static void get_pll_config(u64 x, u64 y, u32 *pll1, u32 *pll2)
{
size_t i;
size_t count = ARRAY_SIZE(hibmc_pll_table);
for (i = 0; i < count; i++) {
if (hibmc_pll_table[i].hdisplay == x &&
hibmc_pll_table[i].vdisplay == y) {
*pll1 = hibmc_pll_table[i].pll1_config_value;
*pll2 = hibmc_pll_table[i].pll2_config_value;
return;
}
}
/* if no match is found, use the default value */
*pll1 = CRT_PLL1_HS_25MHZ;
*pll2 = CRT_PLL2_HS_25MHZ;
}
/*
* This function takes care of the extra registers and bit fields
* required to set up a mode on the board.
* Explanation of the Display Control register:
* the FPGA only supports 7 predefined pixel clocks, and the clock
* select is in bits 4:0 of register 0x802a8.
*/
static u32 display_ctrl_adjust(struct drm_device *dev,
struct drm_display_mode *mode,
u32 ctrl)
{
u64 x, y;
u32 pll1; /* bit[31:0] of PLL */
u32 pll2; /* bit[63:32] of PLL */
struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
x = mode->hdisplay;
y = mode->vdisplay;
get_pll_config(x, y, &pll1, &pll2);
writel(pll2, priv->mmio + CRT_PLL2_HS);
set_vclock_hisilicon(dev, pll1);
/*
* Hisilicon has to set up the top-left and bottom-right
* registers as well.
* Note that a normal chip only uses those two registers for
* auto-centering mode.
*/
writel(HIBMC_FIELD(HIBMC_CRT_AUTO_CENTERING_TL_TOP, 0) |
HIBMC_FIELD(HIBMC_CRT_AUTO_CENTERING_TL_LEFT, 0),
priv->mmio + HIBMC_CRT_AUTO_CENTERING_TL);
writel(HIBMC_FIELD(HIBMC_CRT_AUTO_CENTERING_BR_BOTTOM, y - 1) |
HIBMC_FIELD(HIBMC_CRT_AUTO_CENTERING_BR_RIGHT, x - 1),
priv->mmio + HIBMC_CRT_AUTO_CENTERING_BR);
/*
* Assume common fields in ctrl have been properly set before
* calling this function.
* This function only sets the extra fields in ctrl.
*/
/* Set bit 25 of display controller: Select CRT or VGA clock */
ctrl &= ~HIBMC_CRT_DISP_CTL_CRTSELECT_MASK;
ctrl &= ~HIBMC_CRT_DISP_CTL_CLOCK_PHASE_MASK;
ctrl |= HIBMC_CRT_DISP_CTL_CRTSELECT(HIBMC_CRTSELECT_CRT);
/* clock_phase_polarity is 0 */
ctrl |= HIBMC_CRT_DISP_CTL_CLOCK_PHASE(0);
writel(ctrl, priv->mmio + HIBMC_CRT_DISP_CTL);
return ctrl;
}
static void hibmc_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
u32 val;
struct drm_display_mode *mode = &crtc->state->mode;
struct drm_device *dev = crtc->dev;
struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
u32 width = mode->hsync_end - mode->hsync_start;
u32 height = mode->vsync_end - mode->vsync_start;
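/* width/height here are the hsync/vsync pulse widths, not the active display size. */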
writel(format_pll_reg(), priv->mmio + HIBMC_CRT_PLL_CTRL);
writel(HIBMC_FIELD(HIBMC_CRT_HORZ_TOTAL_TOTAL, mode->htotal - 1) |
HIBMC_FIELD(HIBMC_CRT_HORZ_TOTAL_DISP_END, mode->hdisplay - 1),
priv->mmio + HIBMC_CRT_HORZ_TOTAL);
writel(HIBMC_FIELD(HIBMC_CRT_HORZ_SYNC_WIDTH, width) |
HIBMC_FIELD(HIBMC_CRT_HORZ_SYNC_START, mode->hsync_start - 1),
priv->mmio + HIBMC_CRT_HORZ_SYNC);
writel(HIBMC_FIELD(HIBMC_CRT_VERT_TOTAL_TOTAL, mode->vtotal - 1) |
HIBMC_FIELD(HIBMC_CRT_VERT_TOTAL_DISP_END, mode->vdisplay - 1),
priv->mmio + HIBMC_CRT_VERT_TOTAL);
writel(HIBMC_FIELD(HIBMC_CRT_VERT_SYNC_HEIGHT, height) |
HIBMC_FIELD(HIBMC_CRT_VERT_SYNC_START, mode->vsync_start - 1),
priv->mmio + HIBMC_CRT_VERT_SYNC);
val = HIBMC_FIELD(HIBMC_CRT_DISP_CTL_VSYNC_PHASE, 0);
val |= HIBMC_FIELD(HIBMC_CRT_DISP_CTL_HSYNC_PHASE, 0);
val |= HIBMC_CRT_DISP_CTL_TIMING(1);
val |= HIBMC_CRT_DISP_CTL_PLANE(1);
display_ctrl_adjust(dev, mode, val);
}
static void hibmc_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
u32 reg;
struct drm_device *dev = crtc->dev;
struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_MODE0);
/* Enable display power gate & LOCALMEM power gate */
reg = readl(priv->mmio + HIBMC_CURRENT_GATE);
reg &= ~HIBMC_CURR_GATE_DISPLAY_MASK;
reg &= ~HIBMC_CURR_GATE_LOCALMEM_MASK;
reg |= HIBMC_CURR_GATE_DISPLAY(1);
reg |= HIBMC_CURR_GATE_LOCALMEM(1);
hibmc_set_current_gate(priv, reg);
/* We can add more initialization as needed. */
}
static void hibmc_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
unsigned long flags;
spin_lock_irqsave(&crtc->dev->event_lock, flags);
if (crtc->state->event)
drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
static int hibmc_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev);
writel(HIBMC_RAW_INTERRUPT_EN_VBLANK(1),
priv->mmio + HIBMC_RAW_INTERRUPT_EN);
return 0;
}
static void hibmc_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev);
writel(HIBMC_RAW_INTERRUPT_EN_VBLANK(0),
priv->mmio + HIBMC_RAW_INTERRUPT_EN);
}
static void hibmc_crtc_load_lut(struct drm_crtc *crtc)
{
struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev);
void __iomem *mmio = priv->mmio;
u16 *r, *g, *b;
u32 reg;
u32 i;
r = crtc->gamma_store;
g = r + crtc->gamma_size;
b = g + crtc->gamma_size;
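/* Pack each 16-bit gamma entry down to 8 bits per channel: R in bits 23:16, G in 15:8, B in 7:0. */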
for (i = 0; i < crtc->gamma_size; i++) {
u32 offset = i << 2;
u8 red = *r++ >> 8;
u8 green = *g++ >> 8;
u8 blue = *b++ >> 8;
u32 rgb = (red << 16) | (green << 8) | blue;
writel(rgb, mmio + HIBMC_CRT_PALETTE + offset);
}
reg = readl(priv->mmio + HIBMC_CRT_DISP_CTL);
reg |= HIBMC_FIELD(HIBMC_CTL_DISP_CTL_GAMMA, 1);
writel(reg, priv->mmio + HIBMC_CRT_DISP_CTL);
}
static int hibmc_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t size,
struct drm_modeset_acquire_ctx *ctx)
{
hibmc_crtc_load_lut(crtc);
return 0;
}
static const struct drm_crtc_funcs hibmc_crtc_funcs = {
.page_flip = drm_atomic_helper_page_flip,
.set_config = drm_atomic_helper_set_config,
.destroy = drm_crtc_cleanup,
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = hibmc_crtc_enable_vblank,
.disable_vblank = hibmc_crtc_disable_vblank,
.gamma_set = hibmc_crtc_gamma_set,
};
static const struct drm_crtc_helper_funcs hibmc_crtc_helper_funcs = {
.mode_set_nofb = hibmc_crtc_mode_set_nofb,
.atomic_begin = hibmc_crtc_atomic_begin,
.atomic_flush = hibmc_crtc_atomic_flush,
.atomic_enable = hibmc_crtc_atomic_enable,
.atomic_disable = hibmc_crtc_atomic_disable,
.mode_valid = hibmc_crtc_mode_valid,
};
int hibmc_de_init(struct hibmc_drm_private *priv)
{
struct drm_device *dev = &priv->dev;
struct drm_crtc *crtc = &priv->crtc;
struct drm_plane *plane = &priv->primary_plane;
int ret;
ret = drm_universal_plane_init(dev, plane, 1, &hibmc_plane_funcs,
channel_formats1,
ARRAY_SIZE(channel_formats1),
NULL,
DRM_PLANE_TYPE_PRIMARY,
NULL);
if (ret) {
drm_err(dev, "failed to init plane: %d\n", ret);
return ret;
}
drm_plane_helper_add(plane, &hibmc_plane_helper_funcs);
ret = drm_crtc_init_with_planes(dev, crtc, plane,
NULL, &hibmc_crtc_funcs, NULL);
if (ret) {
drm_err(dev, "failed to init crtc: %d\n", ret);
return ret;
}
ret = drm_mode_crtc_set_gamma_size(crtc, 256);
if (ret) {
drm_err(dev, "failed to set gamma size: %d\n", ret);
return ret;
}
drm_crtc_helper_add(crtc, &hibmc_crtc_helper_funcs);
return 0;
}
| linux-master | drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* DesignWare MIPI DSI Host Controller v1.02 driver
*
* Copyright (c) 2016 Linaro Limited.
* Copyright (c) 2014-2016 HiSilicon Limited.
*
* Author:
* Xinliang Liu <[email protected]>
* Xinliang Liu <[email protected]>
* Xinwei Kong <[email protected]>
*/
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/delay.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_device.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include "dw_dsi_reg.h"
#define MAX_TX_ESC_CLK 10
#define ROUND(x, y) ((x) / (y) + \
((x) % (y) * 10 / (y) >= 5 ? 1 : 0))
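/* ROUND(x, y): x / y rounded to the nearest integer, using one decimal digit of the remainder (halves round up). */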
#define PHY_REF_CLK_RATE 19200000
#define PHY_REF_CLK_PERIOD_PS (1000000000 / (PHY_REF_CLK_RATE / 1000))
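/* 19.2 MHz reference clock -> period of 1000000000 / 19200 ~= 52083 ps. */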
#define encoder_to_dsi(encoder) \
container_of(encoder, struct dw_dsi, encoder)
#define host_to_dsi(host) \
container_of(host, struct dw_dsi, host)
struct mipi_phy_params {
u32 clk_t_lpx;
u32 clk_t_hs_prepare;
u32 clk_t_hs_zero;
u32 clk_t_hs_trial;
u32 clk_t_wakeup;
u32 data_t_lpx;
u32 data_t_hs_prepare;
u32 data_t_hs_zero;
u32 data_t_hs_trial;
u32 data_t_ta_go;
u32 data_t_ta_get;
u32 data_t_wakeup;
u32 hstx_ckg_sel;
u32 pll_fbd_div5f;
u32 pll_fbd_div1f;
u32 pll_fbd_2p;
u32 pll_enbwt;
u32 pll_fbd_p;
u32 pll_fbd_s;
u32 pll_pre_div1p;
u32 pll_pre_p;
u32 pll_vco_750M;
u32 pll_lpf_rs;
u32 pll_lpf_cs;
u32 clklp2hs_time;
u32 clkhs2lp_time;
u32 lp2hs_time;
u32 hs2lp_time;
u32 clk_to_data_delay;
u32 data_to_clk_delay;
u32 lane_byte_clk_kHz;
u32 clk_division;
};
struct dsi_hw_ctx {
void __iomem *base;
struct clk *pclk;
};
struct dw_dsi {
struct drm_encoder encoder;
struct device *dev;
struct mipi_dsi_host host;
struct drm_display_mode cur_mode;
struct dsi_hw_ctx *ctx;
struct mipi_phy_params phy;
u32 lanes;
enum mipi_dsi_pixel_format format;
unsigned long mode_flags;
bool enable;
};
struct dsi_data {
struct dw_dsi dsi;
struct dsi_hw_ctx ctx;
};
struct dsi_phy_range {
u32 min_range_kHz;
u32 max_range_kHz;
u32 pll_vco_750M;
u32 hstx_ckg_sel;
};
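/* Per-band PHY settings: lane clock range in kHz plus the matching VCO range select and HSTX clock-generator select. */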
static const struct dsi_phy_range dphy_range_info[] = {
{ 46875, 62500, 1, 7 },
{ 62500, 93750, 0, 7 },
{ 93750, 125000, 1, 6 },
{ 125000, 187500, 0, 6 },
{ 187500, 250000, 1, 5 },
{ 250000, 375000, 0, 5 },
{ 375000, 500000, 1, 4 },
{ 500000, 750000, 0, 4 },
{ 750000, 1000000, 1, 0 },
{ 1000000, 1500000, 0, 0 }
};
static u32 dsi_calc_phy_rate(u32 req_kHz, struct mipi_phy_params *phy)
{
u32 ref_clk_ps = PHY_REF_CLK_PERIOD_PS;
u32 tmp_kHz = req_kHz;
u32 i = 0;
u32 q_pll = 1;
u32 m_pll = 0;
u32 n_pll = 0;
u32 r_pll = 1;
u32 m_n = 0;
u32 m_n_int = 0;
u32 f_kHz = 0;
u64 temp;
/*
* Find a rate >= req_kHz.
*/
do {
f_kHz = tmp_kHz;
for (i = 0; i < ARRAY_SIZE(dphy_range_info); i++)
if (f_kHz >= dphy_range_info[i].min_range_kHz &&
f_kHz <= dphy_range_info[i].max_range_kHz)
break;
if (i == ARRAY_SIZE(dphy_range_info)) {
DRM_ERROR("%dkHz out of range\n", f_kHz);
return 0;
}
phy->pll_vco_750M = dphy_range_info[i].pll_vco_750M;
phy->hstx_ckg_sel = dphy_range_info[i].hstx_ckg_sel;
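/* hstx_ckg_sel values 4..7 select an output divider q_pll of 2, 4, 8 or 16; other values leave q_pll at 1. */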
if (phy->hstx_ckg_sel <= 7 &&
phy->hstx_ckg_sel >= 4)
q_pll = 0x10 >> (7 - phy->hstx_ckg_sel);
temp = f_kHz * (u64)q_pll * (u64)ref_clk_ps;
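/* temp / 1e9 is the required ratio q_pll * f / f_ref; m_n_int is its integer part and m_n its first decimal digit. */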
m_n_int = temp / (u64)1000000000;
m_n = (temp % (u64)1000000000) / (u64)100000000;
if (m_n_int % 2 == 0) {
if (m_n * 6 >= 50) {
n_pll = 2;
m_pll = (m_n_int + 1) * n_pll;
} else if (m_n * 6 >= 30) {
n_pll = 3;
m_pll = m_n_int * n_pll + 2;
} else {
n_pll = 1;
m_pll = m_n_int * n_pll;
}
} else {
if (m_n * 6 >= 50) {
n_pll = 1;
m_pll = (m_n_int + 1) * n_pll;
} else if (m_n * 6 >= 30) {
n_pll = 1;
m_pll = (m_n_int + 1) * n_pll;
} else if (m_n * 6 >= 10) {
n_pll = 3;
m_pll = m_n_int * n_pll + 1;
} else {
n_pll = 2;
m_pll = m_n_int * n_pll;
}
}
if (n_pll == 1) {
phy->pll_fbd_p = 0;
phy->pll_pre_div1p = 1;
} else {
phy->pll_fbd_p = n_pll;
phy->pll_pre_div1p = 0;
}
if (phy->pll_fbd_2p <= 7 && phy->pll_fbd_2p >= 4)
r_pll = 0x10 >> (7 - phy->pll_fbd_2p);
if (m_pll == 2) {
phy->pll_pre_p = 0;
phy->pll_fbd_s = 0;
phy->pll_fbd_div1f = 0;
phy->pll_fbd_div5f = 1;
} else if (m_pll >= 2 * 2 * r_pll && m_pll <= 2 * 4 * r_pll) {
phy->pll_pre_p = m_pll / (2 * r_pll);
phy->pll_fbd_s = 0;
phy->pll_fbd_div1f = 1;
phy->pll_fbd_div5f = 0;
} else if (m_pll >= 2 * 5 * r_pll && m_pll <= 2 * 150 * r_pll) {
if (((m_pll / (2 * r_pll)) % 2) == 0) {
phy->pll_pre_p =
(m_pll / (2 * r_pll)) / 2 - 1;
phy->pll_fbd_s =
(m_pll / (2 * r_pll)) % 2 + 2;
} else {
phy->pll_pre_p =
(m_pll / (2 * r_pll)) / 2;
phy->pll_fbd_s =
(m_pll / (2 * r_pll)) % 2;
}
phy->pll_fbd_div1f = 0;
phy->pll_fbd_div5f = 0;
} else {
phy->pll_pre_p = 0;
phy->pll_fbd_s = 0;
phy->pll_fbd_div1f = 0;
phy->pll_fbd_div5f = 1;
}
f_kHz = (u64)1000000000 * (u64)m_pll /
((u64)ref_clk_ps * (u64)n_pll * (u64)q_pll);
if (f_kHz >= req_kHz)
break;
tmp_kHz += 10;
} while (true);
return f_kHz;
}
static void dsi_get_phy_params(u32 phy_req_kHz,
struct mipi_phy_params *phy)
{
u32 ref_clk_ps = PHY_REF_CLK_PERIOD_PS;
u32 phy_rate_kHz;
u32 ui;
memset(phy, 0, sizeof(*phy));
phy_rate_kHz = dsi_calc_phy_rate(phy_req_kHz, phy);
if (!phy_rate_kHz)
return;
ui = 1000000 / phy_rate_kHz;
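/* ui: one high-speed bit period in ns (phy_rate_kHz is in kHz, so 1000000 / rate gives ns). */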
phy->clk_t_lpx = ROUND(50, 8 * ui);
phy->clk_t_hs_prepare = ROUND(133, 16 * ui) - 1;
phy->clk_t_hs_zero = ROUND(262, 8 * ui);
phy->clk_t_hs_trial = 2 * (ROUND(60, 8 * ui) - 1);
phy->clk_t_wakeup = ROUND(1000000, (ref_clk_ps / 1000) - 1);
if (phy->clk_t_wakeup > 0xff)
phy->clk_t_wakeup = 0xff;
phy->data_t_wakeup = phy->clk_t_wakeup;
phy->data_t_lpx = phy->clk_t_lpx;
phy->data_t_hs_prepare = ROUND(125 + 10 * ui, 16 * ui) - 1;
phy->data_t_hs_zero = ROUND(105 + 6 * ui, 8 * ui);
phy->data_t_hs_trial = 2 * (ROUND(60 + 4 * ui, 8 * ui) - 1);
phy->data_t_ta_go = 3;
phy->data_t_ta_get = 4;
phy->pll_enbwt = 1;
phy->clklp2hs_time = ROUND(407, 8 * ui) + 12;
phy->clkhs2lp_time = ROUND(105 + 12 * ui, 8 * ui);
phy->lp2hs_time = ROUND(240 + 12 * ui, 8 * ui) + 1;
phy->hs2lp_time = phy->clkhs2lp_time;
phy->clk_to_data_delay = 1 + phy->clklp2hs_time;
phy->data_to_clk_delay = ROUND(60 + 52 * ui, 8 * ui) +
phy->clkhs2lp_time;
phy->lane_byte_clk_kHz = phy_rate_kHz / 8;
phy->clk_division =
DIV_ROUND_UP(phy->lane_byte_clk_kHz, MAX_TX_ESC_CLK);
}
static u32 dsi_get_dpi_color_coding(enum mipi_dsi_pixel_format format)
{
u32 val;
/*
* TODO: only RGB888 is supported now; add support for more formats.
*/
switch (format) {
case MIPI_DSI_FMT_RGB888:
val = DSI_24BITS_1;
break;
default:
val = DSI_24BITS_1;
break;
}
return val;
}
/*
* dsi phy reg write function
*/
static void dsi_phy_tst_set(void __iomem *base, u32 reg, u32 val)
{
u32 reg_write = 0x10000 + reg;
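/* Bit 16 of PHY_TST_CTRL1 marks the write as a register address rather than data (the usual DesignWare PHY test-interface convention). */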
/*
* latch reg first
*/
writel(reg_write, base + PHY_TST_CTRL1);
writel(0x02, base + PHY_TST_CTRL0);
writel(0x00, base + PHY_TST_CTRL0);
/*
* then latch value
*/
writel(val, base + PHY_TST_CTRL1);
writel(0x02, base + PHY_TST_CTRL0);
writel(0x00, base + PHY_TST_CTRL0);
}
static void dsi_set_phy_timer(void __iomem *base,
struct mipi_phy_params *phy,
u32 lanes)
{
u32 val;
/*
* Set lane value and phy stop wait time.
*/
val = (lanes - 1) | (PHY_STOP_WAIT_TIME << 8);
writel(val, base + PHY_IF_CFG);
/*
* Set phy clk division.
*/
val = readl(base + CLKMGR_CFG) | phy->clk_division;
writel(val, base + CLKMGR_CFG);
/*
* Set lp and hs switching params.
*/
dw_update_bits(base + PHY_TMR_CFG, 24, MASK(8), phy->hs2lp_time);
dw_update_bits(base + PHY_TMR_CFG, 16, MASK(8), phy->lp2hs_time);
dw_update_bits(base + PHY_TMR_LPCLK_CFG, 16, MASK(10),
phy->clkhs2lp_time);
dw_update_bits(base + PHY_TMR_LPCLK_CFG, 0, MASK(10),
phy->clklp2hs_time);
dw_update_bits(base + CLK_DATA_TMR_CFG, 8, MASK(8),
phy->data_to_clk_delay);
dw_update_bits(base + CLK_DATA_TMR_CFG, 0, MASK(8),
phy->clk_to_data_delay);
}
static void dsi_set_mipi_phy(void __iomem *base,
struct mipi_phy_params *phy,
u32 lanes)
{
u32 delay_count;
u32 val;
u32 i;
/* phy timer setting */
dsi_set_phy_timer(base, phy, lanes);
/*
* Reset to clean up phy tst params.
*/
writel(0, base + PHY_RSTZ);
writel(0, base + PHY_TST_CTRL0);
writel(1, base + PHY_TST_CTRL0);
writel(0, base + PHY_TST_CTRL0);
/*
* Clock lane timing control setting: TLPX, THS-PREPARE,
* THS-ZERO, THS-TRAIL, TWAKEUP.
*/
dsi_phy_tst_set(base, CLK_TLPX, phy->clk_t_lpx);
dsi_phy_tst_set(base, CLK_THS_PREPARE, phy->clk_t_hs_prepare);
dsi_phy_tst_set(base, CLK_THS_ZERO, phy->clk_t_hs_zero);
dsi_phy_tst_set(base, CLK_THS_TRAIL, phy->clk_t_hs_trial);
dsi_phy_tst_set(base, CLK_TWAKEUP, phy->clk_t_wakeup);
/*
* Data lane timing control setting: TLPX, THS-PREPARE,
* THS-ZERO, THS-TRAIL, TTA-GO, TTA-GET, TWAKEUP.
*/
for (i = 0; i < lanes; i++) {
dsi_phy_tst_set(base, DATA_TLPX(i), phy->data_t_lpx);
dsi_phy_tst_set(base, DATA_THS_PREPARE(i),
phy->data_t_hs_prepare);
dsi_phy_tst_set(base, DATA_THS_ZERO(i), phy->data_t_hs_zero);
dsi_phy_tst_set(base, DATA_THS_TRAIL(i), phy->data_t_hs_trial);
dsi_phy_tst_set(base, DATA_TTA_GO(i), phy->data_t_ta_go);
dsi_phy_tst_set(base, DATA_TTA_GET(i), phy->data_t_ta_get);
dsi_phy_tst_set(base, DATA_TWAKEUP(i), phy->data_t_wakeup);
}
/*
* physical configuration: I, pll I, pll II, pll III,
* pll IV, pll V.
*/
dsi_phy_tst_set(base, PHY_CFG_I, phy->hstx_ckg_sel);
val = (phy->pll_fbd_div5f << 5) + (phy->pll_fbd_div1f << 4) +
(phy->pll_fbd_2p << 1) + phy->pll_enbwt;
dsi_phy_tst_set(base, PHY_CFG_PLL_I, val);
dsi_phy_tst_set(base, PHY_CFG_PLL_II, phy->pll_fbd_p);
dsi_phy_tst_set(base, PHY_CFG_PLL_III, phy->pll_fbd_s);
val = (phy->pll_pre_div1p << 7) + phy->pll_pre_p;
dsi_phy_tst_set(base, PHY_CFG_PLL_IV, val);
val = (5 << 5) + (phy->pll_vco_750M << 4) + (phy->pll_lpf_rs << 2) +
phy->pll_lpf_cs;
dsi_phy_tst_set(base, PHY_CFG_PLL_V, val);
writel(PHY_ENABLECLK, base + PHY_RSTZ);
udelay(1);
writel(PHY_ENABLECLK | PHY_UNSHUTDOWNZ, base + PHY_RSTZ);
udelay(1);
writel(PHY_ENABLECLK | PHY_UNRSTZ | PHY_UNSHUTDOWNZ, base + PHY_RSTZ);
usleep_range(1000, 1500);
/*
* wait for the phy clock to become ready
*/
delay_count = 100;
while (delay_count) {
val = readl(base + PHY_STATUS);
if ((BIT(0) | BIT(2)) & val)
break;
udelay(1);
delay_count--;
}
if (!delay_count)
DRM_INFO("phylock and phystopstateclklane is not ready.\n");
}
static void dsi_set_mode_timing(void __iomem *base,
u32 lane_byte_clk_kHz,
struct drm_display_mode *mode,
enum mipi_dsi_pixel_format format)
{
u32 hfp, hbp, hsw, vfp, vbp, vsw;
u32 hline_time;
u32 hsa_time;
u32 hbp_time;
u32 pixel_clk_kHz;
int htot, vtot;
u32 val;
u64 tmp;
val = dsi_get_dpi_color_coding(format);
writel(val, base + DPI_COLOR_CODING);
val = (mode->flags & DRM_MODE_FLAG_NHSYNC ? 1 : 0) << 2;
val |= (mode->flags & DRM_MODE_FLAG_NVSYNC ? 1 : 0) << 1;
writel(val, base + DPI_CFG_POL);
/*
* The DSI IP accepts vertical timing using lines as normal,
* but horizontal timing is a mixture of pixel-clocks for the
* active region and byte-lane clocks for the blanking-related
* timings. hfp is specified as the total hline_time in byte-
* lane clocks minus hsa, hbp and active.
*/
pixel_clk_kHz = mode->clock;
htot = mode->htotal;
vtot = mode->vtotal;
hfp = mode->hsync_start - mode->hdisplay;
hbp = mode->htotal - mode->hsync_end;
hsw = mode->hsync_end - mode->hsync_start;
vfp = mode->vsync_start - mode->vdisplay;
vbp = mode->vtotal - mode->vsync_end;
vsw = mode->vsync_end - mode->vsync_start;
if (vsw > 15) {
DRM_DEBUG_DRIVER("vsw exceeded 15\n");
vsw = 15;
}
hsa_time = (hsw * lane_byte_clk_kHz) / pixel_clk_kHz;
hbp_time = (hbp * lane_byte_clk_kHz) / pixel_clk_kHz;
tmp = (u64)htot * (u64)lane_byte_clk_kHz;
hline_time = DIV_ROUND_UP(tmp, pixel_clk_kHz);
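/* Illustrative numbers: 1080p60 (htotal 2200, pixel clock 148500 kHz) on 4 lanes of RGB888 gives a 111375 kHz lane byte clock and hline_time = 2200 * 111375 / 148500 = 1650 byte clocks. */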
/* all specified in byte-lane clocks */
writel(hsa_time, base + VID_HSA_TIME);
writel(hbp_time, base + VID_HBP_TIME);
writel(hline_time, base + VID_HLINE_TIME);
writel(vsw, base + VID_VSA_LINES);
writel(vbp, base + VID_VBP_LINES);
writel(vfp, base + VID_VFP_LINES);
writel(mode->vdisplay, base + VID_VACTIVE_LINES);
writel(mode->hdisplay, base + VID_PKT_SIZE);
DRM_DEBUG_DRIVER("htot=%d, hfp=%d, hbp=%d, hsw=%d\n",
htot, hfp, hbp, hsw);
DRM_DEBUG_DRIVER("vtol=%d, vfp=%d, vbp=%d, vsw=%d\n",
vtot, vfp, vbp, vsw);
DRM_DEBUG_DRIVER("hsa_time=%d, hbp_time=%d, hline_time=%d\n",
hsa_time, hbp_time, hline_time);
}
static void dsi_set_video_mode(void __iomem *base, unsigned long flags)
{
u32 val;
u32 mode_mask = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
u32 non_burst_sync_pulse = MIPI_DSI_MODE_VIDEO |
MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
u32 non_burst_sync_event = MIPI_DSI_MODE_VIDEO;
/*
* choose video mode type
*/
if ((flags & mode_mask) == non_burst_sync_pulse)
val = DSI_NON_BURST_SYNC_PULSES;
else if ((flags & mode_mask) == non_burst_sync_event)
val = DSI_NON_BURST_SYNC_EVENTS;
else
val = DSI_BURST_SYNC_PULSES_1;
writel(val, base + VID_MODE_CFG);
writel(PHY_TXREQUESTCLKHS, base + LPCLK_CTRL);
writel(DSI_VIDEO_MODE, base + MODE_CFG);
}
static void dsi_mipi_init(struct dw_dsi *dsi)
{
struct dsi_hw_ctx *ctx = dsi->ctx;
struct mipi_phy_params *phy = &dsi->phy;
struct drm_display_mode *mode = &dsi->cur_mode;
u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
void __iomem *base = ctx->base;
u32 dphy_req_kHz;
/*
* calculate the phy params
*/
dphy_req_kHz = mode->clock * bpp / dsi->lanes;
dsi_get_phy_params(dphy_req_kHz, phy);
/* reset Core */
writel(RESET, base + PWR_UP);
/* set dsi phy params */
dsi_set_mipi_phy(base, phy, dsi->lanes);
/* set dsi mode timing */
dsi_set_mode_timing(base, phy->lane_byte_clk_kHz, mode, dsi->format);
/* set dsi video mode */
dsi_set_video_mode(base, dsi->mode_flags);
/* dsi wake up */
writel(POWERUP, base + PWR_UP);
DRM_DEBUG_DRIVER("lanes=%d, pixel_clk=%d kHz, bytes_freq=%d kHz\n",
dsi->lanes, mode->clock, phy->lane_byte_clk_kHz);
}
static void dsi_encoder_disable(struct drm_encoder *encoder)
{
struct dw_dsi *dsi = encoder_to_dsi(encoder);
struct dsi_hw_ctx *ctx = dsi->ctx;
void __iomem *base = ctx->base;
if (!dsi->enable)
return;
writel(0, base + PWR_UP);
writel(0, base + LPCLK_CTRL);
writel(0, base + PHY_RSTZ);
clk_disable_unprepare(ctx->pclk);
dsi->enable = false;
}
static void dsi_encoder_enable(struct drm_encoder *encoder)
{
struct dw_dsi *dsi = encoder_to_dsi(encoder);
struct dsi_hw_ctx *ctx = dsi->ctx;
int ret;
if (dsi->enable)
return;
ret = clk_prepare_enable(ctx->pclk);
if (ret) {
DRM_ERROR("fail to enable pclk: %d\n", ret);
return;
}
dsi_mipi_init(dsi);
dsi->enable = true;
}
static enum drm_mode_status dsi_encoder_phy_mode_valid(
struct drm_encoder *encoder,
const struct drm_display_mode *mode)
{
struct dw_dsi *dsi = encoder_to_dsi(encoder);
struct mipi_phy_params phy;
u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
u32 req_kHz, act_kHz, lane_byte_clk_kHz;
/* Calculate the lane byte clk using the adjusted mode clk */
memset(&phy, 0, sizeof(phy));
req_kHz = mode->clock * bpp / dsi->lanes;
act_kHz = dsi_calc_phy_rate(req_kHz, &phy);
lane_byte_clk_kHz = act_kHz / 8;
DRM_DEBUG_DRIVER("Checking mode %ix%i-%i@%i clock: %i...",
mode->hdisplay, mode->vdisplay, bpp,
drm_mode_vrefresh(mode), mode->clock);
/*
* Make sure the adjusted mode clock and the lane byte clk
* have a common denominator base frequency
*/
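/* The driver only handles RGB888 (24 bpp), so the needed lane byte clock is pixel_clk * 3 / lanes; the mode fits only when pixel_clk / lanes equals lane_byte_clk / 3. */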
if (mode->clock/dsi->lanes == lane_byte_clk_kHz/3) {
DRM_DEBUG_DRIVER("OK!\n");
return MODE_OK;
}
DRM_DEBUG_DRIVER("BAD!\n");
return MODE_BAD;
}
static enum drm_mode_status dsi_encoder_mode_valid(struct drm_encoder *encoder,
const struct drm_display_mode *mode)
{
const struct drm_crtc_helper_funcs *crtc_funcs = NULL;
struct drm_crtc *crtc = NULL;
struct drm_display_mode adj_mode;
enum drm_mode_status ret;
/*
* The crtc might adjust the mode, so go through the
* possible crtcs (technically just one) and call
* mode_fixup to figure out the adjusted mode before we
* validate it.
*/
drm_for_each_crtc(crtc, encoder->dev) {
/*
* reset adj_mode to the mode value each time,
* so we don't adjust the mode twice
*/
drm_mode_init(&adj_mode, mode);
crtc_funcs = crtc->helper_private;
if (crtc_funcs && crtc_funcs->mode_fixup)
if (!crtc_funcs->mode_fixup(crtc, mode, &adj_mode))
return MODE_BAD;
ret = dsi_encoder_phy_mode_valid(encoder, &adj_mode);
if (ret != MODE_OK)
return ret;
}
return MODE_OK;
}
static void dsi_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
struct dw_dsi *dsi = encoder_to_dsi(encoder);
drm_mode_copy(&dsi->cur_mode, adj_mode);
}
static int dsi_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
/* do nothing */
return 0;
}
static const struct drm_encoder_helper_funcs dw_encoder_helper_funcs = {
.atomic_check = dsi_encoder_atomic_check,
.mode_valid = dsi_encoder_mode_valid,
.mode_set = dsi_encoder_mode_set,
.enable = dsi_encoder_enable,
.disable = dsi_encoder_disable
};
static int dw_drm_encoder_init(struct device *dev,
struct drm_device *drm_dev,
struct drm_encoder *encoder)
{
int ret;
u32 crtc_mask = drm_of_find_possible_crtcs(drm_dev, dev->of_node);
if (!crtc_mask) {
DRM_ERROR("failed to find crtc mask\n");
return -EINVAL;
}
encoder->possible_crtcs = crtc_mask;
ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_DSI);
if (ret) {
DRM_ERROR("failed to init dsi encoder\n");
return ret;
}
drm_encoder_helper_add(encoder, &dw_encoder_helper_funcs);
return 0;
}
static const struct component_ops dsi_ops;
static int dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *mdsi)
{
struct dw_dsi *dsi = host_to_dsi(host);
struct device *dev = host->dev;
int ret;
if (mdsi->lanes < 1 || mdsi->lanes > 4) {
DRM_ERROR("dsi device params invalid\n");
return -EINVAL;
}
dsi->lanes = mdsi->lanes;
dsi->format = mdsi->format;
dsi->mode_flags = mdsi->mode_flags;
ret = component_add(dev, &dsi_ops);
if (ret)
return ret;
return 0;
}
static int dsi_host_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *mdsi)
{
struct device *dev = host->dev;
component_del(dev, &dsi_ops);
return 0;
}
static const struct mipi_dsi_host_ops dsi_host_ops = {
.attach = dsi_host_attach,
.detach = dsi_host_detach,
};
static int dsi_host_init(struct device *dev, struct dw_dsi *dsi)
{
struct mipi_dsi_host *host = &dsi->host;
int ret;
host->dev = dev;
host->ops = &dsi_host_ops;
ret = mipi_dsi_host_register(host);
if (ret) {
DRM_ERROR("failed to register dsi host\n");
return ret;
}
return 0;
}
static int dsi_bridge_init(struct drm_device *dev, struct dw_dsi *dsi)
{
struct drm_encoder *encoder = &dsi->encoder;
struct drm_bridge *bridge;
struct device_node *np = dsi->dev->of_node;
int ret;
/*
* Get the endpoint node. In our case, dsi has one output port1
* to which the external HDMI bridge is connected.
*/
ret = drm_of_find_panel_or_bridge(np, 1, 0, NULL, &bridge);
if (ret)
return ret;
/* associate the bridge to dsi encoder */
return drm_bridge_attach(encoder, bridge, NULL, 0);
}
static int dsi_bind(struct device *dev, struct device *master, void *data)
{
struct dsi_data *ddata = dev_get_drvdata(dev);
struct dw_dsi *dsi = &ddata->dsi;
struct drm_device *drm_dev = data;
int ret;
ret = dw_drm_encoder_init(dev, drm_dev, &dsi->encoder);
if (ret)
return ret;
ret = dsi_bridge_init(drm_dev, dsi);
if (ret)
return ret;
return 0;
}
static void dsi_unbind(struct device *dev, struct device *master, void *data)
{
/* do nothing */
}
static const struct component_ops dsi_ops = {
.bind = dsi_bind,
.unbind = dsi_unbind,
};
static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi)
{
struct dsi_hw_ctx *ctx = dsi->ctx;
struct resource *res;
ctx->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(ctx->pclk)) {
DRM_ERROR("failed to get pclk clock\n");
return PTR_ERR(ctx->pclk);
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ctx->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(ctx->base)) {
DRM_ERROR("failed to remap dsi io region\n");
return PTR_ERR(ctx->base);
}
return 0;
}
static int dsi_probe(struct platform_device *pdev)
{
struct dsi_data *data;
struct dw_dsi *dsi;
struct dsi_hw_ctx *ctx;
int ret;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data) {
DRM_ERROR("failed to allocate dsi data.\n");
return -ENOMEM;
}
dsi = &data->dsi;
ctx = &data->ctx;
dsi->ctx = ctx;
dsi->dev = &pdev->dev;
ret = dsi_parse_dt(pdev, dsi);
if (ret)
return ret;
platform_set_drvdata(pdev, data);
ret = dsi_host_init(&pdev->dev, dsi);
if (ret)
return ret;
return 0;
}
static void dsi_remove(struct platform_device *pdev)
{
struct dsi_data *data = platform_get_drvdata(pdev);
struct dw_dsi *dsi = &data->dsi;
mipi_dsi_host_unregister(&dsi->host);
}
static const struct of_device_id dsi_of_match[] = {
{.compatible = "hisilicon,hi6220-dsi"},
{ }
};
MODULE_DEVICE_TABLE(of, dsi_of_match);
static struct platform_driver dsi_driver = {
.probe = dsi_probe,
.remove_new = dsi_remove,
.driver = {
.name = "dw-dsi",
.of_match_table = dsi_of_match,
},
};
module_platform_driver(dsi_driver);
MODULE_AUTHOR("Xinliang Liu <[email protected]>");
MODULE_AUTHOR("Xinliang Liu <[email protected]>");
MODULE_AUTHOR("Xinwei Kong <[email protected]>");
MODULE_DESCRIPTION("DesignWare MIPI DSI Host Controller v1.02 driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Hisilicon Kirin SoCs drm master driver
*
* Copyright (c) 2016 Linaro Limited.
* Copyright (c) 2014-2016 HiSilicon Limited.
*
* Author:
* Xinliang Liu <[email protected]>
* Xinliang Liu <[email protected]>
* Xinwei Kong <[email protected]>
*/
#include <linux/component.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "kirin_drm_drv.h"
#define KIRIN_MAX_PLANE 2
struct kirin_drm_private {
struct kirin_crtc crtc;
struct kirin_plane planes[KIRIN_MAX_PLANE];
void *hw_ctx;
};
static int kirin_drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_plane *plane,
const struct kirin_drm_data *driver_data)
{
struct device_node *port;
int ret;
/* set crtc port so that
* drm_of_find_possible_crtcs call works
*/
port = of_get_child_by_name(dev->dev->of_node, "port");
if (!port) {
DRM_ERROR("no port node found in %pOF\n", dev->dev->of_node);
return -EINVAL;
}
of_node_put(port);
crtc->port = port;
ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
driver_data->crtc_funcs, NULL);
if (ret) {
DRM_ERROR("failed to init crtc.\n");
return ret;
}
drm_crtc_helper_add(crtc, driver_data->crtc_helper_funcs);
return 0;
}
static int kirin_drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
enum drm_plane_type type,
const struct kirin_drm_data *data)
{
int ret = 0;
ret = drm_universal_plane_init(dev, plane, 1, data->plane_funcs,
data->channel_formats,
data->channel_formats_cnt,
NULL, type, NULL);
if (ret) {
DRM_ERROR("fail to init plane, ch=%d\n", 0);
return ret;
}
drm_plane_helper_add(plane, data->plane_helper_funcs);
return 0;
}
static void kirin_drm_private_cleanup(struct drm_device *dev)
{
struct kirin_drm_private *kirin_priv = dev->dev_private;
struct kirin_drm_data *data;
data = (struct kirin_drm_data *)of_device_get_match_data(dev->dev);
if (data->cleanup_hw_ctx)
data->cleanup_hw_ctx(kirin_priv->hw_ctx);
devm_kfree(dev->dev, kirin_priv);
dev->dev_private = NULL;
}
static int kirin_drm_private_init(struct drm_device *dev,
const struct kirin_drm_data *driver_data)
{
struct platform_device *pdev = to_platform_device(dev->dev);
struct kirin_drm_private *kirin_priv;
struct drm_plane *prim_plane;
enum drm_plane_type type;
void *ctx;
int ret;
u32 ch;
kirin_priv = devm_kzalloc(dev->dev, sizeof(*kirin_priv), GFP_KERNEL);
if (!kirin_priv) {
DRM_ERROR("failed to alloc kirin_drm_private\n");
return -ENOMEM;
}
ctx = driver_data->alloc_hw_ctx(pdev, &kirin_priv->crtc.base);
if (IS_ERR(ctx)) {
DRM_ERROR("failed to initialize kirin_priv hw ctx\n");
return -EINVAL;
}
kirin_priv->hw_ctx = ctx;
/*
* plane init
* TODO: Only the primary plane is supported now; overlay planes
* still need to be implemented.
*/
for (ch = 0; ch < driver_data->num_planes; ch++) {
if (ch == driver_data->prim_plane)
type = DRM_PLANE_TYPE_PRIMARY;
else
type = DRM_PLANE_TYPE_OVERLAY;
ret = kirin_drm_plane_init(dev, &kirin_priv->planes[ch].base,
type, driver_data);
if (ret)
return ret;
kirin_priv->planes[ch].ch = ch;
kirin_priv->planes[ch].hw_ctx = ctx;
}
/* crtc init */
prim_plane = &kirin_priv->planes[driver_data->prim_plane].base;
ret = kirin_drm_crtc_init(dev, &kirin_priv->crtc.base,
prim_plane, driver_data);
if (ret)
return ret;
kirin_priv->crtc.hw_ctx = ctx;
dev->dev_private = kirin_priv;
return 0;
}
static int kirin_drm_kms_init(struct drm_device *dev,
const struct kirin_drm_data *driver_data)
{
int ret;
/* dev->mode_config initialization */
drm_mode_config_init(dev);
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
dev->mode_config.max_width = driver_data->config_max_width;
dev->mode_config.max_height = driver_data->config_max_height;
dev->mode_config.funcs = driver_data->mode_config_funcs;
/* display controller init */
ret = kirin_drm_private_init(dev, driver_data);
if (ret)
goto err_mode_config_cleanup;
/* bind and init sub drivers */
ret = component_bind_all(dev->dev, dev);
if (ret) {
DRM_ERROR("failed to bind all component.\n");
goto err_private_cleanup;
}
/* vblank init */
ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
if (ret) {
DRM_ERROR("failed to initialize vblank.\n");
goto err_unbind_all;
}
/* reset all the states of crtc/plane/encoder/connector */
drm_mode_config_reset(dev);
/* init kms poll for handling hpd */
drm_kms_helper_poll_init(dev);
return 0;
err_unbind_all:
component_unbind_all(dev->dev, dev);
err_private_cleanup:
kirin_drm_private_cleanup(dev);
err_mode_config_cleanup:
drm_mode_config_cleanup(dev);
return ret;
}
static int kirin_drm_kms_cleanup(struct drm_device *dev)
{
drm_kms_helper_poll_fini(dev);
kirin_drm_private_cleanup(dev);
drm_mode_config_cleanup(dev);
return 0;
}
static int kirin_drm_bind(struct device *dev)
{
struct kirin_drm_data *driver_data;
struct drm_device *drm_dev;
int ret;
driver_data = (struct kirin_drm_data *)of_device_get_match_data(dev);
if (!driver_data)
return -EINVAL;
drm_dev = drm_dev_alloc(driver_data->driver, dev);
if (IS_ERR(drm_dev))
return PTR_ERR(drm_dev);
dev_set_drvdata(dev, drm_dev);
/* display controller init */
ret = kirin_drm_kms_init(drm_dev, driver_data);
if (ret)
goto err_drm_dev_put;
ret = drm_dev_register(drm_dev, 0);
if (ret)
goto err_kms_cleanup;
drm_fbdev_generic_setup(drm_dev, 32);
return 0;
err_kms_cleanup:
kirin_drm_kms_cleanup(drm_dev);
err_drm_dev_put:
drm_dev_put(drm_dev);
return ret;
}
static void kirin_drm_unbind(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
drm_dev_unregister(drm_dev);
kirin_drm_kms_cleanup(drm_dev);
drm_dev_put(drm_dev);
}
static const struct component_master_ops kirin_drm_ops = {
.bind = kirin_drm_bind,
.unbind = kirin_drm_unbind,
};
static int kirin_drm_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct component_match *match = NULL;
struct device_node *remote;
remote = of_graph_get_remote_node(np, 0, 0);
if (!remote)
return -ENODEV;
drm_of_component_match_add(dev, &match, component_compare_of, remote);
of_node_put(remote);
return component_master_add_with_match(dev, &kirin_drm_ops, match);
}
static void kirin_drm_platform_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &kirin_drm_ops);
}
static const struct of_device_id kirin_drm_dt_ids[] = {
{ .compatible = "hisilicon,hi6220-ade",
.data = &ade_driver_data,
},
{ /* end node */ },
};
MODULE_DEVICE_TABLE(of, kirin_drm_dt_ids);
static struct platform_driver kirin_drm_platform_driver = {
.probe = kirin_drm_platform_probe,
.remove_new = kirin_drm_platform_remove,
.driver = {
.name = "kirin-drm",
.of_match_table = kirin_drm_dt_ids,
},
};
drm_module_platform_driver(kirin_drm_platform_driver);
MODULE_AUTHOR("Xinliang Liu <[email protected]>");
MODULE_AUTHOR("Xinliang Liu <[email protected]>");
MODULE_AUTHOR("Xinwei Kong <[email protected]>");
MODULE_DESCRIPTION("hisilicon Kirin SoCs' DRM master driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Hisilicon Hi6220 SoC ADE(Advanced Display Engine)'s crtc&plane driver
*
* Copyright (c) 2016 Linaro Limited.
* Copyright (c) 2014-2016 HiSilicon Limited.
*
* Author:
* Xinliang Liu <[email protected]>
* Xinliang Liu <[email protected]>
* Xinwei Kong <[email protected]>
*/
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <video/display_timing.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "kirin_drm_drv.h"
#include "kirin_ade_reg.h"
#define OUT_OVLY ADE_OVLY2 /* output overlay compositor */
#define ADE_DEBUG 1
struct ade_hw_ctx {
void __iomem *base;
struct regmap *noc_regmap;
struct clk *ade_core_clk;
struct clk *media_noc_clk;
struct clk *ade_pix_clk;
struct reset_control *reset;
bool power_on;
int irq;
struct drm_crtc *crtc;
};
static const struct kirin_format ade_formats[] = {
/* 16bpp RGB: */
{ DRM_FORMAT_RGB565, ADE_RGB_565 },
{ DRM_FORMAT_BGR565, ADE_BGR_565 },
/* 24bpp RGB: */
{ DRM_FORMAT_RGB888, ADE_RGB_888 },
{ DRM_FORMAT_BGR888, ADE_BGR_888 },
/* 32bpp [A]RGB: */
{ DRM_FORMAT_XRGB8888, ADE_XRGB_8888 },
{ DRM_FORMAT_XBGR8888, ADE_XBGR_8888 },
{ DRM_FORMAT_RGBA8888, ADE_RGBA_8888 },
{ DRM_FORMAT_BGRA8888, ADE_BGRA_8888 },
{ DRM_FORMAT_ARGB8888, ADE_ARGB_8888 },
{ DRM_FORMAT_ABGR8888, ADE_ABGR_8888 },
};
static const u32 channel_formats[] = {
/* channel 1,2,3,4 */
DRM_FORMAT_RGB565, DRM_FORMAT_BGR565, DRM_FORMAT_RGB888,
DRM_FORMAT_BGR888, DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888,
DRM_FORMAT_RGBA8888, DRM_FORMAT_BGRA8888, DRM_FORMAT_ARGB8888,
DRM_FORMAT_ABGR8888
};
/* convert from fourcc format to ade format */
static u32 ade_get_format(u32 pixel_format)
{
int i;
for (i = 0; i < ARRAY_SIZE(ade_formats); i++)
if (ade_formats[i].pixel_format == pixel_format)
return ade_formats[i].hw_format;
/* not found */
DRM_ERROR("Not found pixel format!!fourcc_format= %d\n",
pixel_format);
return ADE_FORMAT_UNSUPPORT;
}
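/* The reload-disable flags are spread across several 32-bit ADE_RELOAD_DIS registers; bit_num / 32 selects the register and bit_num % 32 the bit within it. */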
static void ade_update_reload_bit(void __iomem *base, u32 bit_num, u32 val)
{
u32 bit_ofst, reg_num;
bit_ofst = bit_num % 32;
reg_num = bit_num / 32;
ade_update_bits(base + ADE_RELOAD_DIS(reg_num), bit_ofst,
MASK(1), !!val);
}
static u32 ade_read_reload_bit(void __iomem *base, u32 bit_num)
{
u32 tmp, bit_ofst, reg_num;
bit_ofst = bit_num % 32;
reg_num = bit_num / 32;
tmp = readl(base + ADE_RELOAD_DIS(reg_num));
return !!(BIT(bit_ofst) & tmp);
}
static void ade_init(struct ade_hw_ctx *ctx)
{
void __iomem *base = ctx->base;
/* enable clk gate */
ade_update_bits(base + ADE_CTRL1, AUTO_CLK_GATE_EN_OFST,
AUTO_CLK_GATE_EN, ADE_ENABLE);
/* clear overlay */
writel(0, base + ADE_OVLY1_TRANS_CFG);
writel(0, base + ADE_OVLY_CTL);
writel(0, base + ADE_OVLYX_CTL(OUT_OVLY));
/* clear reset and reload regs */
writel(MASK(32), base + ADE_SOFT_RST_SEL(0));
writel(MASK(32), base + ADE_SOFT_RST_SEL(1));
writel(MASK(32), base + ADE_RELOAD_DIS(0));
writel(MASK(32), base + ADE_RELOAD_DIS(1));
/*
* for video mode, all the ade registers should
* become effective at frame end.
*/
ade_update_bits(base + ADE_CTRL, FRM_END_START_OFST,
FRM_END_START_MASK, REG_EFFECTIVE_IN_ADEEN_FRMEND);
}
static bool ade_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
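/* Snap the requested pixel clock to a rate the ADE pixel clock can actually generate (what clk_set_rate would give). */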
adjusted_mode->clock =
clk_round_rate(ctx->ade_pix_clk, mode->clock * 1000) / 1000;
return true;
}
static void ade_set_pix_clk(struct ade_hw_ctx *ctx,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
u32 clk_Hz = mode->clock * 1000;
int ret;
/*
* Success should be guaranteed by the mode_valid callback,
* so failure shouldn't happen here.
*/
ret = clk_set_rate(ctx->ade_pix_clk, clk_Hz);
if (ret)
DRM_ERROR("failed to set pixel clk %dHz (%d)\n", clk_Hz, ret);
adj_mode->clock = clk_get_rate(ctx->ade_pix_clk) / 1000;
}
static void ade_ldi_set_mode(struct ade_hw_ctx *ctx,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
void __iomem *base = ctx->base;
u32 width = mode->hdisplay;
u32 height = mode->vdisplay;
u32 hfp, hbp, hsw, vfp, vbp, vsw;
u32 plr_flags;
plr_flags = (mode->flags & DRM_MODE_FLAG_NVSYNC) ? FLAG_NVSYNC : 0;
plr_flags |= (mode->flags & DRM_MODE_FLAG_NHSYNC) ? FLAG_NHSYNC : 0;
hfp = mode->hsync_start - mode->hdisplay;
hbp = mode->htotal - mode->hsync_end;
hsw = mode->hsync_end - mode->hsync_start;
vfp = mode->vsync_start - mode->vdisplay;
vbp = mode->vtotal - mode->vsync_end;
vsw = mode->vsync_end - mode->vsync_start;
if (vsw > 15) {
DRM_DEBUG_DRIVER("vsw exceeded 15\n");
vsw = 15;
}
writel((hbp << HBP_OFST) | hfp, base + LDI_HRZ_CTRL0);
/* the configured value is actual value - 1 */
writel(hsw - 1, base + LDI_HRZ_CTRL1);
writel((vbp << VBP_OFST) | vfp, base + LDI_VRT_CTRL0);
/* the configured value is actual value - 1 */
writel(vsw - 1, base + LDI_VRT_CTRL1);
/* the configured value is actual value - 1 */
writel(((height - 1) << VSIZE_OFST) | (width - 1),
base + LDI_DSP_SIZE);
writel(plr_flags, base + LDI_PLR_CTRL);
/* set overlay compositor output size */
writel(((width - 1) << OUTPUT_XSIZE_OFST) | (height - 1),
base + ADE_OVLY_OUTPUT_SIZE(OUT_OVLY));
/* ctran6 setting */
writel(CTRAN_BYPASS_ON, base + ADE_CTRAN_DIS(ADE_CTRAN6));
/* the configured value is actual value - 1 */
writel(width * height - 1, base + ADE_CTRAN_IMAGE_SIZE(ADE_CTRAN6));
ade_update_reload_bit(base, CTRAN_OFST + ADE_CTRAN6, 0);
ade_set_pix_clk(ctx, mode, adj_mode);
DRM_DEBUG_DRIVER("set mode: %dx%d\n", width, height);
}
static int ade_power_up(struct ade_hw_ctx *ctx)
{
int ret;
ret = clk_prepare_enable(ctx->media_noc_clk);
if (ret) {
DRM_ERROR("failed to enable media_noc_clk (%d)\n", ret);
return ret;
}
ret = reset_control_deassert(ctx->reset);
if (ret) {
DRM_ERROR("failed to deassert reset\n");
return ret;
}
ret = clk_prepare_enable(ctx->ade_core_clk);
if (ret) {
DRM_ERROR("failed to enable ade_core_clk (%d)\n", ret);
return ret;
}
ade_init(ctx);
ctx->power_on = true;
return 0;
}
static void ade_power_down(struct ade_hw_ctx *ctx)
{
void __iomem *base = ctx->base;
writel(ADE_DISABLE, base + LDI_CTRL);
/* dsi pixel off */
writel(DSI_PCLK_OFF, base + LDI_HDMI_DSI_GT);
clk_disable_unprepare(ctx->ade_core_clk);
reset_control_assert(ctx->reset);
clk_disable_unprepare(ctx->media_noc_clk);
ctx->power_on = false;
}
static void ade_set_medianoc_qos(struct ade_hw_ctx *ctx)
{
struct regmap *map = ctx->noc_regmap;
regmap_update_bits(map, ADE0_QOSGENERATOR_MODE,
QOSGENERATOR_MODE_MASK, BYPASS_MODE);
regmap_update_bits(map, ADE0_QOSGENERATOR_EXTCONTROL,
SOCKET_QOS_EN, SOCKET_QOS_EN);
regmap_update_bits(map, ADE1_QOSGENERATOR_MODE,
QOSGENERATOR_MODE_MASK, BYPASS_MODE);
regmap_update_bits(map, ADE1_QOSGENERATOR_EXTCONTROL,
SOCKET_QOS_EN, SOCKET_QOS_EN);
}
static int ade_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
void __iomem *base = ctx->base;
if (!ctx->power_on)
(void)ade_power_up(ctx);
ade_update_bits(base + LDI_INT_EN, FRAME_END_INT_EN_OFST,
MASK(1), 1);
return 0;
}
static void ade_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
void __iomem *base = ctx->base;
if (!ctx->power_on) {
DRM_ERROR("power is down! vblank disable fail\n");
return;
}
ade_update_bits(base + LDI_INT_EN, FRAME_END_INT_EN_OFST,
MASK(1), 0);
}
static irqreturn_t ade_irq_handler(int irq, void *data)
{
struct ade_hw_ctx *ctx = data;
struct drm_crtc *crtc = ctx->crtc;
void __iomem *base = ctx->base;
u32 status;
status = readl(base + LDI_MSK_INT);
DRM_DEBUG_VBL("LDI IRQ: status=0x%X\n", status);
/* vblank irq */
if (status & BIT(FRAME_END_INT_EN_OFST)) {
ade_update_bits(base + LDI_INT_CLR, FRAME_END_INT_EN_OFST,
MASK(1), 1);
drm_crtc_handle_vblank(crtc);
}
return IRQ_HANDLED;
}
static void ade_display_enable(struct ade_hw_ctx *ctx)
{
void __iomem *base = ctx->base;
u32 out_fmt = LDI_OUT_RGB_888;
/* enable output overlay compositor */
writel(ADE_ENABLE, base + ADE_OVLYX_CTL(OUT_OVLY));
ade_update_reload_bit(base, OVLY_OFST + OUT_OVLY, 0);
/* display source setting */
writel(DISP_SRC_OVLY2, base + ADE_DISP_SRC_CFG);
/* enable ade */
writel(ADE_ENABLE, base + ADE_EN);
/* enable ldi */
writel(NORMAL_MODE, base + LDI_WORK_MODE);
writel((out_fmt << BPP_OFST) | DATA_GATE_EN | LDI_EN,
base + LDI_CTRL);
/* dsi pixel on */
writel(DSI_PCLK_ON, base + LDI_HDMI_DSI_GT);
}
#if ADE_DEBUG
static void ade_rdma_dump_regs(void __iomem *base, u32 ch)
{
u32 reg_ctrl, reg_addr, reg_size, reg_stride, reg_space, reg_en;
u32 val;
reg_ctrl = RD_CH_CTRL(ch);
reg_addr = RD_CH_ADDR(ch);
reg_size = RD_CH_SIZE(ch);
reg_stride = RD_CH_STRIDE(ch);
reg_space = RD_CH_SPACE(ch);
reg_en = RD_CH_EN(ch);
val = ade_read_reload_bit(base, RDMA_OFST + ch);
DRM_DEBUG_DRIVER("[rdma%d]: reload(%d)\n", ch + 1, val);
val = readl(base + reg_ctrl);
DRM_DEBUG_DRIVER("[rdma%d]: reg_ctrl(0x%08x)\n", ch + 1, val);
val = readl(base + reg_addr);
DRM_DEBUG_DRIVER("[rdma%d]: reg_addr(0x%08x)\n", ch + 1, val);
val = readl(base + reg_size);
DRM_DEBUG_DRIVER("[rdma%d]: reg_size(0x%08x)\n", ch + 1, val);
val = readl(base + reg_stride);
DRM_DEBUG_DRIVER("[rdma%d]: reg_stride(0x%08x)\n", ch + 1, val);
val = readl(base + reg_space);
DRM_DEBUG_DRIVER("[rdma%d]: reg_space(0x%08x)\n", ch + 1, val);
val = readl(base + reg_en);
DRM_DEBUG_DRIVER("[rdma%d]: reg_en(0x%08x)\n", ch + 1, val);
}
static void ade_clip_dump_regs(void __iomem *base, u32 ch)
{
u32 val;
val = ade_read_reload_bit(base, CLIP_OFST + ch);
DRM_DEBUG_DRIVER("[clip%d]: reload(%d)\n", ch + 1, val);
val = readl(base + ADE_CLIP_DISABLE(ch));
DRM_DEBUG_DRIVER("[clip%d]: reg_clip_disable(0x%08x)\n", ch + 1, val);
val = readl(base + ADE_CLIP_SIZE0(ch));
DRM_DEBUG_DRIVER("[clip%d]: reg_clip_size0(0x%08x)\n", ch + 1, val);
val = readl(base + ADE_CLIP_SIZE1(ch));
DRM_DEBUG_DRIVER("[clip%d]: reg_clip_size1(0x%08x)\n", ch + 1, val);
}
static void ade_compositor_routing_dump_regs(void __iomem *base, u32 ch)
{
u8 ovly_ch = 0; /* TODO: Only primary plane now */
u32 val;
val = readl(base + ADE_OVLY_CH_XY0(ovly_ch));
DRM_DEBUG_DRIVER("[overlay ch%d]: reg_ch_xy0(0x%08x)\n", ovly_ch, val);
val = readl(base + ADE_OVLY_CH_XY1(ovly_ch));
DRM_DEBUG_DRIVER("[overlay ch%d]: reg_ch_xy1(0x%08x)\n", ovly_ch, val);
val = readl(base + ADE_OVLY_CH_CTL(ovly_ch));
DRM_DEBUG_DRIVER("[overlay ch%d]: reg_ch_ctl(0x%08x)\n", ovly_ch, val);
}
static void ade_dump_overlay_compositor_regs(void __iomem *base, u32 comp)
{
u32 val;
val = ade_read_reload_bit(base, OVLY_OFST + comp);
DRM_DEBUG_DRIVER("[overlay%d]: reload(%d)\n", comp + 1, val);
val = readl(base + ADE_OVLYX_CTL(comp));
DRM_DEBUG_DRIVER("[overlay%d]: reg_ctl(0x%08x)\n", comp + 1, val);
val = readl(base + ADE_OVLY_CTL);
DRM_DEBUG_DRIVER("ovly_ctl(0x%08x)\n", val);
}
static void ade_dump_regs(void __iomem *base)
{
u32 i;
/* dump channel regs */
for (i = 0; i < ADE_CH_NUM; i++) {
/* dump rdma regs */
ade_rdma_dump_regs(base, i);
/* dump clip regs */
ade_clip_dump_regs(base, i);
/* dump compositor routing regs */
ade_compositor_routing_dump_regs(base, i);
}
/* dump overlay compositor regs */
ade_dump_overlay_compositor_regs(base, OUT_OVLY);
}
#else
static void ade_dump_regs(void __iomem *base) { }
#endif
static void ade_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
int ret;
if (kcrtc->enable)
return;
if (!ctx->power_on) {
ret = ade_power_up(ctx);
if (ret)
return;
}
ade_set_medianoc_qos(ctx);
ade_display_enable(ctx);
ade_dump_regs(ctx->base);
drm_crtc_vblank_on(crtc);
kcrtc->enable = true;
}
static void ade_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
if (!kcrtc->enable)
return;
drm_crtc_vblank_off(crtc);
ade_power_down(ctx);
kcrtc->enable = false;
}
static void ade_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
struct drm_display_mode *mode = &crtc->state->mode;
struct drm_display_mode *adj_mode = &crtc->state->adjusted_mode;
if (!ctx->power_on)
(void)ade_power_up(ctx);
ade_ldi_set_mode(ctx, mode, adj_mode);
}
static void ade_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
struct drm_display_mode *mode = &crtc->state->mode;
struct drm_display_mode *adj_mode = &crtc->state->adjusted_mode;
if (!ctx->power_on)
(void)ade_power_up(ctx);
ade_ldi_set_mode(ctx, mode, adj_mode);
}
static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
struct drm_pending_vblank_event *event = crtc->state->event;
void __iomem *base = ctx->base;
	/* registers only take effect while the CRTC is enabled */
if (kcrtc->enable) {
ade_dump_regs(base);
/* flush ade registers */
writel(ADE_ENABLE, base + ADE_EN);
}
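	/*
	 * Deliver any pending pageflip event: arm it for the next vblank if a
	 * vblank reference can be taken, otherwise send it out immediately.
	 */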
if (event) {
crtc->state->event = NULL;
spin_lock_irq(&crtc->dev->event_lock);
if (drm_crtc_vblank_get(crtc) == 0)
drm_crtc_arm_vblank_event(crtc, event);
else
drm_crtc_send_vblank_event(crtc, event);
spin_unlock_irq(&crtc->dev->event_lock);
}
}
static const struct drm_crtc_helper_funcs ade_crtc_helper_funcs = {
.mode_fixup = ade_crtc_mode_fixup,
.mode_set_nofb = ade_crtc_mode_set_nofb,
.atomic_begin = ade_crtc_atomic_begin,
.atomic_flush = ade_crtc_atomic_flush,
.atomic_enable = ade_crtc_atomic_enable,
.atomic_disable = ade_crtc_atomic_disable,
};
static const struct drm_crtc_funcs ade_crtc_funcs = {
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = ade_crtc_enable_vblank,
.disable_vblank = ade_crtc_disable_vblank,
};
static void ade_rdma_set(void __iomem *base, struct drm_framebuffer *fb,
u32 ch, u32 y, u32 in_h, u32 fmt)
{
struct drm_gem_dma_object *obj = drm_fb_dma_get_gem_obj(fb, 0);
u32 reg_ctrl, reg_addr, reg_size, reg_stride, reg_space, reg_en;
u32 stride = fb->pitches[0];
u32 addr = (u32) obj->dma_addr + y * stride;
DRM_DEBUG_DRIVER("rdma%d: (y=%d, height=%d), stride=%d, paddr=0x%x\n",
ch + 1, y, in_h, stride, (u32) obj->dma_addr);
DRM_DEBUG_DRIVER("addr=0x%x, fb:%dx%d, pixel_format=%d(%p4cc)\n",
addr, fb->width, fb->height, fmt,
&fb->format->format);
/* get reg offset */
reg_ctrl = RD_CH_CTRL(ch);
reg_addr = RD_CH_ADDR(ch);
reg_size = RD_CH_SIZE(ch);
reg_stride = RD_CH_STRIDE(ch);
reg_space = RD_CH_SPACE(ch);
reg_en = RD_CH_EN(ch);
/*
* TODO: set rotation
*/
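	/* the pixel format lives in bits [20:16] of the RDMA control register */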
writel((fmt << 16) & 0x1f0000, base + reg_ctrl);
writel(addr, base + reg_addr);
writel((in_h << 16) | stride, base + reg_size);
writel(stride, base + reg_stride);
writel(in_h * stride, base + reg_space);
writel(ADE_ENABLE, base + reg_en);
ade_update_reload_bit(base, RDMA_OFST + ch, 0);
}
static void ade_rdma_disable(void __iomem *base, u32 ch)
{
u32 reg_en;
/* get reg offset */
reg_en = RD_CH_EN(ch);
writel(0, base + reg_en);
ade_update_reload_bit(base, RDMA_OFST + ch, 1);
}
static void ade_clip_set(void __iomem *base, u32 ch, u32 fb_w, u32 x,
u32 in_w, u32 in_h)
{
u32 disable_val;
u32 clip_left;
u32 clip_right;
/*
* clip width, no need to clip height
*/
if (fb_w == in_w) { /* bypass */
disable_val = 1;
clip_left = 0;
clip_right = 0;
} else {
disable_val = 0;
clip_left = x;
clip_right = fb_w - (x + in_w) - 1;
}
DRM_DEBUG_DRIVER("clip%d: clip_left=%d, clip_right=%d\n",
ch + 1, clip_left, clip_right);
writel(disable_val, base + ADE_CLIP_DISABLE(ch));
writel((fb_w - 1) << 16 | (in_h - 1), base + ADE_CLIP_SIZE0(ch));
writel(clip_left << 16 | clip_right, base + ADE_CLIP_SIZE1(ch));
ade_update_reload_bit(base, CLIP_OFST + ch, 0);
}
static void ade_clip_disable(void __iomem *base, u32 ch)
{
writel(1, base + ADE_CLIP_DISABLE(ch));
ade_update_reload_bit(base, CLIP_OFST + ch, 1);
}
static bool has_alpha_channel(int format)
{
switch (format) {
case ADE_ARGB_8888:
case ADE_ABGR_8888:
case ADE_RGBA_8888:
case ADE_BGRA_8888:
return true;
default:
return false;
}
}
static void ade_get_blending_params(u32 fmt, u8 glb_alpha, u8 *alp_mode,
u8 *alp_sel, u8 *under_alp_sel)
{
	bool has_alpha = has_alpha_channel(fmt);
/*
* get alp_mode
*/
if (has_alpha && glb_alpha < 255)
*alp_mode = ADE_ALP_PIXEL_AND_GLB;
else if (has_alpha)
*alp_mode = ADE_ALP_PIXEL;
else
*alp_mode = ADE_ALP_GLOBAL;
/*
* get alp sel
*/
*alp_sel = ADE_ALP_MUL_COEFF_3; /* 1 */
*under_alp_sel = ADE_ALP_MUL_COEFF_2; /* 0 */
}
static void ade_compositor_routing_set(void __iomem *base, u8 ch,
u32 x0, u32 y0,
u32 in_w, u32 in_h, u32 fmt)
{
u8 ovly_ch = 0; /* TODO: This is the zpos, only one plane now */
u8 glb_alpha = 255;
u32 x1 = x0 + in_w - 1;
u32 y1 = y0 + in_h - 1;
u32 val;
u8 alp_sel;
u8 under_alp_sel;
u8 alp_mode;
ade_get_blending_params(fmt, glb_alpha, &alp_mode, &alp_sel,
&under_alp_sel);
	/* overlay routing setting */
writel(x0 << 16 | y0, base + ADE_OVLY_CH_XY0(ovly_ch));
writel(x1 << 16 | y1, base + ADE_OVLY_CH_XY1(ovly_ch));
val = (ch + 1) << CH_SEL_OFST | BIT(CH_EN_OFST) |
alp_sel << CH_ALP_SEL_OFST |
under_alp_sel << CH_UNDER_ALP_SEL_OFST |
glb_alpha << CH_ALP_GBL_OFST |
alp_mode << CH_ALP_MODE_OFST;
writel(val, base + ADE_OVLY_CH_CTL(ovly_ch));
/* connect this plane/channel to overlay2 compositor */
ade_update_bits(base + ADE_OVLY_CTL, CH_OVLY_SEL_OFST(ovly_ch),
CH_OVLY_SEL_MASK, CH_OVLY_SEL_VAL(OUT_OVLY));
}
static void ade_compositor_routing_disable(void __iomem *base, u32 ch)
{
u8 ovly_ch = 0; /* TODO: Only primary plane now */
/* disable this plane/channel */
ade_update_bits(base + ADE_OVLY_CH_CTL(ovly_ch), CH_EN_OFST,
MASK(1), 0);
/* dis-connect this plane/channel of overlay2 compositor */
ade_update_bits(base + ADE_OVLY_CTL, CH_OVLY_SEL_OFST(ovly_ch),
CH_OVLY_SEL_MASK, 0);
}
/*
 * Typically, a channel looks like: DMA-->clip-->scale-->ctrans-->compositor
*/
static void ade_update_channel(struct kirin_plane *kplane,
struct drm_framebuffer *fb, int crtc_x,
int crtc_y, unsigned int crtc_w,
unsigned int crtc_h, u32 src_x,
u32 src_y, u32 src_w, u32 src_h)
{
struct ade_hw_ctx *ctx = kplane->hw_ctx;
void __iomem *base = ctx->base;
u32 fmt = ade_get_format(fb->format->format);
u32 ch = kplane->ch;
u32 in_w;
u32 in_h;
DRM_DEBUG_DRIVER("channel%d: src:(%d, %d)-%dx%d, crtc:(%d, %d)-%dx%d",
ch + 1, src_x, src_y, src_w, src_h,
crtc_x, crtc_y, crtc_w, crtc_h);
/* 1) DMA setting */
in_w = src_w;
in_h = src_h;
ade_rdma_set(base, fb, ch, src_y, in_h, fmt);
/* 2) clip setting */
ade_clip_set(base, ch, fb->width, src_x, in_w, in_h);
/* 3) TODO: scale setting for overlay planes */
/* 4) TODO: ctran/csc setting for overlay planes */
/* 5) compositor routing setting */
ade_compositor_routing_set(base, ch, crtc_x, crtc_y, in_w, in_h, fmt);
}
static void ade_disable_channel(struct kirin_plane *kplane)
{
struct ade_hw_ctx *ctx = kplane->hw_ctx;
void __iomem *base = ctx->base;
u32 ch = kplane->ch;
DRM_DEBUG_DRIVER("disable channel%d\n", ch + 1);
/* disable read DMA */
ade_rdma_disable(base, ch);
/* disable clip */
ade_clip_disable(base, ch);
/* disable compositor routing */
ade_compositor_routing_disable(base, ch);
}
static int ade_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_framebuffer *fb = new_plane_state->fb;
struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_crtc_state *crtc_state;
u32 src_x = new_plane_state->src_x >> 16;
u32 src_y = new_plane_state->src_y >> 16;
u32 src_w = new_plane_state->src_w >> 16;
u32 src_h = new_plane_state->src_h >> 16;
int crtc_x = new_plane_state->crtc_x;
int crtc_y = new_plane_state->crtc_y;
u32 crtc_w = new_plane_state->crtc_w;
u32 crtc_h = new_plane_state->crtc_h;
u32 fmt;
if (!crtc || !fb)
return 0;
fmt = ade_get_format(fb->format->format);
if (fmt == ADE_FORMAT_UNSUPPORT)
return -EINVAL;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
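	/* scaling is not implemented, so the source and CRTC sizes must match */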
if (src_w != crtc_w || src_h != crtc_h) {
return -EINVAL;
}
if (src_x + src_w > fb->width ||
src_y + src_h > fb->height)
return -EINVAL;
if (crtc_x < 0 || crtc_y < 0)
return -EINVAL;
if (crtc_x + crtc_w > crtc_state->adjusted_mode.hdisplay ||
crtc_y + crtc_h > crtc_state->adjusted_mode.vdisplay)
return -EINVAL;
return 0;
}
static void ade_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct kirin_plane *kplane = to_kirin_plane(plane);
ade_update_channel(kplane, new_state->fb, new_state->crtc_x,
new_state->crtc_y,
new_state->crtc_w, new_state->crtc_h,
new_state->src_x >> 16, new_state->src_y >> 16,
new_state->src_w >> 16, new_state->src_h >> 16);
}
static void ade_plane_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct kirin_plane *kplane = to_kirin_plane(plane);
ade_disable_channel(kplane);
}
static const struct drm_plane_helper_funcs ade_plane_helper_funcs = {
.atomic_check = ade_plane_atomic_check,
.atomic_update = ade_plane_atomic_update,
.atomic_disable = ade_plane_atomic_disable,
};
static struct drm_plane_funcs ade_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
static void *ade_hw_ctx_alloc(struct platform_device *pdev,
struct drm_crtc *crtc)
{
struct resource *res;
struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
struct ade_hw_ctx *ctx = NULL;
int ret;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
DRM_ERROR("failed to alloc ade_hw_ctx\n");
return ERR_PTR(-ENOMEM);
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ctx->base = devm_ioremap_resource(dev, res);
if (IS_ERR(ctx->base)) {
DRM_ERROR("failed to remap ade io base\n");
return ERR_PTR(-EIO);
}
ctx->reset = devm_reset_control_get(dev, NULL);
if (IS_ERR(ctx->reset))
return ERR_PTR(-ENODEV);
ctx->noc_regmap =
syscon_regmap_lookup_by_phandle(np, "hisilicon,noc-syscon");
if (IS_ERR(ctx->noc_regmap)) {
DRM_ERROR("failed to get noc regmap\n");
return ERR_PTR(-ENODEV);
}
ctx->irq = platform_get_irq(pdev, 0);
if (ctx->irq < 0) {
DRM_ERROR("failed to get irq\n");
return ERR_PTR(-ENODEV);
}
ctx->ade_core_clk = devm_clk_get(dev, "clk_ade_core");
if (IS_ERR(ctx->ade_core_clk)) {
DRM_ERROR("failed to parse clk ADE_CORE\n");
return ERR_PTR(-ENODEV);
}
ctx->media_noc_clk = devm_clk_get(dev, "clk_codec_jpeg");
if (IS_ERR(ctx->media_noc_clk)) {
DRM_ERROR("failed to parse clk CODEC_JPEG\n");
return ERR_PTR(-ENODEV);
}
ctx->ade_pix_clk = devm_clk_get(dev, "clk_ade_pix");
if (IS_ERR(ctx->ade_pix_clk)) {
DRM_ERROR("failed to parse clk ADE_PIX\n");
return ERR_PTR(-ENODEV);
}
/* vblank irq init */
ret = devm_request_irq(dev, ctx->irq, ade_irq_handler,
IRQF_SHARED, dev->driver->name, ctx);
if (ret)
return ERR_PTR(-EIO);
ctx->crtc = crtc;
return ctx;
}
static void ade_hw_ctx_cleanup(void *hw_ctx)
{
}
static const struct drm_mode_config_funcs ade_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
DEFINE_DRM_GEM_DMA_FOPS(ade_fops);
static const struct drm_driver ade_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ade_fops,
DRM_GEM_DMA_DRIVER_OPS,
.name = "kirin",
.desc = "Hisilicon Kirin620 SoC DRM Driver",
.date = "20150718",
.major = 1,
.minor = 0,
};
struct kirin_drm_data ade_driver_data = {
.num_planes = ADE_CH_NUM,
.prim_plane = ADE_CH1,
.channel_formats = channel_formats,
.channel_formats_cnt = ARRAY_SIZE(channel_formats),
.config_max_width = 2048,
.config_max_height = 2048,
.driver = &ade_driver,
.crtc_helper_funcs = &ade_crtc_helper_funcs,
.crtc_funcs = &ade_crtc_funcs,
.plane_helper_funcs = &ade_plane_helper_funcs,
.plane_funcs = &ade_plane_funcs,
.mode_config_funcs = &ade_mode_config_funcs,
.alloc_hw_ctx = ade_hw_ctx_alloc,
.cleanup_hw_ctx = ade_hw_ctx_cleanup,
};
| linux-master | drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include "msm_gpu.h"
#include "msm_gpu_trace.h"
#include <linux/devfreq.h>
#include <linux/devfreq_cooling.h>
#include <linux/math64.h>
#include <linux/units.h>
/*
* Power Management:
*/
static int msm_devfreq_target(struct device *dev, unsigned long *freq,
u32 flags)
{
struct msm_gpu *gpu = dev_to_gpu(dev);
struct msm_gpu_devfreq *df = &gpu->devfreq;
struct dev_pm_opp *opp;
/*
* Note that devfreq_recommended_opp() can modify the freq
* to something that actually is in the opp table:
*/
opp = devfreq_recommended_opp(dev, freq, flags);
if (IS_ERR(opp))
return PTR_ERR(opp);
trace_msm_gpu_freq_change(dev_pm_opp_get_freq(opp));
/*
* If the GPU is idle, devfreq is not aware, so just stash
* the new target freq (to use when we return to active)
*/
if (df->idle_freq) {
df->idle_freq = *freq;
dev_pm_opp_put(opp);
return 0;
}
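	/*
	 * Prefer the GPU-specific frequency callback when one is provided,
	 * otherwise fall back to the generic OPP rate setting.
	 */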
if (gpu->funcs->gpu_set_freq) {
mutex_lock(&df->lock);
gpu->funcs->gpu_set_freq(gpu, opp, df->suspended);
mutex_unlock(&df->lock);
} else {
dev_pm_opp_set_rate(dev, *freq);
}
dev_pm_opp_put(opp);
return 0;
}
static unsigned long get_freq(struct msm_gpu *gpu)
{
struct msm_gpu_devfreq *df = &gpu->devfreq;
/*
* If the GPU is idle, use the shadow/saved freq to avoid
* confusing devfreq (which is unaware that we are switching
* to lowest freq until the device is active again)
*/
if (df->idle_freq)
return df->idle_freq;
if (gpu->funcs->gpu_get_freq)
return gpu->funcs->gpu_get_freq(gpu);
return clk_get_rate(gpu->core_clk);
}
static int msm_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *status)
{
struct msm_gpu *gpu = dev_to_gpu(dev);
struct msm_gpu_devfreq *df = &gpu->devfreq;
u64 busy_cycles, busy_time;
unsigned long sample_rate;
ktime_t time;
mutex_lock(&df->lock);
status->current_frequency = get_freq(gpu);
time = ktime_get();
status->total_time = ktime_us_delta(time, df->time);
df->time = time;
if (df->suspended) {
mutex_unlock(&df->lock);
status->busy_time = 0;
return 0;
}
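	/* convert the busy-cycle delta into microseconds of busy time */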
busy_cycles = gpu->funcs->gpu_busy(gpu, &sample_rate);
busy_time = busy_cycles - df->busy_cycles;
df->busy_cycles = busy_cycles;
mutex_unlock(&df->lock);
busy_time *= USEC_PER_SEC;
busy_time = div64_ul(busy_time, sample_rate);
if (WARN_ON(busy_time > ~0LU))
busy_time = ~0LU;
status->busy_time = busy_time;
return 0;
}
static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
{
*freq = get_freq(dev_to_gpu(dev));
return 0;
}
static struct devfreq_dev_profile msm_devfreq_profile = {
.timer = DEVFREQ_TIMER_DELAYED,
.polling_ms = 50,
.target = msm_devfreq_target,
.get_dev_status = msm_devfreq_get_dev_status,
.get_cur_freq = msm_devfreq_get_cur_freq,
};
static void msm_devfreq_boost_work(struct kthread_work *work);
static void msm_devfreq_idle_work(struct kthread_work *work);
static bool has_devfreq(struct msm_gpu *gpu)
{
struct msm_gpu_devfreq *df = &gpu->devfreq;
return !!df->devfreq;
}
void msm_devfreq_init(struct msm_gpu *gpu)
{
struct msm_gpu_devfreq *df = &gpu->devfreq;
struct msm_drm_private *priv = gpu->dev->dev_private;
/* We need target support to do devfreq */
if (!gpu->funcs->gpu_busy)
return;
/*
* Setup default values for simple_ondemand governor tuning. We
* want to throttle up at 50% load for the double-buffer case,
* where due to stalling waiting for vblank we could get stuck
* at (for ex) 30fps at 50% utilization.
*/
priv->gpu_devfreq_config.upthreshold = 50;
priv->gpu_devfreq_config.downdifferential = 10;
mutex_init(&df->lock);
dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq,
DEV_PM_QOS_MIN_FREQUENCY, 0);
msm_devfreq_profile.initial_freq = gpu->fast_rate;
/*
	 * Don't set the freq_table or max_state and let devfreq build the
	 * table from the OPP table. After a deferred probe, these may have
	 * been left at non-zero values, so set them back to zero before
	 * creating the devfreq device.
*/
msm_devfreq_profile.freq_table = NULL;
msm_devfreq_profile.max_state = 0;
df->devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
&msm_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND,
&priv->gpu_devfreq_config);
if (IS_ERR(df->devfreq)) {
DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
dev_pm_qos_remove_request(&df->boost_freq);
df->devfreq = NULL;
return;
}
devfreq_suspend_device(df->devfreq);
gpu->cooling = of_devfreq_cooling_register(gpu->pdev->dev.of_node, df->devfreq);
if (IS_ERR(gpu->cooling)) {
DRM_DEV_ERROR(&gpu->pdev->dev,
"Couldn't register GPU cooling device\n");
gpu->cooling = NULL;
}
msm_hrtimer_work_init(&df->boost_work, gpu->worker, msm_devfreq_boost_work,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
msm_hrtimer_work_init(&df->idle_work, gpu->worker, msm_devfreq_idle_work,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
static void cancel_idle_work(struct msm_gpu_devfreq *df)
{
hrtimer_cancel(&df->idle_work.timer);
kthread_cancel_work_sync(&df->idle_work.work);
}
static void cancel_boost_work(struct msm_gpu_devfreq *df)
{
hrtimer_cancel(&df->boost_work.timer);
kthread_cancel_work_sync(&df->boost_work.work);
}
void msm_devfreq_cleanup(struct msm_gpu *gpu)
{
struct msm_gpu_devfreq *df = &gpu->devfreq;
if (!has_devfreq(gpu))
return;
devfreq_cooling_unregister(gpu->cooling);
dev_pm_qos_remove_request(&df->boost_freq);
}
void msm_devfreq_resume(struct msm_gpu *gpu)
{
struct msm_gpu_devfreq *df = &gpu->devfreq;
unsigned long sample_rate;
if (!has_devfreq(gpu))
return;
mutex_lock(&df->lock);
df->busy_cycles = gpu->funcs->gpu_busy(gpu, &sample_rate);
df->time = ktime_get();
df->suspended = false;
mutex_unlock(&df->lock);
devfreq_resume_device(df->devfreq);
}
void msm_devfreq_suspend(struct msm_gpu *gpu)
{
struct msm_gpu_devfreq *df = &gpu->devfreq;
if (!has_devfreq(gpu))
return;
mutex_lock(&df->lock);
df->suspended = true;
mutex_unlock(&df->lock);
devfreq_suspend_device(df->devfreq);
cancel_idle_work(df);
cancel_boost_work(df);
}
static void msm_devfreq_boost_work(struct kthread_work *work)
{
struct msm_gpu_devfreq *df = container_of(work,
struct msm_gpu_devfreq, boost_work.work);
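	/* the boost period has expired, drop the min-frequency QoS request */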
dev_pm_qos_update_request(&df->boost_freq, 0);
}
void msm_devfreq_boost(struct msm_gpu *gpu, unsigned factor)
{
struct msm_gpu_devfreq *df = &gpu->devfreq;
uint64_t freq;
if (!has_devfreq(gpu))
return;
freq = get_freq(gpu);
freq *= factor;
/*
* A nice little trap is that PM QoS operates in terms of KHz,
* while devfreq operates in terms of Hz:
*/
do_div(freq, HZ_PER_KHZ);
dev_pm_qos_update_request(&df->boost_freq, freq);
msm_hrtimer_queue_work(&df->boost_work,
ms_to_ktime(msm_devfreq_profile.polling_ms),
HRTIMER_MODE_REL);
}
void msm_devfreq_active(struct msm_gpu *gpu)
{
struct msm_gpu_devfreq *df = &gpu->devfreq;
unsigned int idle_time;
unsigned long target_freq;
if (!has_devfreq(gpu))
return;
/*
* Cancel any pending transition to idle frequency:
*/
cancel_idle_work(df);
/*
* Hold devfreq lock to synchronize with get_dev_status()/
* target() callbacks
*/
mutex_lock(&df->devfreq->lock);
target_freq = df->idle_freq;
idle_time = ktime_to_ms(ktime_sub(ktime_get(), df->idle_time));
df->idle_freq = 0;
/*
* We could have become active again before the idle work had a
* chance to run, in which case the df->idle_freq would have
* still been zero. In this case, no need to change freq.
*/
if (target_freq)
msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
mutex_unlock(&df->devfreq->lock);
/*
* If we've been idle for a significant fraction of a polling
* interval, then we won't meet the threshold of busyness for
* the governor to ramp up the freq.. so give some boost
*/
if (idle_time > msm_devfreq_profile.polling_ms) {
msm_devfreq_boost(gpu, 2);
}
}
static void msm_devfreq_idle_work(struct kthread_work *work)
{
struct msm_gpu_devfreq *df = container_of(work,
struct msm_gpu_devfreq, idle_work.work);
struct msm_gpu *gpu = container_of(df, struct msm_gpu, devfreq);
struct msm_drm_private *priv = gpu->dev->dev_private;
unsigned long idle_freq, target_freq = 0;
/*
* Hold devfreq lock to synchronize with get_dev_status()/
* target() callbacks
*/
mutex_lock(&df->devfreq->lock);
idle_freq = get_freq(gpu);
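	/* if clamp-to-idle is enabled, drop the GPU to its lowest OPP while idle */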
if (priv->gpu_clamp_to_idle)
msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
df->idle_time = ktime_get();
df->idle_freq = idle_freq;
mutex_unlock(&df->devfreq->lock);
}
void msm_devfreq_idle(struct msm_gpu *gpu)
{
struct msm_gpu_devfreq *df = &gpu->devfreq;
if (!has_devfreq(gpu))
return;
msm_hrtimer_queue_work(&df->idle_work, ms_to_ktime(1),
HRTIMER_MODE_REL);
}
| linux-master | drivers/gpu/drm/msm/msm_gpu_devfreq.c |
/*
* SPDX-License-Identifier: GPL-2.0
* Copyright (c) 2018, The Linux Foundation
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interconnect.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdesc.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include "msm_mdss.h"
#include "msm_kms.h"
#define HW_REV 0x0
#define HW_INTR_STATUS 0x0010
#define UBWC_DEC_HW_VERSION 0x58
#define UBWC_STATIC 0x144
#define UBWC_CTRL_2 0x150
#define UBWC_PREDICTION_MODE 0x154
#define MIN_IB_BW 400000000UL /* Min ib vote 400MB */
struct msm_mdss {
struct device *dev;
void __iomem *mmio;
struct clk_bulk_data *clocks;
size_t num_clocks;
bool is_mdp5;
struct {
unsigned long enabled_mask;
struct irq_domain *domain;
} irq_controller;
const struct msm_mdss_data *mdss_data;
struct icc_path *path[2];
u32 num_paths;
};
static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
struct msm_mdss *msm_mdss)
{
struct icc_path *path0;
struct icc_path *path1;
path0 = of_icc_get(dev, "mdp0-mem");
if (IS_ERR_OR_NULL(path0))
return PTR_ERR_OR_ZERO(path0);
msm_mdss->path[0] = path0;
msm_mdss->num_paths = 1;
path1 = of_icc_get(dev, "mdp1-mem");
if (!IS_ERR_OR_NULL(path1)) {
msm_mdss->path[1] = path1;
msm_mdss->num_paths++;
}
return 0;
}
static void msm_mdss_put_icc_path(void *data)
{
struct msm_mdss *msm_mdss = data;
int i;
for (i = 0; i < msm_mdss->num_paths; i++)
icc_put(msm_mdss->path[i]);
}
static void msm_mdss_icc_request_bw(struct msm_mdss *msm_mdss, unsigned long bw)
{
int i;
for (i = 0; i < msm_mdss->num_paths; i++)
icc_set_bw(msm_mdss->path[i], 0, Bps_to_icc(bw));
}
static void msm_mdss_irq(struct irq_desc *desc)
{
struct msm_mdss *msm_mdss = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
u32 interrupts;
chained_irq_enter(chip, desc);
interrupts = readl_relaxed(msm_mdss->mmio + HW_INTR_STATUS);
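	/*
	 * Dispatch each pending status bit to the child handler registered in
	 * the MDSS irq domain, highest bit first.
	 */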
while (interrupts) {
irq_hw_number_t hwirq = fls(interrupts) - 1;
int rc;
rc = generic_handle_domain_irq(msm_mdss->irq_controller.domain,
hwirq);
if (rc < 0) {
dev_err(msm_mdss->dev, "handle irq fail: irq=%lu rc=%d\n",
hwirq, rc);
break;
}
interrupts &= ~(1 << hwirq);
}
chained_irq_exit(chip, desc);
}
static void msm_mdss_irq_mask(struct irq_data *irqd)
{
struct msm_mdss *msm_mdss = irq_data_get_irq_chip_data(irqd);
/* memory barrier */
smp_mb__before_atomic();
clear_bit(irqd->hwirq, &msm_mdss->irq_controller.enabled_mask);
/* memory barrier */
smp_mb__after_atomic();
}
static void msm_mdss_irq_unmask(struct irq_data *irqd)
{
struct msm_mdss *msm_mdss = irq_data_get_irq_chip_data(irqd);
/* memory barrier */
smp_mb__before_atomic();
set_bit(irqd->hwirq, &msm_mdss->irq_controller.enabled_mask);
/* memory barrier */
smp_mb__after_atomic();
}
static struct irq_chip msm_mdss_irq_chip = {
.name = "msm_mdss",
.irq_mask = msm_mdss_irq_mask,
.irq_unmask = msm_mdss_irq_unmask,
};
static struct lock_class_key msm_mdss_lock_key, msm_mdss_request_key;
static int msm_mdss_irqdomain_map(struct irq_domain *domain,
unsigned int irq, irq_hw_number_t hwirq)
{
struct msm_mdss *msm_mdss = domain->host_data;
irq_set_lockdep_class(irq, &msm_mdss_lock_key, &msm_mdss_request_key);
irq_set_chip_and_handler(irq, &msm_mdss_irq_chip, handle_level_irq);
return irq_set_chip_data(irq, msm_mdss);
}
static const struct irq_domain_ops msm_mdss_irqdomain_ops = {
.map = msm_mdss_irqdomain_map,
.xlate = irq_domain_xlate_onecell,
};
static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss)
{
struct device *dev;
struct irq_domain *domain;
dev = msm_mdss->dev;
domain = irq_domain_add_linear(dev->of_node, 32,
&msm_mdss_irqdomain_ops, msm_mdss);
if (!domain) {
dev_err(dev, "failed to add irq_domain\n");
return -EINVAL;
}
msm_mdss->irq_controller.enabled_mask = 0;
msm_mdss->irq_controller.domain = domain;
return 0;
}
static void msm_mdss_setup_ubwc_dec_20(struct msm_mdss *msm_mdss)
{
const struct msm_mdss_data *data = msm_mdss->mdss_data;
writel_relaxed(data->ubwc_static, msm_mdss->mmio + UBWC_STATIC);
}
static void msm_mdss_setup_ubwc_dec_30(struct msm_mdss *msm_mdss)
{
const struct msm_mdss_data *data = msm_mdss->mdss_data;
u32 value = (data->ubwc_swizzle & 0x1) |
(data->highest_bank_bit & 0x3) << 4 |
(data->macrotile_mode & 0x1) << 12;
if (data->ubwc_enc_version == UBWC_3_0)
value |= BIT(10);
if (data->ubwc_enc_version == UBWC_1_0)
value |= BIT(8);
writel_relaxed(value, msm_mdss->mmio + UBWC_STATIC);
}
static void msm_mdss_setup_ubwc_dec_40(struct msm_mdss *msm_mdss)
{
const struct msm_mdss_data *data = msm_mdss->mdss_data;
u32 value = (data->ubwc_swizzle & 0x7) |
(data->ubwc_static & 0x1) << 3 |
(data->highest_bank_bit & 0x7) << 4 |
(data->macrotile_mode & 0x1) << 12;
writel_relaxed(value, msm_mdss->mmio + UBWC_STATIC);
if (data->ubwc_enc_version == UBWC_3_0) {
writel_relaxed(1, msm_mdss->mmio + UBWC_CTRL_2);
writel_relaxed(0, msm_mdss->mmio + UBWC_PREDICTION_MODE);
} else {
if (data->ubwc_dec_version == UBWC_4_3)
writel_relaxed(3, msm_mdss->mmio + UBWC_CTRL_2);
else
writel_relaxed(2, msm_mdss->mmio + UBWC_CTRL_2);
writel_relaxed(1, msm_mdss->mmio + UBWC_PREDICTION_MODE);
}
}
const struct msm_mdss_data *msm_mdss_get_mdss_data(struct device *dev)
{
struct msm_mdss *mdss;
if (!dev)
return ERR_PTR(-EINVAL);
mdss = dev_get_drvdata(dev);
return mdss->mdss_data;
}
static int msm_mdss_enable(struct msm_mdss *msm_mdss)
{
int ret;
/*
* Several components have AXI clocks that can only be turned on if
* the interconnect is enabled (non-zero bandwidth). Let's make sure
* that the interconnects are at least at a minimum amount.
*/
msm_mdss_icc_request_bw(msm_mdss, MIN_IB_BW);
ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks);
if (ret) {
dev_err(msm_mdss->dev, "clock enable failed, ret:%d\n", ret);
return ret;
}
/*
* Register access requires MDSS_MDP_CLK, which is not enabled by the
* mdss on mdp5 hardware. Skip it for now.
*/
if (msm_mdss->is_mdp5 || !msm_mdss->mdss_data)
return 0;
/*
	 * The UBWC config is part of the "mdss" region, which is not
	 * accessible from the rest of the driver, so hardcode the known
	 * configurations here.
	 *
	 * The decoder version could be read from the UBWC_DEC_HW_VERSION reg,
	 * but UBWC_n and the rest of the params come from the hw data.
*/
switch (msm_mdss->mdss_data->ubwc_dec_version) {
case 0: /* no UBWC */
case UBWC_1_0:
/* do nothing */
break;
case UBWC_2_0:
msm_mdss_setup_ubwc_dec_20(msm_mdss);
break;
case UBWC_3_0:
msm_mdss_setup_ubwc_dec_30(msm_mdss);
break;
case UBWC_4_0:
case UBWC_4_3:
msm_mdss_setup_ubwc_dec_40(msm_mdss);
break;
default:
dev_err(msm_mdss->dev, "Unsupported UBWC decoder version %x\n",
msm_mdss->mdss_data->ubwc_dec_version);
dev_err(msm_mdss->dev, "HW_REV: 0x%x\n",
readl_relaxed(msm_mdss->mmio + HW_REV));
dev_err(msm_mdss->dev, "UBWC_DEC_HW_VERSION: 0x%x\n",
readl_relaxed(msm_mdss->mmio + UBWC_DEC_HW_VERSION));
break;
}
return ret;
}
static int msm_mdss_disable(struct msm_mdss *msm_mdss)
{
clk_bulk_disable_unprepare(msm_mdss->num_clocks, msm_mdss->clocks);
msm_mdss_icc_request_bw(msm_mdss, 0);
return 0;
}
static void msm_mdss_destroy(struct msm_mdss *msm_mdss)
{
struct platform_device *pdev = to_platform_device(msm_mdss->dev);
int irq;
pm_runtime_suspend(msm_mdss->dev);
pm_runtime_disable(msm_mdss->dev);
irq_domain_remove(msm_mdss->irq_controller.domain);
msm_mdss->irq_controller.domain = NULL;
irq = platform_get_irq(pdev, 0);
irq_set_chained_handler_and_data(irq, NULL, NULL);
}
static int msm_mdss_reset(struct device *dev)
{
struct reset_control *reset;
reset = reset_control_get_optional_exclusive(dev, NULL);
if (!reset) {
/* Optional reset not specified */
return 0;
} else if (IS_ERR(reset)) {
return dev_err_probe(dev, PTR_ERR(reset),
"failed to acquire mdss reset\n");
}
reset_control_assert(reset);
/*
	 * Tests indicate that the reset has to be held for some period of
	 * time; make it one frame time on a typical system.
*/
msleep(20);
reset_control_deassert(reset);
reset_control_put(reset);
return 0;
}
/*
* MDP5 MDSS uses at most three specified clocks.
*/
#define MDP5_MDSS_NUM_CLOCKS 3
static int mdp5_mdss_parse_clock(struct platform_device *pdev, struct clk_bulk_data **clocks)
{
struct clk_bulk_data *bulk;
int num_clocks = 0;
int ret;
if (!pdev)
return -EINVAL;
bulk = devm_kcalloc(&pdev->dev, MDP5_MDSS_NUM_CLOCKS, sizeof(struct clk_bulk_data), GFP_KERNEL);
if (!bulk)
return -ENOMEM;
bulk[num_clocks++].id = "iface";
bulk[num_clocks++].id = "bus";
bulk[num_clocks++].id = "vsync";
ret = devm_clk_bulk_get_optional(&pdev->dev, num_clocks, bulk);
if (ret)
return ret;
*clocks = bulk;
return num_clocks;
}
static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5)
{
struct msm_mdss *msm_mdss;
int ret;
int irq;
ret = msm_mdss_reset(&pdev->dev);
if (ret)
return ERR_PTR(ret);
msm_mdss = devm_kzalloc(&pdev->dev, sizeof(*msm_mdss), GFP_KERNEL);
if (!msm_mdss)
return ERR_PTR(-ENOMEM);
msm_mdss->mmio = devm_platform_ioremap_resource_byname(pdev, is_mdp5 ? "mdss_phys" : "mdss");
if (IS_ERR(msm_mdss->mmio))
return ERR_CAST(msm_mdss->mmio);
dev_dbg(&pdev->dev, "mapped mdss address space @%pK\n", msm_mdss->mmio);
ret = msm_mdss_parse_data_bus_icc_path(&pdev->dev, msm_mdss);
if (ret)
return ERR_PTR(ret);
ret = devm_add_action_or_reset(&pdev->dev, msm_mdss_put_icc_path, msm_mdss);
if (ret)
return ERR_PTR(ret);
if (is_mdp5)
ret = mdp5_mdss_parse_clock(pdev, &msm_mdss->clocks);
else
ret = devm_clk_bulk_get_all(&pdev->dev, &msm_mdss->clocks);
if (ret < 0) {
dev_err(&pdev->dev, "failed to parse clocks, ret=%d\n", ret);
return ERR_PTR(ret);
}
msm_mdss->num_clocks = ret;
msm_mdss->is_mdp5 = is_mdp5;
msm_mdss->dev = &pdev->dev;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return ERR_PTR(irq);
ret = _msm_mdss_irq_domain_add(msm_mdss);
if (ret)
return ERR_PTR(ret);
irq_set_chained_handler_and_data(irq, msm_mdss_irq,
msm_mdss);
pm_runtime_enable(&pdev->dev);
return msm_mdss;
}
static int __maybe_unused mdss_runtime_suspend(struct device *dev)
{
struct msm_mdss *mdss = dev_get_drvdata(dev);
DBG("");
return msm_mdss_disable(mdss);
}
static int __maybe_unused mdss_runtime_resume(struct device *dev)
{
struct msm_mdss *mdss = dev_get_drvdata(dev);
DBG("");
return msm_mdss_enable(mdss);
}
static int __maybe_unused mdss_pm_suspend(struct device *dev)
{
if (pm_runtime_suspended(dev))
return 0;
return mdss_runtime_suspend(dev);
}
static int __maybe_unused mdss_pm_resume(struct device *dev)
{
if (pm_runtime_suspended(dev))
return 0;
return mdss_runtime_resume(dev);
}
static const struct dev_pm_ops mdss_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(mdss_pm_suspend, mdss_pm_resume)
SET_RUNTIME_PM_OPS(mdss_runtime_suspend, mdss_runtime_resume, NULL)
};
static int mdss_probe(struct platform_device *pdev)
{
struct msm_mdss *mdss;
bool is_mdp5 = of_device_is_compatible(pdev->dev.of_node, "qcom,mdss");
struct device *dev = &pdev->dev;
int ret;
mdss = msm_mdss_init(pdev, is_mdp5);
if (IS_ERR(mdss))
return PTR_ERR(mdss);
mdss->mdss_data = of_device_get_match_data(&pdev->dev);
platform_set_drvdata(pdev, mdss);
/*
* MDP5/DPU based devices don't have a flat hierarchy. There is a top
* level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc.
* Populate the children devices, find the MDP5/DPU node, and then add
* the interfaces to our components list.
*/
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (ret) {
DRM_DEV_ERROR(dev, "failed to populate children devices\n");
msm_mdss_destroy(mdss);
return ret;
}
return 0;
}
static int mdss_remove(struct platform_device *pdev)
{
struct msm_mdss *mdss = platform_get_drvdata(pdev);
of_platform_depopulate(&pdev->dev);
msm_mdss_destroy(mdss);
return 0;
}
static const struct msm_mdss_data msm8998_data = {
.ubwc_enc_version = UBWC_1_0,
.ubwc_dec_version = UBWC_1_0,
.highest_bank_bit = 1,
};
static const struct msm_mdss_data qcm2290_data = {
/* no UBWC */
.highest_bank_bit = 0x2,
};
static const struct msm_mdss_data sc7180_data = {
.ubwc_enc_version = UBWC_2_0,
.ubwc_dec_version = UBWC_2_0,
.ubwc_static = 0x1e,
.highest_bank_bit = 0x3,
};
static const struct msm_mdss_data sc7280_data = {
.ubwc_enc_version = UBWC_3_0,
.ubwc_dec_version = UBWC_4_0,
.ubwc_swizzle = 6,
.ubwc_static = 1,
.highest_bank_bit = 1,
.macrotile_mode = 1,
};
static const struct msm_mdss_data sc8180x_data = {
.ubwc_enc_version = UBWC_3_0,
.ubwc_dec_version = UBWC_3_0,
.highest_bank_bit = 3,
.macrotile_mode = 1,
};
static const struct msm_mdss_data sc8280xp_data = {
.ubwc_enc_version = UBWC_4_0,
.ubwc_dec_version = UBWC_4_0,
.ubwc_swizzle = 6,
.ubwc_static = 1,
.highest_bank_bit = 2,
.macrotile_mode = 1,
};
static const struct msm_mdss_data sdm845_data = {
.ubwc_enc_version = UBWC_2_0,
.ubwc_dec_version = UBWC_2_0,
.highest_bank_bit = 2,
};
static const struct msm_mdss_data sm6350_data = {
.ubwc_enc_version = UBWC_2_0,
.ubwc_dec_version = UBWC_2_0,
.ubwc_swizzle = 6,
.ubwc_static = 0x1e,
.highest_bank_bit = 1,
};
static const struct msm_mdss_data sm8150_data = {
.ubwc_enc_version = UBWC_3_0,
.ubwc_dec_version = UBWC_3_0,
.highest_bank_bit = 2,
};
static const struct msm_mdss_data sm6115_data = {
.ubwc_enc_version = UBWC_1_0,
.ubwc_dec_version = UBWC_2_0,
.ubwc_swizzle = 7,
.ubwc_static = 0x11f,
.highest_bank_bit = 0x1,
};
static const struct msm_mdss_data sm6125_data = {
.ubwc_enc_version = UBWC_1_0,
.ubwc_dec_version = UBWC_3_0,
.ubwc_swizzle = 1,
.highest_bank_bit = 1,
};
static const struct msm_mdss_data sm8250_data = {
.ubwc_enc_version = UBWC_4_0,
.ubwc_dec_version = UBWC_4_0,
.ubwc_swizzle = 6,
.ubwc_static = 1,
/* TODO: highest_bank_bit = 2 for LP_DDR4 */
.highest_bank_bit = 3,
.macrotile_mode = 1,
};
static const struct msm_mdss_data sm8550_data = {
.ubwc_enc_version = UBWC_4_0,
.ubwc_dec_version = UBWC_4_3,
.ubwc_swizzle = 6,
.ubwc_static = 1,
/* TODO: highest_bank_bit = 2 for LP_DDR4 */
.highest_bank_bit = 3,
.macrotile_mode = 1,
};
static const struct of_device_id mdss_dt_match[] = {
{ .compatible = "qcom,mdss" },
{ .compatible = "qcom,msm8998-mdss", .data = &msm8998_data },
{ .compatible = "qcom,qcm2290-mdss", .data = &qcm2290_data },
{ .compatible = "qcom,sdm845-mdss", .data = &sdm845_data },
{ .compatible = "qcom,sc7180-mdss", .data = &sc7180_data },
{ .compatible = "qcom,sc7280-mdss", .data = &sc7280_data },
{ .compatible = "qcom,sc8180x-mdss", .data = &sc8180x_data },
{ .compatible = "qcom,sc8280xp-mdss", .data = &sc8280xp_data },
{ .compatible = "qcom,sm6115-mdss", .data = &sm6115_data },
{ .compatible = "qcom,sm6125-mdss", .data = &sm6125_data },
{ .compatible = "qcom,sm6350-mdss", .data = &sm6350_data },
{ .compatible = "qcom,sm6375-mdss", .data = &sm6350_data },
{ .compatible = "qcom,sm8150-mdss", .data = &sm8150_data },
{ .compatible = "qcom,sm8250-mdss", .data = &sm8250_data },
{ .compatible = "qcom,sm8350-mdss", .data = &sm8250_data },
{ .compatible = "qcom,sm8450-mdss", .data = &sm8250_data },
{ .compatible = "qcom,sm8550-mdss", .data = &sm8550_data },
{}
};
MODULE_DEVICE_TABLE(of, mdss_dt_match);
static struct platform_driver mdss_platform_driver = {
.probe = mdss_probe,
.remove = mdss_remove,
.driver = {
.name = "msm-mdss",
.of_match_table = mdss_dt_match,
.pm = &mdss_pm_ops,
},
};
void __init msm_mdss_register(void)
{
platform_driver_register(&mdss_platform_driver);
}
void __exit msm_mdss_unregister(void)
{
platform_driver_unregister(&mdss_platform_driver);
}
| linux-master | drivers/gpu/drm/msm/msm_mdss.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
/* For debugging crashes, userspace can:
*
* tail -f /sys/kernel/debug/dri/<minor>/rd > logfile.rd
*
* to log the cmdstream in a format that is understood by freedreno/cffdump
* utility. By comparing the last successfully completed fence #, to the
* cmdstream for the next fence, you can narrow down which process and submit
* caused the gpu crash/lockup.
*
* Additionally:
*
* tail -f /sys/kernel/debug/dri/<minor>/hangrd > logfile.rd
*
* will capture just the cmdstream from submits which triggered a GPU hang.
*
* This bypasses drm_debugfs_create_files() mainly because we need to use
* our own fops for a bit more control. In particular, we don't want to
* do anything if userspace doesn't have the debugfs file open.
*
* The module-param "rd_full", which defaults to false, enables snapshotting
* all (non-written) buffers in the submit, rather than just cmdstream bo's.
* This is useful to capture the contents of (for example) vbo's or textures,
* or shader programs (if not emitted inline in cmdstream).
*/
#include <linux/circ_buf.h>
#include <linux/debugfs.h>
#include <linux/kfifo.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <drm/drm_file.h>
#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"
bool rd_full = false;
MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents");
module_param_named(rd_full, rd_full, bool, 0600);
#ifdef CONFIG_DEBUG_FS
enum rd_sect_type {
RD_NONE,
RD_TEST, /* ascii text */
RD_CMD, /* ascii text */
RD_GPUADDR, /* u32 gpuaddr, u32 size */
RD_CONTEXT, /* raw dump */
RD_CMDSTREAM, /* raw dump */
RD_CMDSTREAM_ADDR, /* gpu addr of cmdstream */
RD_PARAM, /* u32 param_type, u32 param_val, u32 bitlen */
RD_FLUSH, /* empty, clear previous params */
RD_PROGRAM, /* shader program, raw dump */
RD_VERT_SHADER,
RD_FRAG_SHADER,
RD_BUFFER_CONTENTS,
RD_GPU_ID,
RD_CHIP_ID,
};
#define BUF_SZ 512 /* should be power of 2 */
/* space used: */
#define circ_count(circ) \
(CIRC_CNT((circ)->head, (circ)->tail, BUF_SZ))
#define circ_count_to_end(circ) \
(CIRC_CNT_TO_END((circ)->head, (circ)->tail, BUF_SZ))
/* space available: */
#define circ_space(circ) \
(CIRC_SPACE((circ)->head, (circ)->tail, BUF_SZ))
#define circ_space_to_end(circ) \
(CIRC_SPACE_TO_END((circ)->head, (circ)->tail, BUF_SZ))
struct msm_rd_state {
struct drm_device *dev;
bool open;
	/* fifo access is synchronized on the producer side by write_lock,
	 * and on the consumer side by read_lock
*/
struct mutex read_lock, write_lock;
wait_queue_head_t fifo_event;
struct circ_buf fifo;
char buf[BUF_SZ];
};
static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
{
struct circ_buf *fifo = &rd->fifo;
const char *ptr = buf;
while (sz > 0) {
char *fptr = &fifo->buf[fifo->head];
int n;
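		/* wait until the reader frees up space, or bail if the file is closed */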
wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0 || !rd->open);
if (!rd->open)
return;
/* Note that smp_load_acquire() is not strictly required
* as CIRC_SPACE_TO_END() does not access the tail more
* than once.
*/
n = min(sz, circ_space_to_end(&rd->fifo));
memcpy(fptr, ptr, n);
smp_store_release(&fifo->head, (fifo->head + n) & (BUF_SZ - 1));
sz -= n;
ptr += n;
wake_up_all(&rd->fifo_event);
}
}
static void rd_write_section(struct msm_rd_state *rd,
enum rd_sect_type type, const void *buf, int sz)
{
rd_write(rd, &type, 4);
rd_write(rd, &sz, 4);
rd_write(rd, buf, sz);
}
static ssize_t rd_read(struct file *file, char __user *buf,
size_t sz, loff_t *ppos)
{
struct msm_rd_state *rd = file->private_data;
struct circ_buf *fifo = &rd->fifo;
const char *fptr = &fifo->buf[fifo->tail];
int n = 0, ret = 0;
mutex_lock(&rd->read_lock);
ret = wait_event_interruptible(rd->fifo_event,
circ_count(&rd->fifo) > 0);
if (ret)
goto out;
/* Note that smp_load_acquire() is not strictly required
* as CIRC_CNT_TO_END() does not access the head more than
* once.
*/
n = min_t(int, sz, circ_count_to_end(&rd->fifo));
if (copy_to_user(buf, fptr, n)) {
ret = -EFAULT;
goto out;
}
smp_store_release(&fifo->tail, (fifo->tail + n) & (BUF_SZ - 1));
*ppos += n;
wake_up_all(&rd->fifo_event);
out:
mutex_unlock(&rd->read_lock);
if (ret)
return ret;
return n;
}
static int rd_open(struct inode *inode, struct file *file)
{
struct msm_rd_state *rd = inode->i_private;
struct drm_device *dev = rd->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_gpu *gpu = priv->gpu;
uint64_t val;
uint32_t gpu_id;
uint32_t zero = 0;
int ret = 0;
if (!gpu)
return -ENODEV;
mutex_lock(&gpu->lock);
if (rd->open) {
ret = -EBUSY;
goto out;
}
file->private_data = rd;
rd->open = true;
/* Reset fifo to clear any previously unread data: */
rd->fifo.head = rd->fifo.tail = 0;
/* the parsing tools need to know gpu-id to know which
* register database to load.
*
* Note: These particular params do not require a context
*/
gpu->funcs->get_param(gpu, NULL, MSM_PARAM_GPU_ID, &val, &zero);
gpu_id = val;
rd_write_section(rd, RD_GPU_ID, &gpu_id, sizeof(gpu_id));
gpu->funcs->get_param(gpu, NULL, MSM_PARAM_CHIP_ID, &val, &zero);
rd_write_section(rd, RD_CHIP_ID, &val, sizeof(val));
out:
mutex_unlock(&gpu->lock);
return ret;
}
static int rd_release(struct inode *inode, struct file *file)
{
struct msm_rd_state *rd = inode->i_private;
rd->open = false;
wake_up_all(&rd->fifo_event);
return 0;
}
static const struct file_operations rd_debugfs_fops = {
.owner = THIS_MODULE,
.open = rd_open,
.read = rd_read,
.llseek = no_llseek,
.release = rd_release,
};
static void rd_cleanup(struct msm_rd_state *rd)
{
if (!rd)
return;
mutex_destroy(&rd->read_lock);
mutex_destroy(&rd->write_lock);
kfree(rd);
}
static struct msm_rd_state *rd_init(struct drm_minor *minor, const char *name)
{
struct msm_rd_state *rd;
rd = kzalloc(sizeof(*rd), GFP_KERNEL);
if (!rd)
return ERR_PTR(-ENOMEM);
rd->dev = minor->dev;
rd->fifo.buf = rd->buf;
mutex_init(&rd->read_lock);
mutex_init(&rd->write_lock);
init_waitqueue_head(&rd->fifo_event);
debugfs_create_file(name, S_IFREG | S_IRUGO, minor->debugfs_root, rd,
&rd_debugfs_fops);
return rd;
}
int msm_rd_debugfs_init(struct drm_minor *minor)
{
struct msm_drm_private *priv = minor->dev->dev_private;
struct msm_rd_state *rd;
int ret;
/* only create on first minor: */
if (priv->rd)
return 0;
rd = rd_init(minor, "rd");
if (IS_ERR(rd)) {
ret = PTR_ERR(rd);
goto fail;
}
priv->rd = rd;
rd = rd_init(minor, "hangrd");
if (IS_ERR(rd)) {
ret = PTR_ERR(rd);
goto fail;
}
priv->hangrd = rd;
return 0;
fail:
msm_rd_debugfs_cleanup(priv);
return ret;
}
void msm_rd_debugfs_cleanup(struct msm_drm_private *priv)
{
rd_cleanup(priv->rd);
priv->rd = NULL;
rd_cleanup(priv->hangrd);
priv->hangrd = NULL;
}
static void snapshot_buf(struct msm_rd_state *rd,
struct msm_gem_submit *submit, int idx,
uint64_t iova, uint32_t size, bool full)
{
struct drm_gem_object *obj = submit->bos[idx].obj;
unsigned offset = 0;
const char *buf;
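	/*
	 * If a specific iova/size was given, dump just that sub-range of the
	 * BO; otherwise dump the whole buffer.
	 */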
if (iova) {
offset = iova - submit->bos[idx].iova;
} else {
iova = submit->bos[idx].iova;
size = obj->size;
}
/*
	 * Always write the GPUADDR header so we can get a complete list of
	 * all the buffers in the cmd
*/
rd_write_section(rd, RD_GPUADDR,
(uint32_t[3]){ iova, size, iova >> 32 }, 12);
if (!full)
return;
/* But only dump the contents of buffers marked READ */
if (!(submit->bos[idx].flags & MSM_SUBMIT_BO_READ))
return;
buf = msm_gem_get_vaddr_active(obj);
if (IS_ERR(buf))
return;
buf += offset;
rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
msm_gem_put_vaddr_locked(obj);
}
/* called under gpu->lock */
void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
const char *fmt, ...)
{
struct task_struct *task;
char msg[256];
int i, n;
if (!rd->open)
return;
mutex_lock(&rd->write_lock);
if (fmt) {
va_list args;
va_start(args, fmt);
n = vscnprintf(msg, sizeof(msg), fmt, args);
va_end(args);
rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
}
rcu_read_lock();
task = pid_task(submit->pid, PIDTYPE_PID);
if (task) {
n = scnprintf(msg, sizeof(msg), "%.*s/%d: fence=%u",
TASK_COMM_LEN, task->comm,
pid_nr(submit->pid), submit->seqno);
} else {
n = scnprintf(msg, sizeof(msg), "???/%d: fence=%u",
pid_nr(submit->pid), submit->seqno);
}
rcu_read_unlock();
rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
for (i = 0; i < submit->nr_bos; i++)
snapshot_buf(rd, submit, i, 0, 0, should_dump(submit, i));
for (i = 0; i < submit->nr_cmds; i++) {
uint32_t szd = submit->cmd[i].size; /* in dwords */
/* snapshot cmdstream bo's (if we haven't already): */
if (!should_dump(submit, i)) {
snapshot_buf(rd, submit, submit->cmd[i].idx,
submit->cmd[i].iova, szd * 4, true);
}
}
for (i = 0; i < submit->nr_cmds; i++) {
uint64_t iova = submit->cmd[i].iova;
uint32_t szd = submit->cmd[i].size; /* in dwords */
switch (submit->cmd[i].type) {
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
/* ignore IB-targets, we've logged the buffer, the
* parser tool will follow the IB based on the logged
* buffer/gpuaddr, so nothing more to do.
*/
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
case MSM_SUBMIT_CMD_BUF:
rd_write_section(rd, RD_CMDSTREAM_ADDR,
(uint32_t[3]){ iova, szd, iova >> 32 }, 12);
break;
}
}
mutex_unlock(&rd->write_lock);
}
#endif
| linux-master | drivers/gpu/drm/msm/msm_rd.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include <drm/drm_crtc.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
#include "msm_drv.h"
#include "msm_kms.h"
#include "msm_gem.h"
struct msm_framebuffer {
struct drm_framebuffer base;
const struct msm_format *format;
/* Count of # of attached planes which need dirtyfb: */
refcount_t dirtyfb;
/* Framebuffer per-plane address, if pinned, else zero: */
uint64_t iova[DRM_FORMAT_MAX_PLANES];
atomic_t prepare_count;
};
#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
static int msm_framebuffer_dirtyfb(struct drm_framebuffer *fb,
struct drm_file *file_priv, unsigned int flags,
unsigned int color, struct drm_clip_rect *clips,
unsigned int num_clips)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
/* If this fb is not used on any display requiring pixel data to be
* flushed, then skip dirtyfb
*/
if (refcount_read(&msm_fb->dirtyfb) == 1)
return 0;
return drm_atomic_helper_dirtyfb(fb, file_priv, flags, color,
clips, num_clips);
}
static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
.create_handle = drm_gem_fb_create_handle,
.destroy = drm_gem_fb_destroy,
.dirty = msm_framebuffer_dirtyfb,
};
#ifdef CONFIG_DEBUG_FS
void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
struct msm_gem_stats stats = {};
int i, n = fb->format->num_planes;
seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n",
fb->width, fb->height, (char *)&fb->format->format,
drm_framebuffer_read_refcount(fb), fb->base.id);
for (i = 0; i < n; i++) {
seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
i, fb->offsets[i], fb->pitches[i]);
msm_gem_describe(fb->obj[i], m, &stats);
}
}
#endif
/* prepare/pin all the fb's bo's for scanout.
*/
int msm_framebuffer_prepare(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace,
bool needs_dirtyfb)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int ret, i, n = fb->format->num_planes;
if (needs_dirtyfb)
refcount_inc(&msm_fb->dirtyfb);
atomic_inc(&msm_fb->prepare_count);
for (i = 0; i < n; i++) {
ret = msm_gem_get_and_pin_iova(fb->obj[i], aspace, &msm_fb->iova[i]);
drm_dbg_state(fb->dev, "FB[%u]: iova[%d]: %08llx (%d)",
fb->base.id, i, msm_fb->iova[i], ret);
if (ret)
return ret;
}
return 0;
}
void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace,
bool needed_dirtyfb)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int i, n = fb->format->num_planes;
if (needed_dirtyfb)
refcount_dec(&msm_fb->dirtyfb);
for (i = 0; i < n; i++)
msm_gem_unpin_iova(fb->obj[i], aspace);
if (!atomic_dec_return(&msm_fb->prepare_count))
memset(msm_fb->iova, 0, sizeof(msm_fb->iova));
}
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace, int plane)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
return msm_fb->iova[plane] + fb->offsets[plane];
}
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
{
return drm_gem_fb_get_obj(fb, plane);
}
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
return msm_fb->format;
}
struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd)
{
const struct drm_format_info *info = drm_get_format_info(dev,
mode_cmd);
struct drm_gem_object *bos[4] = {0};
struct drm_framebuffer *fb;
int ret, i, n = info->num_planes;
for (i = 0; i < n; i++) {
bos[i] = drm_gem_object_lookup(file, mode_cmd->handles[i]);
if (!bos[i]) {
ret = -ENXIO;
goto out_unref;
}
}
fb = msm_framebuffer_init(dev, mode_cmd, bos);
if (IS_ERR(fb)) {
ret = PTR_ERR(fb);
goto out_unref;
}
return fb;
out_unref:
for (i = 0; i < n; i++)
drm_gem_object_put(bos[i]);
return ERR_PTR(ret);
}
static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
{
const struct drm_format_info *info = drm_get_format_info(dev,
mode_cmd);
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
struct msm_framebuffer *msm_fb = NULL;
struct drm_framebuffer *fb;
const struct msm_format *format;
int ret, i, n;
drm_dbg_state(dev, "create framebuffer: mode_cmd=%p (%dx%d@%4.4s)",
mode_cmd, mode_cmd->width, mode_cmd->height,
(char *)&mode_cmd->pixel_format);
n = info->num_planes;
format = kms->funcs->get_format(kms, mode_cmd->pixel_format,
mode_cmd->modifier[0]);
if (!format) {
DRM_DEV_ERROR(dev->dev, "unsupported pixel format: %4.4s\n",
(char *)&mode_cmd->pixel_format);
ret = -EINVAL;
goto fail;
}
msm_fb = kzalloc(sizeof(*msm_fb), GFP_KERNEL);
if (!msm_fb) {
ret = -ENOMEM;
goto fail;
}
fb = &msm_fb->base;
msm_fb->format = format;
if (n > ARRAY_SIZE(fb->obj)) {
ret = -EINVAL;
goto fail;
}
for (i = 0; i < n; i++) {
unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
unsigned int height = mode_cmd->height / (i ? info->vsub : 1);
unsigned int min_size;
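		/*
		 * Minimum BO size: the plane offset, full pitches for all but
		 * the last row, and just the used bytes of the last row.
		 */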
min_size = (height - 1) * mode_cmd->pitches[i]
+ width * info->cpp[i]
+ mode_cmd->offsets[i];
if (bos[i]->size < min_size) {
ret = -EINVAL;
goto fail;
}
msm_fb->base.obj[i] = bos[i];
}
drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs);
if (ret) {
DRM_DEV_ERROR(dev->dev, "framebuffer init failed: %d\n", ret);
goto fail;
}
refcount_set(&msm_fb->dirtyfb, 1);
drm_dbg_state(dev, "create: FB ID: %d (%p)", fb->base.id, fb);
return fb;
fail:
kfree(msm_fb);
return ERR_PTR(ret);
}
struct drm_framebuffer *
msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format)
{
struct drm_mode_fb_cmd2 mode_cmd = {
.pixel_format = format,
.width = w,
.height = h,
.pitches = { p },
};
struct drm_gem_object *bo;
struct drm_framebuffer *fb;
int size;
/* allocate backing bo */
size = mode_cmd.pitches[0] * mode_cmd.height;
DBG("allocating %d bytes for fb %d", size, dev->primary->index);
bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC | MSM_BO_STOLEN);
if (IS_ERR(bo)) {
dev_warn(dev->dev, "could not allocate stolen bo\n");
/* try regular bo: */
bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC);
}
if (IS_ERR(bo)) {
DRM_DEV_ERROR(dev->dev, "failed to allocate buffer object\n");
return ERR_CAST(bo);
}
msm_gem_object_set_name(bo, "stolenfb");
fb = msm_framebuffer_init(dev, &mode_cmd, &bo);
if (IS_ERR(fb)) {
DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n");
/* note: if fb creation failed, we can't rely on fb destroy
* to unref the bo:
*/
drm_gem_object_put(bo);
return ERR_CAST(fb);
}
return fb;
}
| linux-master | drivers/gpu/drm/msm/msm_fb.c |
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
#include <linux/dma-mapping.h>
#include "msm_drv.h"
#include "msm_mmu.h"
#include "adreno/adreno_gpu.h"
#include "adreno/a2xx.xml.h"
struct msm_gpummu {
struct msm_mmu base;
struct msm_gpu *gpu;
dma_addr_t pt_base;
uint32_t *table;
};
#define to_msm_gpummu(x) container_of(x, struct msm_gpummu, base)
#define GPUMMU_VA_START SZ_16M
#define GPUMMU_VA_RANGE (0xfff * SZ_64K)
#define GPUMMU_PAGE_SIZE SZ_4K
#define TABLE_SIZE (sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE)
static void msm_gpummu_detach(struct msm_mmu *mmu)
{
}
static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
struct sg_table *sgt, size_t len, int prot)
{
struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
struct sg_dma_page_iter dma_iter;
unsigned prot_bits = 0;
if (prot & IOMMU_WRITE)
prot_bits |= 1;
if (prot & IOMMU_READ)
prot_bits |= 2;
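	/* each entry stores a 4K page address with the R/W permission bits in the low bits */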
for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
dma_addr_t addr = sg_page_iter_dma_address(&dma_iter);
int i;
for (i = 0; i < PAGE_SIZE; i += GPUMMU_PAGE_SIZE)
gpummu->table[idx++] = (addr + i) | prot_bits;
}
	/* this could be improved by deferring the flush across multiple map() calls */
gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
return 0;
}
static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
{
struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
unsigned i;
for (i = 0; i < len / GPUMMU_PAGE_SIZE; i++, idx++)
gpummu->table[idx] = 0;
gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
return 0;
}
static void msm_gpummu_resume_translation(struct msm_mmu *mmu)
{
}
static void msm_gpummu_destroy(struct msm_mmu *mmu)
{
struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
dma_free_attrs(mmu->dev, TABLE_SIZE, gpummu->table, gpummu->pt_base,
DMA_ATTR_FORCE_CONTIGUOUS);
kfree(gpummu);
}
static const struct msm_mmu_funcs funcs = {
.detach = msm_gpummu_detach,
.map = msm_gpummu_map,
.unmap = msm_gpummu_unmap,
.destroy = msm_gpummu_destroy,
.resume_translation = msm_gpummu_resume_translation,
};
struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu)
{
struct msm_gpummu *gpummu;
gpummu = kzalloc(sizeof(*gpummu), GFP_KERNEL);
if (!gpummu)
return ERR_PTR(-ENOMEM);
gpummu->table = dma_alloc_attrs(dev, TABLE_SIZE + 32, &gpummu->pt_base,
GFP_KERNEL | __GFP_ZERO, DMA_ATTR_FORCE_CONTIGUOUS);
if (!gpummu->table) {
kfree(gpummu);
return ERR_PTR(-ENOMEM);
}
gpummu->gpu = gpu;
msm_mmu_init(&gpummu->base, dev, &funcs, MSM_MMU_GPUMMU);
return &gpummu->base;
}
void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
dma_addr_t *tran_error)
{
dma_addr_t base = to_msm_gpummu(mmu)->pt_base;
*pt_base = base;
*tran_error = base + TABLE_SIZE; /* 32-byte aligned */
}
| linux-master | drivers/gpu/drm/msm/msm_gpummu.c |
// SPDX-License-Identifier: GPL-2.0
#define CREATE_TRACE_POINTS
#include "msm_atomic_trace.h"
| linux-master | drivers/gpu/drm/msm/msm_atomic_tracepoints.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include <linux/dma-map-ops.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <drm/drm_prime.h>
#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_drm_private *priv = obj->dev->dev_private;
return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
priv->vram.paddr;
}
static bool use_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
return !msm_obj->vram_node;
}
/*
* Cache sync.. this is a bit over-complicated, to fit dma-mapping
* API. Really GPU cache is out of scope here (handled on cmdstream)
* and all we need to do is invalidate newly allocated pages before
* mapping to CPU as uncached/writecombine.
*
* On top of this, we have the added headache, that depending on
* display generation, the display's iommu may be wired up to either
* the toplevel drm device (mdss), or to the mdp sub-node, meaning
* that here we either have dma-direct or iommu ops.
*
* Let this be a cautionary tale of abstraction gone wrong.
*/
static void sync_for_device(struct msm_gem_object *msm_obj)
{
struct device *dev = msm_obj->base.dev->dev;
dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}
static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
struct device *dev = msm_obj->base.dev->dev;
dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}
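/*
* LRU bookkeeping, as implemented by the helpers below: an object with no
* backing pages sits on lru.unbacked; while pinned it lives on lru.pinned;
* otherwise it is tracked on lru.willneed or lru.dontneed according to its
* madvise state.
*/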
static void update_lru_active(struct drm_gem_object *obj)
{
struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
GEM_WARN_ON(!msm_obj->pages);
if (msm_obj->pin_count) {
drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
} else if (msm_obj->madv == MSM_MADV_WILLNEED) {
drm_gem_lru_move_tail_locked(&priv->lru.willneed, obj);
} else {
GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);
drm_gem_lru_move_tail_locked(&priv->lru.dontneed, obj);
}
}
static void update_lru_locked(struct drm_gem_object *obj)
{
struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_gem_assert_locked(&msm_obj->base);
if (!msm_obj->pages) {
GEM_WARN_ON(msm_obj->pin_count);
drm_gem_lru_move_tail_locked(&priv->lru.unbacked, obj);
} else {
update_lru_active(obj);
}
}
static void update_lru(struct drm_gem_object *obj)
{
struct msm_drm_private *priv = obj->dev->dev_private;
mutex_lock(&priv->lru.lock);
update_lru_locked(obj);
mutex_unlock(&priv->lru.lock);
}
/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_drm_private *priv = obj->dev->dev_private;
dma_addr_t paddr;
struct page **p;
int ret, i;
p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (!p)
return ERR_PTR(-ENOMEM);
spin_lock(&priv->vram.lock);
ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
spin_unlock(&priv->vram.lock);
if (ret) {
kvfree(p);
return ERR_PTR(ret);
}
paddr = physaddr(obj);
for (i = 0; i < npages; i++) {
p[i] = pfn_to_page(__phys_to_pfn(paddr));
paddr += PAGE_SIZE;
}
return p;
}
static struct page **get_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_gem_assert_locked(obj);
if (!msm_obj->pages) {
struct drm_device *dev = obj->dev;
struct page **p;
int npages = obj->size >> PAGE_SHIFT;
if (use_pages(obj))
p = drm_gem_get_pages(obj);
else
p = get_pages_vram(obj, npages);
if (IS_ERR(p)) {
DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
PTR_ERR(p));
return p;
}
msm_obj->pages = p;
msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
if (IS_ERR(msm_obj->sgt)) {
void *ptr = ERR_CAST(msm_obj->sgt);
DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
msm_obj->sgt = NULL;
return ptr;
}
/* For non-cached buffers, ensure the new pages are clean
* because display controller, GPU, etc. are not coherent:
*/
if (msm_obj->flags & MSM_BO_WC)
sync_for_device(msm_obj);
update_lru(obj);
}
return msm_obj->pages;
}
static void put_pages_vram(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_drm_private *priv = obj->dev->dev_private;
spin_lock(&priv->vram.lock);
drm_mm_remove_node(msm_obj->vram_node);
spin_unlock(&priv->vram.lock);
kvfree(msm_obj->pages);
}
static void put_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
if (msm_obj->pages) {
if (msm_obj->sgt) {
/* For non-cached buffers, sync the pages back to the CPU
* domain before freeing, because display controller,
* GPU, etc. are not coherent:
*/
if (msm_obj->flags & MSM_BO_WC)
sync_for_cpu(msm_obj);
sg_free_table(msm_obj->sgt);
kfree(msm_obj->sgt);
msm_obj->sgt = NULL;
}
if (use_pages(obj))
drm_gem_put_pages(obj, msm_obj->pages, true, false);
else
put_pages_vram(obj);
msm_obj->pages = NULL;
update_lru(obj);
}
}
static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj,
unsigned madv)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_gem_assert_locked(obj);
if (GEM_WARN_ON(msm_obj->madv > madv)) {
DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
msm_obj->madv, madv);
return ERR_PTR(-EBUSY);
}
return get_pages(obj);
}
/*
* Update the pin count of the object, call under lru.lock
*/
void msm_gem_pin_obj_locked(struct drm_gem_object *obj)
{
struct msm_drm_private *priv = obj->dev->dev_private;
msm_gem_assert_locked(obj);
to_msm_bo(obj)->pin_count++;
drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
}
static void pin_obj_locked(struct drm_gem_object *obj)
{
struct msm_drm_private *priv = obj->dev->dev_private;
mutex_lock(&priv->lru.lock);
msm_gem_pin_obj_locked(obj);
mutex_unlock(&priv->lru.lock);
}
struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
{
struct page **p;
msm_gem_lock(obj);
p = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED);
if (!IS_ERR(p))
pin_obj_locked(obj);
msm_gem_unlock(obj);
return p;
}
void msm_gem_unpin_pages(struct drm_gem_object *obj)
{
msm_gem_lock(obj);
msm_gem_unpin_locked(obj);
msm_gem_unlock(obj);
}
static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
{
if (msm_obj->flags & MSM_BO_WC)
return pgprot_writecombine(prot);
return prot;
}
static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct page **pages;
unsigned long pfn;
pgoff_t pgoff;
int err;
vm_fault_t ret;
/*
* vm_ops.open/drm_gem_mmap_obj and close get and put
* a reference on obj. So, we don't need to hold one here.
*/
err = msm_gem_lock_interruptible(obj);
if (err) {
ret = VM_FAULT_NOPAGE;
goto out;
}
if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
msm_gem_unlock(obj);
return VM_FAULT_SIGBUS;
}
/* make sure we have pages attached now */
pages = get_pages(obj);
if (IS_ERR(pages)) {
ret = vmf_error(PTR_ERR(pages));
goto out_unlock;
}
/* We don't use vmf->pgoff since that has the fake offset: */
pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
pfn = page_to_pfn(pages[pgoff]);
VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
ret = vmf_insert_pfn(vma, vmf->address, pfn);
out_unlock:
msm_gem_unlock(obj);
out:
return ret;
}
/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
int ret;
msm_gem_assert_locked(obj);
/* Make it mmapable */
ret = drm_gem_create_mmap_offset(obj);
if (ret) {
DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
return 0;
}
return drm_vma_node_offset_addr(&obj->vma_node);
}
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
uint64_t offset;
msm_gem_lock(obj);
offset = mmap_offset(obj);
msm_gem_unlock(obj);
return offset;
}
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
msm_gem_assert_locked(obj);
vma = msm_gem_vma_new(aspace);
if (!vma)
return ERR_PTR(-ENOMEM);
list_add_tail(&vma->list, &msm_obj->vmas);
return vma;
}
static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
msm_gem_assert_locked(obj);
list_for_each_entry(vma, &msm_obj->vmas, list) {
if (vma->aspace == aspace)
return vma;
}
return NULL;
}
static void del_vma(struct msm_gem_vma *vma)
{
if (!vma)
return;
list_del(&vma->list);
kfree(vma);
}
/*
* If close is true, this also closes the VMA (releasing the allocated
* iova range) in addition to removing the iommu mapping. In the eviction
* case (!close), we keep the iova allocated, but only remove the iommu
* mapping.
*/
static void
put_iova_spaces(struct drm_gem_object *obj, bool close)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
msm_gem_assert_locked(obj);
list_for_each_entry(vma, &msm_obj->vmas, list) {
if (vma->aspace) {
msm_gem_vma_purge(vma);
if (close)
msm_gem_vma_close(vma);
}
}
}
/* Called with msm_obj locked */
static void
put_iova_vmas(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma, *tmp;
msm_gem_assert_locked(obj);
list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
del_vma(vma);
}
}
static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace,
u64 range_start, u64 range_end)
{
struct msm_gem_vma *vma;
msm_gem_assert_locked(obj);
vma = lookup_vma(obj, aspace);
if (!vma) {
int ret;
vma = add_vma(obj, aspace);
if (IS_ERR(vma))
return vma;
ret = msm_gem_vma_init(vma, obj->size,
range_start, range_end);
if (ret) {
del_vma(vma);
return ERR_PTR(ret);
}
} else {
GEM_WARN_ON(vma->iova < range_start);
GEM_WARN_ON((vma->iova + obj->size) > range_end);
}
return vma;
}
int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct page **pages;
int prot = IOMMU_READ;
if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
prot |= IOMMU_WRITE;
if (msm_obj->flags & MSM_BO_MAP_PRIV)
prot |= IOMMU_PRIV;
if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
prot |= IOMMU_CACHE;
msm_gem_assert_locked(obj);
pages = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED);
if (IS_ERR(pages))
return PTR_ERR(pages);
return msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
}
void msm_gem_unpin_locked(struct drm_gem_object *obj)
{
struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_gem_assert_locked(obj);
mutex_lock(&priv->lru.lock);
msm_obj->pin_count--;
GEM_WARN_ON(msm_obj->pin_count < 0);
update_lru_locked(obj);
mutex_unlock(&priv->lru.lock);
}
/* Special unpin path for use in the fence-signaling path, avoiding the need
* to hold the obj lock by only depending on things that are protected by
* the LRU lock. In particular we know that we already have backing
* and that the object's dma_resv has the fence for the current
* submit/job, which will prevent us racing against page eviction.
*/
void msm_gem_unpin_active(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_obj->pin_count--;
GEM_WARN_ON(msm_obj->pin_count < 0);
update_lru_active(obj);
}
struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace)
{
return get_vma_locked(obj, aspace, 0, U64_MAX);
}
static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova,
u64 range_start, u64 range_end)
{
struct msm_gem_vma *vma;
int ret;
msm_gem_assert_locked(obj);
vma = get_vma_locked(obj, aspace, range_start, range_end);
if (IS_ERR(vma))
return PTR_ERR(vma);
ret = msm_gem_pin_vma_locked(obj, vma);
if (!ret) {
*iova = vma->iova;
pin_obj_locked(obj);
}
return ret;
}
/*
* get iova and pin it. Should have a matching put
* limits iova to specified range (in pages)
*/
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova,
u64 range_start, u64 range_end)
{
int ret;
msm_gem_lock(obj);
ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
msm_gem_unlock(obj);
return ret;
}
/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova)
{
return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}
/*
* Get an iova but don't pin it. Doesn't need a put because iovas are currently
* valid for the life of the object
*/
int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova)
{
struct msm_gem_vma *vma;
int ret = 0;
msm_gem_lock(obj);
vma = get_vma_locked(obj, aspace, 0, U64_MAX);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
} else {
*iova = vma->iova;
}
msm_gem_unlock(obj);
return ret;
}
static int clear_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace)
{
struct msm_gem_vma *vma = lookup_vma(obj, aspace);
if (!vma)
return 0;
msm_gem_vma_purge(vma);
msm_gem_vma_close(vma);
del_vma(vma);
return 0;
}
/*
* Get the requested iova but don't pin it. Fails if the requested iova is
* not available. Doesn't need a put because iovas are currently valid for
* the life of the object.
*
* Setting an iova of zero will clear the vma.
*/
int msm_gem_set_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t iova)
{
int ret = 0;
msm_gem_lock(obj);
if (!iova) {
ret = clear_iova(obj, aspace);
} else {
struct msm_gem_vma *vma;
vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
} else if (GEM_WARN_ON(vma->iova != iova)) {
clear_iova(obj, aspace);
ret = -EBUSY;
}
}
msm_gem_unlock(obj);
return ret;
}
/*
* Unpin an iova by updating the reference counts. The memory isn't actually
* purged until something else (shrinker, mm_notifier, destroy, etc) decides
* to get rid of it
*/
void msm_gem_unpin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace)
{
struct msm_gem_vma *vma;
msm_gem_lock(obj);
vma = lookup_vma(obj, aspace);
if (!GEM_WARN_ON(!vma)) {
msm_gem_unpin_locked(obj);
}
msm_gem_unlock(obj);
}
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
args->pitch = align_pitch(args->width, args->bpp);
args->size = PAGE_ALIGN(args->pitch * args->height);
return msm_gem_new_handle(dev, file, args->size,
MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}
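/*
* Worked example for msm_gem_dumb_create() above (illustrative only,
* assuming align_pitch() adds no extra padding): a 1920x1080 dumb buffer at
* 32 bpp gets a pitch of at least 1920 * 4 = 7680 bytes and a size of
* PAGE_ALIGN(7680 * 1080) = 8294400 bytes.
*/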
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset)
{
struct drm_gem_object *obj;
int ret = 0;
/* GEM does all our handle to object mapping */
obj = drm_gem_object_lookup(file, handle);
if (obj == NULL) {
ret = -ENOENT;
goto fail;
}
*offset = msm_gem_mmap_offset(obj);
drm_gem_object_put(obj);
fail:
return ret;
}
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct page **pages;
int ret = 0;
msm_gem_assert_locked(obj);
if (obj->import_attach)
return ERR_PTR(-ENODEV);
pages = msm_gem_pin_pages_locked(obj, madv);
if (IS_ERR(pages))
return ERR_CAST(pages);
pin_obj_locked(obj);
/* increment vmap_count *before* vmap() call, so shrinker can
* check vmap_count (is_vunmapable()) outside of msm_obj lock.
* This guarantees that we won't try to msm_gem_vunmap() this
* same object from within the vmap() call (while we already
* hold msm_obj lock)
*/
msm_obj->vmap_count++;
if (!msm_obj->vaddr) {
msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
if (msm_obj->vaddr == NULL) {
ret = -ENOMEM;
goto fail;
}
}
return msm_obj->vaddr;
fail:
msm_obj->vmap_count--;
msm_gem_unpin_locked(obj);
return ERR_PTR(ret);
}
void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
return get_vaddr(obj, MSM_MADV_WILLNEED);
}
void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
void *ret;
msm_gem_lock(obj);
ret = msm_gem_get_vaddr_locked(obj);
msm_gem_unlock(obj);
return ret;
}
/*
* Don't use this! It is for the very special case of dumping
* submits from GPU hangs or faults, where the bo may already
* be MSM_MADV_DONTNEED, but we know the buffer is still on the
* active list.
*/
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
return get_vaddr(obj, __MSM_MADV_PURGED);
}
void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_gem_assert_locked(obj);
GEM_WARN_ON(msm_obj->vmap_count < 1);
msm_obj->vmap_count--;
msm_gem_unpin_locked(obj);
}
void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
msm_gem_lock(obj);
msm_gem_put_vaddr_locked(obj);
msm_gem_unlock(obj);
}
/* Update madvise status, returns true if not purged, else
* false or -errno.
*/
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_gem_lock(obj);
mutex_lock(&priv->lru.lock);
if (msm_obj->madv != __MSM_MADV_PURGED)
msm_obj->madv = madv;
madv = msm_obj->madv;
/* If the obj is inactive, we might need to move it
* between inactive lists
*/
update_lru_locked(obj);
mutex_unlock(&priv->lru.lock);
msm_gem_unlock(obj);
return (madv != __MSM_MADV_PURGED);
}
void msm_gem_purge(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_gem_assert_locked(obj);
GEM_WARN_ON(!is_purgeable(msm_obj));
/* Get rid of any iommu mapping(s): */
put_iova_spaces(obj, true);
msm_gem_vunmap(obj);
drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
put_pages(obj);
put_iova_vmas(obj);
mutex_lock(&priv->lru.lock);
/* A one-way transition: */
msm_obj->madv = __MSM_MADV_PURGED;
mutex_unlock(&priv->lru.lock);
drm_gem_free_mmap_offset(obj);
/* Our goal here is to return as much of the memory as
* possible back to the system, as we are called from OOM.
* To do this we must instruct the shmfs to drop all of its
* backing pages, *now*.
*/
shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
0, (loff_t)-1);
}
/*
* Unpin the backing pages and make them available to be swapped out.
*/
void msm_gem_evict(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_gem_assert_locked(obj);
GEM_WARN_ON(is_unevictable(msm_obj));
/* Get rid of any iommu mapping(s): */
put_iova_spaces(obj, false);
drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
put_pages(obj);
}
void msm_gem_vunmap(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_gem_assert_locked(obj);
if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
return;
vunmap(msm_obj->vaddr);
msm_obj->vaddr = NULL;
}
bool msm_gem_active(struct drm_gem_object *obj)
{
msm_gem_assert_locked(obj);
if (to_msm_bo(obj)->pin_count)
return true;
return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
}
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
bool write = !!(op & MSM_PREP_WRITE);
unsigned long remain =
op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
long ret;
if (op & MSM_PREP_BOOST) {
dma_resv_set_deadline(obj->resv, dma_resv_usage_rw(write),
ktime_get());
}
ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
true, remain);
if (ret == 0)
return remain == 0 ? -EBUSY : -ETIMEDOUT;
else if (ret < 0)
return ret;
/* TODO cache maintenance */
return 0;
}
int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
/* TODO cache maintenance */
return 0;
}
#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
struct msm_gem_stats *stats)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct dma_resv *robj = obj->resv;
struct msm_gem_vma *vma;
uint64_t off = drm_vma_node_start(&obj->vma_node);
const char *madv;
msm_gem_lock(obj);
stats->all.count++;
stats->all.size += obj->size;
if (msm_gem_active(obj)) {
stats->active.count++;
stats->active.size += obj->size;
}
if (msm_obj->pages) {
stats->resident.count++;
stats->resident.size += obj->size;
}
switch (msm_obj->madv) {
case __MSM_MADV_PURGED:
stats->purged.count++;
stats->purged.size += obj->size;
madv = " purged";
break;
case MSM_MADV_DONTNEED:
stats->purgeable.count++;
stats->purgeable.size += obj->size;
madv = " purgeable";
break;
case MSM_MADV_WILLNEED:
default:
madv = "";
break;
}
seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
obj->name, kref_read(&obj->refcount),
off, msm_obj->vaddr);
seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
if (!list_empty(&msm_obj->vmas)) {
seq_puts(m, " vmas:");
list_for_each_entry(vma, &msm_obj->vmas, list) {
const char *name, *comm;
if (vma->aspace) {
struct msm_gem_address_space *aspace = vma->aspace;
struct task_struct *task =
get_pid_task(aspace->pid, PIDTYPE_PID);
if (task) {
comm = kstrdup(task->comm, GFP_KERNEL);
put_task_struct(task);
} else {
comm = NULL;
}
name = aspace->name;
} else {
name = comm = NULL;
}
seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s]",
name, comm ? ":" : "", comm ? comm : "",
vma->aspace, vma->iova,
vma->mapped ? "mapped" : "unmapped");
kfree(comm);
}
seq_puts(m, "\n");
}
dma_resv_describe(robj, m);
msm_gem_unlock(obj);
}
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
struct msm_gem_stats stats = {};
struct msm_gem_object *msm_obj;
seq_puts(m, " flags id ref offset kaddr size madv name\n");
list_for_each_entry(msm_obj, list, node) {
struct drm_gem_object *obj = &msm_obj->base;
seq_puts(m, " ");
msm_gem_describe(obj, m, &stats);
}
seq_printf(m, "Total: %4d objects, %9zu bytes\n",
stats.all.count, stats.all.size);
seq_printf(m, "Active: %4d objects, %9zu bytes\n",
stats.active.count, stats.active.size);
seq_printf(m, "Resident: %4d objects, %9zu bytes\n",
stats.resident.count, stats.resident.size);
seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
stats.purgeable.count, stats.purgeable.size);
seq_printf(m, "Purged: %4d objects, %9zu bytes\n",
stats.purged.count, stats.purged.size);
}
#endif
/* don't call directly! Use drm_gem_object_put() */
static void msm_gem_free_object(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct drm_device *dev = obj->dev;
struct msm_drm_private *priv = dev->dev_private;
mutex_lock(&priv->obj_lock);
list_del(&msm_obj->node);
mutex_unlock(&priv->obj_lock);
put_iova_spaces(obj, true);
if (obj->import_attach) {
GEM_WARN_ON(msm_obj->vaddr);
/* Don't drop the pages for imported dmabuf, as they are not
* ours, just free the array we allocated:
*/
kvfree(msm_obj->pages);
put_iova_vmas(obj);
drm_prime_gem_destroy(obj, msm_obj->sgt);
} else {
msm_gem_vunmap(obj);
put_pages(obj);
put_iova_vmas(obj);
}
drm_gem_object_release(obj);
kfree(msm_obj);
}
static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
return 0;
}
/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
uint32_t size, uint32_t flags, uint32_t *handle,
char *name)
{
struct drm_gem_object *obj;
int ret;
obj = msm_gem_new(dev, size, flags);
if (IS_ERR(obj))
return PTR_ERR(obj);
if (name)
msm_gem_object_set_name(obj, "%s", name);
ret = drm_gem_handle_create(file, obj, handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_put(obj);
return ret;
}
static enum drm_gem_object_status msm_gem_status(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
enum drm_gem_object_status status = 0;
if (msm_obj->pages)
status |= DRM_GEM_OBJECT_RESIDENT;
if (msm_obj->madv == MSM_MADV_DONTNEED)
status |= DRM_GEM_OBJECT_PURGEABLE;
return status;
}
static const struct vm_operations_struct vm_ops = {
.fault = msm_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
static const struct drm_gem_object_funcs msm_gem_object_funcs = {
.free = msm_gem_free_object,
.pin = msm_gem_prime_pin,
.unpin = msm_gem_prime_unpin,
.get_sg_table = msm_gem_prime_get_sg_table,
.vmap = msm_gem_prime_vmap,
.vunmap = msm_gem_prime_vunmap,
.mmap = msm_gem_object_mmap,
.status = msm_gem_status,
.vm_ops = &vm_ops,
};
static int msm_gem_new_impl(struct drm_device *dev,
uint32_t size, uint32_t flags,
struct drm_gem_object **obj)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
switch (flags & MSM_BO_CACHE_MASK) {
case MSM_BO_CACHED:
case MSM_BO_WC:
break;
case MSM_BO_CACHED_COHERENT:
if (priv->has_cached_coherent)
break;
fallthrough;
default:
DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
(flags & MSM_BO_CACHE_MASK));
return -EINVAL;
}
msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
if (!msm_obj)
return -ENOMEM;
msm_obj->flags = flags;
msm_obj->madv = MSM_MADV_WILLNEED;
INIT_LIST_HEAD(&msm_obj->node);
INIT_LIST_HEAD(&msm_obj->vmas);
*obj = &msm_obj->base;
(*obj)->funcs = &msm_gem_object_funcs;
return 0;
}
struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
struct drm_gem_object *obj = NULL;
bool use_vram = false;
int ret;
size = PAGE_ALIGN(size);
if (!msm_use_mmu(dev))
use_vram = true;
else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
use_vram = true;
if (GEM_WARN_ON(use_vram && !priv->vram.size))
return ERR_PTR(-EINVAL);
/* Disallow zero sized objects as they make the underlying
* infrastructure grumpy
*/
if (size == 0)
return ERR_PTR(-EINVAL);
ret = msm_gem_new_impl(dev, size, flags, &obj);
if (ret)
return ERR_PTR(ret);
msm_obj = to_msm_bo(obj);
if (use_vram) {
struct msm_gem_vma *vma;
struct page **pages;
drm_gem_private_object_init(dev, obj, size);
msm_gem_lock(obj);
vma = add_vma(obj, NULL);
msm_gem_unlock(obj);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto fail;
}
to_msm_bo(obj)->vram_node = &vma->node;
msm_gem_lock(obj);
pages = get_pages(obj);
msm_gem_unlock(obj);
if (IS_ERR(pages)) {
ret = PTR_ERR(pages);
goto fail;
}
vma->iova = physaddr(obj);
} else {
ret = drm_gem_object_init(dev, obj, size);
if (ret)
goto fail;
/*
* Our buffers are kept pinned, so allocating them from the
* MOVABLE zone is a really bad idea, and conflicts with CMA.
* See the comments above new_inode() for why this is required _and_
* expected if you're going to pin these pages.
*/
mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
}
drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
mutex_lock(&priv->obj_lock);
list_add_tail(&msm_obj->node, &priv->objects);
mutex_unlock(&priv->obj_lock);
ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto fail;
return obj;
fail:
drm_gem_object_put(obj);
return ERR_PTR(ret);
}
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct dma_buf *dmabuf, struct sg_table *sgt)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
struct drm_gem_object *obj;
uint32_t size;
int ret, npages;
/* if we don't have IOMMU, don't bother pretending we can import: */
if (!msm_use_mmu(dev)) {
DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
return ERR_PTR(-EINVAL);
}
size = PAGE_ALIGN(dmabuf->size);
ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
if (ret)
return ERR_PTR(ret);
drm_gem_private_object_init(dev, obj, size);
npages = size / PAGE_SIZE;
msm_obj = to_msm_bo(obj);
msm_gem_lock(obj);
msm_obj->sgt = sgt;
msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (!msm_obj->pages) {
msm_gem_unlock(obj);
ret = -ENOMEM;
goto fail;
}
ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
if (ret) {
msm_gem_unlock(obj);
goto fail;
}
msm_gem_unlock(obj);
drm_gem_lru_move_tail(&priv->lru.pinned, obj);
mutex_lock(&priv->obj_lock);
list_add_tail(&msm_obj->node, &priv->objects);
mutex_unlock(&priv->obj_lock);
ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto fail;
return obj;
fail:
drm_gem_object_put(obj);
return ERR_PTR(ret);
}
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
uint32_t flags, struct msm_gem_address_space *aspace,
struct drm_gem_object **bo, uint64_t *iova)
{
void *vaddr;
struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
int ret;
if (IS_ERR(obj))
return ERR_CAST(obj);
if (iova) {
ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
if (ret)
goto err;
}
vaddr = msm_gem_get_vaddr(obj);
if (IS_ERR(vaddr)) {
msm_gem_unpin_iova(obj, aspace);
ret = PTR_ERR(vaddr);
goto err;
}
if (bo)
*bo = obj;
return vaddr;
err:
drm_gem_object_put(obj);
return ERR_PTR(ret);
}
void msm_gem_kernel_put(struct drm_gem_object *bo,
struct msm_gem_address_space *aspace)
{
if (IS_ERR_OR_NULL(bo))
return;
msm_gem_put_vaddr(bo);
msm_gem_unpin_iova(bo, aspace);
drm_gem_object_put(bo);
}
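/*
* Illustrative usage sketch (not from the original source) for the kernel
* buffer helpers above, assuming a valid dev and aspace:
*
*	struct drm_gem_object *bo;
*	uint64_t iova;
*	void *ptr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC, aspace, &bo, &iova);
*
*	if (IS_ERR(ptr))
*		return PTR_ERR(ptr);
*	... CPU access via ptr, GPU access via iova ...
*	msm_gem_kernel_put(bo, aspace);
*/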
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
struct msm_gem_object *msm_obj = to_msm_bo(bo);
va_list ap;
if (!fmt)
return;
va_start(ap, fmt);
vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
va_end(ap);
}
| linux-master | drivers/gpu/drm/msm/msm_gem.c |
// SPDX-License-Identifier: GPL-2.0
#include "msm_gem.h"
#include "msm_ringbuffer.h"
#define CREATE_TRACE_POINTS
#include "msm_gpu_trace.h"
| linux-master | drivers/gpu/drm/msm/msm_gpu_tracepoints.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include <linux/adreno-smmu-priv.h>
#include <linux/io-pgtable.h>
#include "msm_drv.h"
#include "msm_mmu.h"
struct msm_iommu {
struct msm_mmu base;
struct iommu_domain *domain;
atomic_t pagetables;
};
#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
struct msm_iommu_pagetable {
struct msm_mmu base;
struct msm_mmu *parent;
struct io_pgtable_ops *pgtbl_ops;
unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
phys_addr_t ttbr;
u32 asid;
};
static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
{
return container_of(mmu, struct msm_iommu_pagetable, base);
}
/* based on iommu_pgsize() in iommu.c: */
static size_t calc_pgsize(struct msm_iommu_pagetable *pagetable,
unsigned long iova, phys_addr_t paddr,
size_t size, size_t *count)
{
unsigned int pgsize_idx, pgsize_idx_next;
unsigned long pgsizes;
size_t offset, pgsize, pgsize_next;
unsigned long addr_merge = paddr | iova;
/* Page sizes supported by the hardware and small enough for @size */
pgsizes = pagetable->pgsize_bitmap & GENMASK(__fls(size), 0);
/* Constrain the page sizes further based on the maximum alignment */
if (likely(addr_merge))
pgsizes &= GENMASK(__ffs(addr_merge), 0);
/* Make sure we have at least one suitable page size */
BUG_ON(!pgsizes);
/* Pick the biggest page size remaining */
pgsize_idx = __fls(pgsizes);
pgsize = BIT(pgsize_idx);
if (!count)
return pgsize;
/* Find the next biggest supported page size, if it exists */
pgsizes = pagetable->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
if (!pgsizes)
goto out_set_count;
pgsize_idx_next = __ffs(pgsizes);
pgsize_next = BIT(pgsize_idx_next);
/*
* There's no point trying a bigger page size unless the virtual
* and physical addresses are similarly offset within the larger page.
*/
if ((iova ^ paddr) & (pgsize_next - 1))
goto out_set_count;
/* Calculate the offset to the next page size alignment boundary */
offset = pgsize_next - (addr_merge & (pgsize_next - 1));
/*
* If size is big enough to accommodate the larger page, reduce
* the number of smaller pages.
*/
if (offset + pgsize_next <= size)
size = offset;
out_set_count:
*count = size >> pgsize_idx;
return pgsize;
}
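/*
* Worked example (illustrative, assuming a typical 4K/2M/1G pgsize_bitmap):
* mapping 4M at an iova and paddr that are both 2M-aligned yields
* pgsize = 2M with *count = 2, while addresses with only 4K alignment are
* limited to pgsize = 4K.
*/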
static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
size_t size)
{
struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
while (size) {
size_t unmapped, pgsize, count;
pgsize = calc_pgsize(pagetable, iova, iova, size, &count);
unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL);
if (!unmapped)
break;
iova += unmapped;
size -= unmapped;
}
iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain);
return (size == 0) ? 0 : -EINVAL;
}
static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
struct sg_table *sgt, size_t len, int prot)
{
struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
struct scatterlist *sg;
u64 addr = iova;
unsigned int i;
for_each_sgtable_sg(sgt, sg, i) {
size_t size = sg->length;
phys_addr_t phys = sg_phys(sg);
while (size) {
size_t pgsize, count, mapped = 0;
int ret;
pgsize = calc_pgsize(pagetable, addr, phys, size, &count);
ret = ops->map_pages(ops, addr, phys, pgsize, count,
prot, GFP_KERNEL, &mapped);
/* map_pages could fail after mapping some of the pages,
* so update the counters before error handling.
*/
phys += mapped;
addr += mapped;
size -= mapped;
if (ret) {
msm_iommu_pagetable_unmap(mmu, iova, addr - iova);
return -EINVAL;
}
}
}
return 0;
}
static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
{
struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
struct msm_iommu *iommu = to_msm_iommu(pagetable->parent);
struct adreno_smmu_priv *adreno_smmu =
dev_get_drvdata(pagetable->parent->dev);
/*
* If this is the last attached pagetable for the parent,
* disable TTBR0 in the arm-smmu driver
*/
if (atomic_dec_return(&iommu->pagetables) == 0)
adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL);
free_io_pgtable_ops(pagetable->pgtbl_ops);
kfree(pagetable);
}
int msm_iommu_pagetable_params(struct msm_mmu *mmu,
phys_addr_t *ttbr, int *asid)
{
struct msm_iommu_pagetable *pagetable;
if (mmu->type != MSM_MMU_IOMMU_PAGETABLE)
return -EINVAL;
pagetable = to_pagetable(mmu);
if (ttbr)
*ttbr = pagetable->ttbr;
if (asid)
*asid = pagetable->asid;
return 0;
}
struct iommu_domain_geometry *msm_iommu_get_geometry(struct msm_mmu *mmu)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
return &iommu->domain->geometry;
}
static const struct msm_mmu_funcs pagetable_funcs = {
.map = msm_iommu_pagetable_map,
.unmap = msm_iommu_pagetable_unmap,
.destroy = msm_iommu_pagetable_destroy,
};
static void msm_iommu_tlb_flush_all(void *cookie)
{
}
static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,
size_t granule, void *cookie)
{
}
static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
unsigned long iova, size_t granule, void *cookie)
{
}
static const struct iommu_flush_ops null_tlb_ops = {
.tlb_flush_all = msm_iommu_tlb_flush_all,
.tlb_flush_walk = msm_iommu_tlb_flush_walk,
.tlb_add_page = msm_iommu_tlb_add_page,
};
static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags, void *arg);
struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
{
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
struct msm_iommu *iommu = to_msm_iommu(parent);
struct msm_iommu_pagetable *pagetable;
const struct io_pgtable_cfg *ttbr1_cfg = NULL;
struct io_pgtable_cfg ttbr0_cfg;
int ret;
/* Get the pagetable configuration from the domain */
if (adreno_smmu->cookie)
ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
/*
* If you hit this WARN_ONCE() you are probably missing an entry in
* qcom_smmu_impl_of_match[] in arm-smmu-qcom.c
*/
if (WARN_ONCE(!ttbr1_cfg, "No per-process page tables"))
return ERR_PTR(-ENODEV);
pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL);
if (!pagetable)
return ERR_PTR(-ENOMEM);
msm_mmu_init(&pagetable->base, parent->dev, &pagetable_funcs,
MSM_MMU_IOMMU_PAGETABLE);
/* Clone the TTBR1 cfg as starting point for TTBR0 cfg: */
ttbr0_cfg = *ttbr1_cfg;
/* The incoming cfg will have the TTBR1 quirk enabled */
ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
ttbr0_cfg.tlb = &null_tlb_ops;
pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
&ttbr0_cfg, iommu->domain);
if (!pagetable->pgtbl_ops) {
kfree(pagetable);
return ERR_PTR(-ENOMEM);
}
/*
* If this is the first pagetable that we've allocated, send it back to
* the arm-smmu driver as a trigger to set up TTBR0
*/
if (atomic_inc_return(&iommu->pagetables) == 1) {
ret = adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, &ttbr0_cfg);
if (ret) {
free_io_pgtable_ops(pagetable->pgtbl_ops);
kfree(pagetable);
return ERR_PTR(ret);
}
}
/* Needed later for TLB flush */
pagetable->parent = parent;
pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap;
pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;
/*
* TODO we would like each set of page tables to have a unique ASID
* to optimize TLB invalidation. But iommu_flush_iotlb_all() will
* end up flushing the ASID used for TTBR1 pagetables, which is not
* what we want. So for now just use the same ASID as TTBR1.
*/
pagetable->asid = 0;
return &pagetable->base;
}
static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags, void *arg)
{
struct msm_iommu *iommu = arg;
struct msm_mmu *mmu = &iommu->base;
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(iommu->base.dev);
struct adreno_smmu_fault_info info, *ptr = NULL;
if (adreno_smmu->get_fault_info) {
adreno_smmu->get_fault_info(adreno_smmu->cookie, &info);
ptr = &info;
}
if (iommu->base.handler)
return iommu->base.handler(iommu->base.arg, iova, flags, ptr);
pr_warn_ratelimited("*** fault: iova=%16lx, flags=%d\n", iova, flags);
if (mmu->funcs->resume_translation)
mmu->funcs->resume_translation(mmu);
return 0;
}
static void msm_iommu_resume_translation(struct msm_mmu *mmu)
{
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(mmu->dev);
if (adreno_smmu->resume_translation)
adreno_smmu->resume_translation(adreno_smmu->cookie, true);
}
static void msm_iommu_detach(struct msm_mmu *mmu)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
iommu_detach_device(iommu->domain, mmu->dev);
}
static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
struct sg_table *sgt, size_t len, int prot)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
size_t ret;
/* The arm-smmu driver expects the addresses to be sign extended */
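/* e.g. (illustrative) an iova of 0x0001000000000000 becomes
* 0xffff000000000000 once bits 63:49 are OR-ed in below.
*/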
if (iova & BIT_ULL(48))
iova |= GENMASK_ULL(63, 49);
ret = iommu_map_sgtable(iommu->domain, iova, sgt, prot);
WARN_ON(!ret);
return (ret == len) ? 0 : -EINVAL;
}
static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
if (iova & BIT_ULL(48))
iova |= GENMASK_ULL(63, 49);
iommu_unmap(iommu->domain, iova, len);
return 0;
}
static void msm_iommu_destroy(struct msm_mmu *mmu)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
iommu_domain_free(iommu->domain);
kfree(iommu);
}
static const struct msm_mmu_funcs funcs = {
.detach = msm_iommu_detach,
.map = msm_iommu_map,
.unmap = msm_iommu_unmap,
.destroy = msm_iommu_destroy,
.resume_translation = msm_iommu_resume_translation,
};
struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks)
{
struct iommu_domain *domain;
struct msm_iommu *iommu;
int ret;
domain = iommu_domain_alloc(dev->bus);
if (!domain)
return NULL;
iommu_set_pgtable_quirks(domain, quirks);
iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
if (!iommu) {
iommu_domain_free(domain);
return ERR_PTR(-ENOMEM);
}
iommu->domain = domain;
msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);
atomic_set(&iommu->pagetables, 0);
ret = iommu_attach_device(iommu->domain, dev);
if (ret) {
iommu_domain_free(domain);
kfree(iommu);
return ERR_PTR(ret);
}
return &iommu->base;
}
struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsigned long quirks)
{
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);
struct msm_iommu *iommu;
struct msm_mmu *mmu;
mmu = msm_iommu_new(dev, quirks);
if (IS_ERR_OR_NULL(mmu))
return mmu;
iommu = to_msm_iommu(mmu);
iommu_set_fault_handler(iommu->domain, msm_fault_handler, iommu);
/* Enable stall on iommu fault: */
if (adreno_smmu->set_stall)
adreno_smmu->set_stall(adreno_smmu->cookie, true);
return mmu;
}
| linux-master | drivers/gpu/drm/msm/msm_iommu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2018, 2020-2021 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include <linux/dma-mapping.h>
#include <linux/fault-inject.h>
#include <linux/kthread.h>
#include <linux/of_address.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <uapi/linux/sched/types.h>
#include <drm/drm_aperture.h>
#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>
#include <drm/drm_of.h>
#include <drm/drm_vblank.h>
#include "disp/msm_disp_snapshot.h"
#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_kms.h"
#include "msm_mmu.h"
#include "adreno/adreno_gpu.h"
/*
* MSM driver version:
* - 1.0.0 - initial interface
* - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
* - 1.2.0 - adds explicit fence support for submit ioctl
* - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
* SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
* MSM_GEM_INFO ioctl.
* - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
* GEM object's debug name
* - 1.5.0 - Add SUBMITQUERY_QUERY ioctl
* - 1.6.0 - Syncobj support
* - 1.7.0 - Add MSM_PARAM_SUSPENDS to access suspend count
* - 1.8.0 - Add MSM_BO_CACHED_COHERENT for supported GPUs (a6xx)
* - 1.9.0 - Add MSM_SUBMIT_FENCE_SN_IN
* - 1.10.0 - Add MSM_SUBMIT_BO_NO_IMPLICIT
* - 1.11.0 - Add wait boost (MSM_WAIT_FENCE_BOOST, MSM_PREP_BOOST)
*/
#define MSM_VERSION_MAJOR 1
#define MSM_VERSION_MINOR 10
#define MSM_VERSION_PATCHLEVEL 0
static void msm_deinit_vram(struct drm_device *ddev);
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = msm_framebuffer_create,
.atomic_check = msm_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
};
static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
.atomic_commit_tail = msm_atomic_commit_tail,
};
static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);
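/* e.g. passing msm.vram=64m when built-in (or vram=64m as a module option);
* the string is parsed by memparse() in msm_init_vram(), so k/m/g suffixes
* are accepted.
*/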
bool dumpstate;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);
static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);
#ifdef CONFIG_FAULT_INJECTION
DECLARE_FAULT_ATTR(fail_gem_alloc);
DECLARE_FAULT_ATTR(fail_gem_iova);
#endif
static irqreturn_t msm_irq(int irq, void *arg)
{
struct drm_device *dev = arg;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
BUG_ON(!kms);
return kms->funcs->irq(kms);
}
static void msm_irq_preinstall(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
BUG_ON(!kms);
kms->funcs->irq_preinstall(kms);
}
static int msm_irq_postinstall(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
BUG_ON(!kms);
if (kms->funcs->irq_postinstall)
return kms->funcs->irq_postinstall(kms);
return 0;
}
static int msm_irq_install(struct drm_device *dev, unsigned int irq)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
int ret;
if (irq == IRQ_NOTCONNECTED)
return -ENOTCONN;
msm_irq_preinstall(dev);
ret = request_irq(irq, msm_irq, 0, dev->driver->name, dev);
if (ret)
return ret;
kms->irq_requested = true;
ret = msm_irq_postinstall(dev);
if (ret) {
free_irq(irq, dev);
return ret;
}
return 0;
}
static void msm_irq_uninstall(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
kms->funcs->irq_uninstall(kms);
if (kms->irq_requested)
free_irq(kms->irq, dev);
}
struct msm_vblank_work {
struct work_struct work;
struct drm_crtc *crtc;
bool enable;
struct msm_drm_private *priv;
};
static void vblank_ctrl_worker(struct work_struct *work)
{
struct msm_vblank_work *vbl_work = container_of(work,
struct msm_vblank_work, work);
struct msm_drm_private *priv = vbl_work->priv;
struct msm_kms *kms = priv->kms;
if (vbl_work->enable)
kms->funcs->enable_vblank(kms, vbl_work->crtc);
else
kms->funcs->disable_vblank(kms, vbl_work->crtc);
kfree(vbl_work);
}
static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
struct drm_crtc *crtc, bool enable)
{
struct msm_vblank_work *vbl_work;
vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
if (!vbl_work)
return -ENOMEM;
INIT_WORK(&vbl_work->work, vblank_ctrl_worker);
vbl_work->crtc = crtc;
vbl_work->enable = enable;
vbl_work->priv = priv;
queue_work(priv->wq, &vbl_work->work);
return 0;
}
static int msm_drm_uninit(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct msm_drm_private *priv = platform_get_drvdata(pdev);
struct drm_device *ddev = priv->dev;
struct msm_kms *kms = priv->kms;
int i;
/*
* Shutdown the hw if we're far enough along where things might be on.
* If we run this too early, we'll end up panicking in any variety of
* places. Since we don't register the drm device until late in
* msm_drm_init, drm_dev->registered is used as an indicator that the
* shutdown will be successful.
*/
if (ddev->registered) {
drm_dev_unregister(ddev);
drm_atomic_helper_shutdown(ddev);
}
/* We must cancel and cleanup any pending vblank enable/disable
* work before msm_irq_uninstall() to avoid work re-enabling an
* irq after uninstall has disabled it.
*/
flush_workqueue(priv->wq);
/* clean up event worker threads */
for (i = 0; i < priv->num_crtcs; i++) {
if (priv->event_thread[i].worker)
kthread_destroy_worker(priv->event_thread[i].worker);
}
msm_gem_shrinker_cleanup(ddev);
drm_kms_helper_poll_fini(ddev);
msm_perf_debugfs_cleanup(priv);
msm_rd_debugfs_cleanup(priv);
if (kms)
msm_disp_snapshot_destroy(ddev);
drm_mode_config_cleanup(ddev);
for (i = 0; i < priv->num_bridges; i++)
drm_bridge_remove(priv->bridges[i]);
priv->num_bridges = 0;
if (kms) {
pm_runtime_get_sync(dev);
msm_irq_uninstall(ddev);
pm_runtime_put_sync(dev);
}
if (kms && kms->funcs)
kms->funcs->destroy(kms);
msm_deinit_vram(ddev);
component_unbind_all(dev, ddev);
ddev->dev_private = NULL;
drm_dev_put(ddev);
destroy_workqueue(priv->wq);
return 0;
}
struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
{
struct msm_gem_address_space *aspace;
struct msm_mmu *mmu;
struct device *mdp_dev = dev->dev;
struct device *mdss_dev = mdp_dev->parent;
struct device *iommu_dev;
/*
* The IOMMU can be a part of the MDSS device tree binding or of the
* MDP/DPU device itself.
*/
if (device_iommu_mapped(mdp_dev))
iommu_dev = mdp_dev;
else
iommu_dev = mdss_dev;
mmu = msm_iommu_new(iommu_dev, 0);
if (IS_ERR(mmu))
return ERR_CAST(mmu);
if (!mmu) {
drm_info(dev, "no IOMMU, fallback to phys contig buffers for scanout\n");
return NULL;
}
aspace = msm_gem_address_space_create(mmu, "mdp_kms",
0x1000, 0x100000000 - 0x1000);
if (IS_ERR(aspace)) {
dev_err(mdp_dev, "aspace create, error %pe\n", aspace);
mmu->funcs->destroy(mmu);
}
return aspace;
}
bool msm_use_mmu(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
/*
* a2xx comes with its own MMU
* On other platforms the IOMMU can be specified either for the
* MDP/DPU device or for its parent, the MDSS device.
*/
return priv->is_a2xx ||
device_iommu_mapped(dev->dev) ||
device_iommu_mapped(dev->dev->parent);
}
static int msm_init_vram(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct device_node *node;
unsigned long size = 0;
int ret = 0;
/* In the device-tree world, we could have a 'memory-region'
* phandle, which gives us a link to our "vram". Allocating
* is all nicely abstracted behind the dma api, but we need
* to know the entire size to allocate it all in one go. There
* are two cases:
* 1) device with no IOMMU, in which case we need exclusive
* access to a VRAM carveout big enough for all gpu
* buffers
* 2) device with IOMMU, but where the bootloader puts up
* a splash screen. In this case, the VRAM carveout
* need only be large enough for fbdev fb. But we need
* exclusive access to the buffer to avoid the kernel
* using those pages for other purposes (which appears
* as corruption on screen before we have a chance to
* load and do initial modeset)
*/
node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
if (node) {
struct resource r;
ret = of_address_to_resource(node, 0, &r);
of_node_put(node);
if (ret)
return ret;
size = r.end - r.start + 1;
DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
/* if we have no IOMMU, then we need to use carveout allocator.
* Grab the entire DMA chunk carved out in early startup in
* mach-msm:
*/
} else if (!msm_use_mmu(dev)) {
DRM_INFO("using %s VRAM carveout\n", vram);
size = memparse(vram, NULL);
}
if (size) {
unsigned long attrs = 0;
void *p;
priv->vram.size = size;
drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
spin_lock_init(&priv->vram.lock);
attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
attrs |= DMA_ATTR_WRITE_COMBINE;
/* note that for no-kernel-mapping, the vaddr returned
* is bogus, but non-null if allocation succeeded:
*/
p = dma_alloc_attrs(dev->dev, size,
&priv->vram.paddr, GFP_KERNEL, attrs);
if (!p) {
DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
priv->vram.paddr = 0;
return -ENOMEM;
}
DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
(uint32_t)priv->vram.paddr,
(uint32_t)(priv->vram.paddr + size));
}
return ret;
}
static void msm_deinit_vram(struct drm_device *ddev)
{
struct msm_drm_private *priv = ddev->dev_private;
unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
if (!priv->vram.paddr)
return;
drm_mm_takedown(&priv->vram.mm);
dma_free_attrs(ddev->dev, priv->vram.size, NULL, priv->vram.paddr,
attrs);
}
static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
{
struct msm_drm_private *priv = dev_get_drvdata(dev);
struct drm_device *ddev;
struct msm_kms *kms;
struct drm_crtc *crtc;
int ret;
if (drm_firmware_drivers_only())
return -ENODEV;
ddev = drm_dev_alloc(drv, dev);
if (IS_ERR(ddev)) {
DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
return PTR_ERR(ddev);
}
ddev->dev_private = priv;
priv->dev = ddev;
priv->wq = alloc_ordered_workqueue("msm", 0);
if (!priv->wq) {
ret = -ENOMEM;
goto err_put_dev;
}
INIT_LIST_HEAD(&priv->objects);
mutex_init(&priv->obj_lock);
/*
* Initialize the LRUs:
*/
mutex_init(&priv->lru.lock);
drm_gem_lru_init(&priv->lru.unbacked, &priv->lru.lock);
drm_gem_lru_init(&priv->lru.pinned, &priv->lru.lock);
drm_gem_lru_init(&priv->lru.willneed, &priv->lru.lock);
drm_gem_lru_init(&priv->lru.dontneed, &priv->lru.lock);
/* Teach lockdep about lock ordering wrt. shrinker: */
fs_reclaim_acquire(GFP_KERNEL);
might_lock(&priv->lru.lock);
fs_reclaim_release(GFP_KERNEL);
drm_mode_config_init(ddev);
ret = msm_init_vram(ddev);
if (ret)
goto err_cleanup_mode_config;
dma_set_max_seg_size(dev, UINT_MAX);
/* Bind all our sub-components: */
ret = component_bind_all(dev, ddev);
if (ret)
goto err_deinit_vram;
/* the fw fb could be anywhere in memory */
ret = drm_aperture_remove_framebuffers(drv);
if (ret)
goto err_msm_uninit;
msm_gem_shrinker_init(ddev);
if (priv->kms_init) {
ret = priv->kms_init(ddev);
if (ret) {
DRM_DEV_ERROR(dev, "failed to load kms\n");
priv->kms = NULL;
goto err_msm_uninit;
}
kms = priv->kms;
} else {
/* valid only for the dummy headless case, where of_node=NULL */
WARN_ON(dev->of_node);
kms = NULL;
}
/* Enable normalization of plane zpos */
ddev->mode_config.normalize_zpos = true;
if (kms) {
kms->dev = ddev;
ret = kms->funcs->hw_init(kms);
if (ret) {
DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
goto err_msm_uninit;
}
}
drm_helper_move_panel_connectors_to_head(ddev);
ddev->mode_config.funcs = &mode_config_funcs;
ddev->mode_config.helper_private = &mode_config_helper_funcs;
drm_for_each_crtc(crtc, ddev) {
struct msm_drm_thread *ev_thread;
/* initialize event thread */
ev_thread = &priv->event_thread[drm_crtc_index(crtc)];
ev_thread->dev = ddev;
ev_thread->worker = kthread_create_worker(0, "crtc_event:%d", crtc->base.id);
if (IS_ERR(ev_thread->worker)) {
ret = PTR_ERR(ev_thread->worker);
DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
ev_thread->worker = NULL;
goto err_msm_uninit;
}
sched_set_fifo(ev_thread->worker->task);
}
ret = drm_vblank_init(ddev, priv->num_crtcs);
if (ret < 0) {
DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
goto err_msm_uninit;
}
if (kms) {
pm_runtime_get_sync(dev);
ret = msm_irq_install(ddev, kms->irq);
pm_runtime_put_sync(dev);
if (ret < 0) {
DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
goto err_msm_uninit;
}
}
ret = drm_dev_register(ddev, 0);
if (ret)
goto err_msm_uninit;
if (kms) {
ret = msm_disp_snapshot_init(ddev);
if (ret)
DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);
}
drm_mode_config_reset(ddev);
ret = msm_debugfs_late_init(ddev);
if (ret)
goto err_msm_uninit;
drm_kms_helper_poll_init(ddev);
if (kms)
msm_fbdev_setup(ddev);
return 0;
err_msm_uninit:
msm_drm_uninit(dev);
return ret;
err_deinit_vram:
msm_deinit_vram(ddev);
err_cleanup_mode_config:
drm_mode_config_cleanup(ddev);
destroy_workqueue(priv->wq);
err_put_dev:
drm_dev_put(ddev);
return ret;
}
/*
* DRM operations:
*/
static void load_gpu(struct drm_device *dev)
{
static DEFINE_MUTEX(init_lock);
struct msm_drm_private *priv = dev->dev_private;
mutex_lock(&init_lock);
if (!priv->gpu)
priv->gpu = adreno_load_gpu(dev);
mutex_unlock(&init_lock);
}
static int context_init(struct drm_device *dev, struct drm_file *file)
{
static atomic_t ident = ATOMIC_INIT(0);
struct msm_drm_private *priv = dev->dev_private;
struct msm_file_private *ctx;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
INIT_LIST_HEAD(&ctx->submitqueues);
rwlock_init(&ctx->queuelock);
kref_init(&ctx->ref);
msm_submitqueue_init(dev, ctx);
ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
file->driver_priv = ctx;
ctx->seqno = atomic_inc_return(&ident);
return 0;
}
static int msm_open(struct drm_device *dev, struct drm_file *file)
{
/* For now, load gpu on open.. to avoid the requirement of having
* firmware in the initrd.
*/
load_gpu(dev);
return context_init(dev, file);
}
static void context_close(struct msm_file_private *ctx)
{
msm_submitqueue_close(ctx);
msm_file_private_put(ctx);
}
static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_file_private *ctx = file->driver_priv;
/*
* It is not possible to set sysprof param to non-zero if gpu
* is not initialized:
*/
if (priv->gpu)
msm_file_private_set_sysprof(ctx, priv->gpu, 0);
context_close(ctx);
}
int msm_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
if (!kms)
return -ENXIO;
drm_dbg_vbl(dev, "crtc=%u", crtc->base.id);
return vblank_ctrl_queue_work(priv, crtc, true);
}
void msm_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
if (!kms)
return;
drm_dbg_vbl(dev, "crtc=%u", crtc->base.id);
vblank_ctrl_queue_work(priv, crtc, false);
}
/*
* DRM ioctls:
*/
static int msm_ioctl_get_param(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct msm_drm_private *priv = dev->dev_private;
struct drm_msm_param *args = data;
struct msm_gpu *gpu;
/* for now, we just have 3d pipe.. eventually this would need to
* be more clever to dispatch to appropriate gpu module:
*/
if ((args->pipe != MSM_PIPE_3D0) || (args->pad != 0))
return -EINVAL;
gpu = priv->gpu;
if (!gpu)
return -ENXIO;
return gpu->funcs->get_param(gpu, file->driver_priv,
args->param, &args->value, &args->len);
}
static int msm_ioctl_set_param(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct msm_drm_private *priv = dev->dev_private;
struct drm_msm_param *args = data;
struct msm_gpu *gpu;
if ((args->pipe != MSM_PIPE_3D0) || (args->pad != 0))
return -EINVAL;
gpu = priv->gpu;
if (!gpu)
return -ENXIO;
return gpu->funcs->set_param(gpu, file->driver_priv,
args->param, args->value, args->len);
}
static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_msm_gem_new *args = data;
uint32_t flags = args->flags;
if (args->flags & ~MSM_BO_FLAGS) {
DRM_ERROR("invalid flags: %08x\n", args->flags);
return -EINVAL;
}
/*
* Uncached CPU mappings are deprecated, as of:
*
* 9ef364432db4 ("drm/msm: deprecate MSM_BO_UNCACHED (map as writecombine instead)")
*
* So promote them to WC.
*/
if (flags & MSM_BO_UNCACHED) {
flags &= ~MSM_BO_CACHED;
flags |= MSM_BO_WC;
}
if (should_fail(&fail_gem_alloc, args->size))
return -ENOMEM;
return msm_gem_new_handle(dev, file, args->size,
args->flags, &args->handle, NULL);
}
static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
return ktime_set(timeout.tv_sec, timeout.tv_nsec);
}
static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_msm_gem_cpu_prep *args = data;
struct drm_gem_object *obj;
ktime_t timeout = to_ktime(args->timeout);
int ret;
if (args->op & ~MSM_PREP_FLAGS) {
DRM_ERROR("invalid op: %08x\n", args->op);
return -EINVAL;
}
obj = drm_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
ret = msm_gem_cpu_prep(obj, args->op, &timeout);
drm_gem_object_put(obj);
return ret;
}
static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_msm_gem_cpu_fini *args = data;
struct drm_gem_object *obj;
int ret;
obj = drm_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
ret = msm_gem_cpu_fini(obj);
drm_gem_object_put(obj);
return ret;
}
static int msm_ioctl_gem_info_iova(struct drm_device *dev,
struct drm_file *file, struct drm_gem_object *obj,
uint64_t *iova)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_file_private *ctx = file->driver_priv;
if (!priv->gpu)
return -EINVAL;
if (should_fail(&fail_gem_iova, obj->size))
return -ENOMEM;
/*
* Don't pin the memory here - just get an address so that userspace can
* be productive
*/
return msm_gem_get_iova(obj, ctx->aspace, iova);
}
static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
struct drm_file *file, struct drm_gem_object *obj,
uint64_t iova)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_file_private *ctx = file->driver_priv;
if (!priv->gpu)
return -EINVAL;
/* Only supported if per-process address space is supported: */
if (priv->gpu->aspace == ctx->aspace)
return -EOPNOTSUPP;
if (should_fail(&fail_gem_iova, obj->size))
return -ENOMEM;
return msm_gem_set_iova(obj, ctx->aspace, iova);
}
static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_msm_gem_info *args = data;
struct drm_gem_object *obj;
struct msm_gem_object *msm_obj;
int i, ret = 0;
if (args->pad)
return -EINVAL;
switch (args->info) {
case MSM_INFO_GET_OFFSET:
case MSM_INFO_GET_IOVA:
case MSM_INFO_SET_IOVA:
case MSM_INFO_GET_FLAGS:
/* value returned as immediate, not pointer, so len==0: */
if (args->len)
return -EINVAL;
break;
case MSM_INFO_SET_NAME:
case MSM_INFO_GET_NAME:
break;
default:
return -EINVAL;
}
obj = drm_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
msm_obj = to_msm_bo(obj);
switch (args->info) {
case MSM_INFO_GET_OFFSET:
args->value = msm_gem_mmap_offset(obj);
break;
case MSM_INFO_GET_IOVA:
ret = msm_ioctl_gem_info_iova(dev, file, obj, &args->value);
break;
case MSM_INFO_SET_IOVA:
ret = msm_ioctl_gem_info_set_iova(dev, file, obj, args->value);
break;
case MSM_INFO_GET_FLAGS:
if (obj->import_attach) {
ret = -EINVAL;
break;
}
/* Hide internal kernel-only flags: */
args->value = to_msm_bo(obj)->flags & MSM_BO_FLAGS;
ret = 0;
break;
case MSM_INFO_SET_NAME:
/* length check should leave room for terminating null: */
if (args->len >= sizeof(msm_obj->name)) {
ret = -EINVAL;
break;
}
if (copy_from_user(msm_obj->name, u64_to_user_ptr(args->value),
args->len)) {
msm_obj->name[0] = '\0';
ret = -EFAULT;
break;
}
msm_obj->name[args->len] = '\0';
for (i = 0; i < args->len; i++) {
if (!isprint(msm_obj->name[i])) {
msm_obj->name[i] = '\0';
break;
}
}
break;
case MSM_INFO_GET_NAME:
if (args->value && (args->len < strlen(msm_obj->name))) {
ret = -EINVAL;
break;
}
args->len = strlen(msm_obj->name);
if (args->value) {
if (copy_to_user(u64_to_user_ptr(args->value),
msm_obj->name, args->len))
ret = -EFAULT;
}
break;
}
drm_gem_object_put(obj);
return ret;
}
static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
ktime_t timeout, uint32_t flags)
{
struct dma_fence *fence;
int ret;
if (fence_after(fence_id, queue->last_fence)) {
DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n",
fence_id, queue->last_fence);
return -EINVAL;
}
/*
* Map submitqueue scoped "seqno" (which is actually an idr key)
* back to underlying dma-fence
*
* The fence is removed from the fence_idr when the submit is
* retired, so if the fence is not found it means there is nothing
* to wait for
*/
spin_lock(&queue->idr_lock);
fence = idr_find(&queue->fence_idr, fence_id);
if (fence)
fence = dma_fence_get_rcu(fence);
spin_unlock(&queue->idr_lock);
if (!fence)
return 0;
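/*
* If the caller asked for a boost, give the fence an immediate
* deadline so the signaling side bumps GPU clocks while we wait.
*/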
if (flags & MSM_WAIT_FENCE_BOOST)
dma_fence_set_deadline(fence, ktime_get());
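/*
* dma_fence_wait_timeout() returns remaining jiffies on success,
* 0 on timeout, or a negative error (e.g. -ERESTARTSYS); map that
* back onto a plain errno for the ioctl.
*/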
ret = dma_fence_wait_timeout(fence, true, timeout_to_jiffies(&timeout));
if (ret == 0) {
ret = -ETIMEDOUT;
} else if (ret != -ERESTARTSYS) {
ret = 0;
}
dma_fence_put(fence);
return ret;
}
static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct msm_drm_private *priv = dev->dev_private;
struct drm_msm_wait_fence *args = data;
struct msm_gpu_submitqueue *queue;
int ret;
if (args->flags & ~MSM_WAIT_FENCE_FLAGS) {
DRM_ERROR("invalid flags: %08x\n", args->flags);
return -EINVAL;
}
if (!priv->gpu)
return 0;
queue = msm_submitqueue_get(file->driver_priv, args->queueid);
if (!queue)
return -ENOENT;
ret = wait_fence(queue, args->fence, to_ktime(args->timeout), args->flags);
msm_submitqueue_put(queue);
return ret;
}
static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_msm_gem_madvise *args = data;
struct drm_gem_object *obj;
int ret;
switch (args->madv) {
case MSM_MADV_DONTNEED:
case MSM_MADV_WILLNEED:
break;
default:
return -EINVAL;
}
obj = drm_gem_object_lookup(file, args->handle);
if (!obj) {
return -ENOENT;
}
ret = msm_gem_madvise(obj, args->madv);
if (ret >= 0) {
args->retained = ret;
ret = 0;
}
drm_gem_object_put(obj);
return ret;
}
static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_msm_submitqueue *args = data;
if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
return -EINVAL;
return msm_submitqueue_create(dev, file->driver_priv, args->prio,
args->flags, &args->id);
}
static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data,
struct drm_file *file)
{
return msm_submitqueue_query(dev, file->driver_priv, data);
}
static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
struct drm_file *file)
{
u32 id = *(u32 *) data;
return msm_submitqueue_remove(file->driver_priv, id);
}
static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_SET_PARAM, msm_ioctl_set_param, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
};
static void msm_show_fdinfo(struct drm_printer *p, struct drm_file *file)
{
struct drm_device *dev = file->minor->dev;
struct msm_drm_private *priv = dev->dev_private;
if (!priv->gpu)
return;
msm_gpu_show_fdinfo(priv->gpu, file->driver_priv, p);
drm_show_memory_stats(p, file);
}
static const struct file_operations fops = {
.owner = THIS_MODULE,
DRM_GEM_FOPS,
.show_fdinfo = drm_show_fdinfo,
};
static const struct drm_driver msm_driver = {
.driver_features = DRIVER_GEM |
DRIVER_RENDER |
DRIVER_ATOMIC |
DRIVER_MODESET |
DRIVER_SYNCOBJ,
.open = msm_open,
.postclose = msm_postclose,
.dumb_create = msm_gem_dumb_create,
.dumb_map_offset = msm_gem_dumb_map_offset,
.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = msm_debugfs_init,
#endif
.show_fdinfo = msm_show_fdinfo,
.ioctls = msm_ioctls,
.num_ioctls = ARRAY_SIZE(msm_ioctls),
.fops = &fops,
.name = "msm",
.desc = "MSM Snapdragon DRM",
.date = "20130625",
.major = MSM_VERSION_MAJOR,
.minor = MSM_VERSION_MINOR,
.patchlevel = MSM_VERSION_PATCHLEVEL,
};
int msm_pm_prepare(struct device *dev)
{
struct msm_drm_private *priv = dev_get_drvdata(dev);
struct drm_device *ddev = priv ? priv->dev : NULL;
if (!priv || !priv->kms)
return 0;
return drm_mode_config_helper_suspend(ddev);
}
void msm_pm_complete(struct device *dev)
{
struct msm_drm_private *priv = dev_get_drvdata(dev);
struct drm_device *ddev = priv ? priv->dev : NULL;
if (!priv || !priv->kms)
return;
drm_mode_config_helper_resume(ddev);
}
static const struct dev_pm_ops msm_pm_ops = {
.prepare = msm_pm_prepare,
.complete = msm_pm_complete,
};
/*
* Componentized driver support:
*/
/*
* Identify what components need to be added by parsing what remote-endpoints
* our MDP output ports are connected to. In the case of LVDS on MDP4, there
* is no external component that we need to add since LVDS is within MDP4
* itself.
*/
static int add_components_mdp(struct device *master_dev,
struct component_match **matchptr)
{
struct device_node *np = master_dev->of_node;
struct device_node *ep_node;
for_each_endpoint_of_node(np, ep_node) {
struct device_node *intf;
struct of_endpoint ep;
int ret;
ret = of_graph_parse_endpoint(ep_node, &ep);
if (ret) {
DRM_DEV_ERROR(master_dev, "unable to parse port endpoint\n");
of_node_put(ep_node);
return ret;
}
/*
* The LCDC/LVDS port on MDP4 is a special case where the
* remote-endpoint isn't a component that we need to add
*/
if (of_device_is_compatible(np, "qcom,mdp4") &&
ep.port == 0)
continue;
/*
* It's okay if some of the ports don't have a remote endpoint
* specified. It just means that the port isn't connected to
* any external interface.
*/
intf = of_graph_get_remote_port_parent(ep_node);
if (!intf)
continue;
if (of_device_is_available(intf))
drm_of_component_match_add(master_dev, matchptr,
component_compare_of, intf);
of_node_put(intf);
}
return 0;
}
/*
* We don't know what's the best binding to link the gpu with the drm device.
* For now, we just hunt for all the possible gpus that we support, and add them
* as components.
*/
static const struct of_device_id msm_gpu_match[] = {
{ .compatible = "qcom,adreno" },
{ .compatible = "qcom,adreno-3xx" },
{ .compatible = "amd,imageon" },
{ .compatible = "qcom,kgsl-3d0" },
{ },
};
static int add_gpu_components(struct device *dev,
struct component_match **matchptr)
{
struct device_node *np;
np = of_find_matching_node(NULL, msm_gpu_match);
if (!np)
return 0;
if (of_device_is_available(np))
drm_of_component_match_add(dev, matchptr, component_compare_of, np);
of_node_put(np);
return 0;
}
static int msm_drm_bind(struct device *dev)
{
return msm_drm_init(dev, &msm_driver);
}
static void msm_drm_unbind(struct device *dev)
{
msm_drm_uninit(dev);
}
const struct component_master_ops msm_drm_ops = {
.bind = msm_drm_bind,
.unbind = msm_drm_unbind,
};
int msm_drv_probe(struct device *master_dev,
int (*kms_init)(struct drm_device *dev))
{
struct msm_drm_private *priv;
struct component_match *match = NULL;
int ret;
priv = devm_kzalloc(master_dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->kms_init = kms_init;
dev_set_drvdata(master_dev, priv);
/* Add mdp components if we have KMS. */
if (kms_init) {
ret = add_components_mdp(master_dev, &match);
if (ret)
return ret;
}
ret = add_gpu_components(master_dev, &match);
if (ret)
return ret;
/* On all devices that we are aware of, IOMMUs which can map
* any address the CPU can see are used:
*/
ret = dma_set_mask_and_coherent(master_dev, ~0);
if (ret)
return ret;
ret = component_master_add_with_match(master_dev, &msm_drm_ops, match);
if (ret)
return ret;
return 0;
}
/*
* Platform driver:
* Used only for headless GPU instances
*/
static int msm_pdev_probe(struct platform_device *pdev)
{
return msm_drv_probe(&pdev->dev, NULL);
}
static int msm_pdev_remove(struct platform_device *pdev)
{
component_master_del(&pdev->dev, &msm_drm_ops);
return 0;
}
void msm_drv_shutdown(struct platform_device *pdev)
{
struct msm_drm_private *priv = platform_get_drvdata(pdev);
struct drm_device *drm = priv ? priv->dev : NULL;
/*
* Shut down the hw if we're far enough along that things might be on.
* If we run this too early, we'll end up panicking in any variety of
* places. Since we don't register the drm device until late in
* msm_drm_init, drm_dev->registered is used as an indicator that the
* shutdown will be successful.
*/
if (drm && drm->registered && priv->kms)
drm_atomic_helper_shutdown(drm);
}
static struct platform_driver msm_platform_driver = {
.probe = msm_pdev_probe,
.remove = msm_pdev_remove,
.shutdown = msm_drv_shutdown,
.driver = {
.name = "msm",
.pm = &msm_pm_ops,
},
};
static int __init msm_drm_register(void)
{
if (!modeset)
return -EINVAL;
DBG("init");
msm_mdp_register();
msm_dpu_register();
msm_dsi_register();
msm_hdmi_register();
msm_dp_register();
adreno_register();
msm_mdp4_register();
msm_mdss_register();
return platform_driver_register(&msm_platform_driver);
}
static void __exit msm_drm_unregister(void)
{
DBG("fini");
platform_driver_unregister(&msm_platform_driver);
msm_mdss_unregister();
msm_mdp4_unregister();
msm_dp_unregister();
msm_hdmi_unregister();
adreno_unregister();
msm_dsi_unregister();
msm_mdp_unregister();
msm_dpu_unregister();
}
module_init(msm_drm_register);
module_exit(msm_drm_unregister);
MODULE_AUTHOR("Rob Clark <[email protected]");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/gpu/drm/msm/msm_drv.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013-2016 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include <linux/dma-fence.h>
#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gpu.h"
static struct msm_gpu *fctx2gpu(struct msm_fence_context *fctx)
{
struct msm_drm_private *priv = fctx->dev->dev_private;
return priv->gpu;
}
static enum hrtimer_restart deadline_timer(struct hrtimer *t)
{
struct msm_fence_context *fctx = container_of(t,
struct msm_fence_context, deadline_timer);
kthread_queue_work(fctx2gpu(fctx)->worker, &fctx->deadline_work);
return HRTIMER_NORESTART;
}
static void deadline_work(struct kthread_work *work)
{
struct msm_fence_context *fctx = container_of(work,
struct msm_fence_context, deadline_work);
/* If deadline fence has already passed, nothing to do: */
if (msm_fence_completed(fctx, fctx->next_deadline_fence))
return;
msm_devfreq_boost(fctx2gpu(fctx), 2);
}
struct msm_fence_context *
msm_fence_context_alloc(struct drm_device *dev, volatile uint32_t *fenceptr,
const char *name)
{
struct msm_fence_context *fctx;
static int index = 0;
fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
return ERR_PTR(-ENOMEM);
fctx->dev = dev;
strscpy(fctx->name, name, sizeof(fctx->name));
fctx->context = dma_fence_context_alloc(1);
fctx->index = index++;
fctx->fenceptr = fenceptr;
spin_lock_init(&fctx->spinlock);
/*
* Start out close to the 32b fence rollover point, so we can
* catch bugs with fence comparisons.
*/
fctx->last_fence = 0xffffff00;
fctx->completed_fence = fctx->last_fence;
*fctx->fenceptr = fctx->last_fence;
hrtimer_init(&fctx->deadline_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
fctx->deadline_timer.function = deadline_timer;
kthread_init_work(&fctx->deadline_work, deadline_work);
fctx->next_deadline = ktime_get();
return fctx;
}
void msm_fence_context_free(struct msm_fence_context *fctx)
{
kfree(fctx);
}
bool msm_fence_completed(struct msm_fence_context *fctx, uint32_t fence)
{
/*
* Note: Check completed_fence first, as fenceptr is in a write-combine
* mapping, so it will be more expensive to read.
*/
return (int32_t)(fctx->completed_fence - fence) >= 0 ||
(int32_t)(*fctx->fenceptr - fence) >= 0;
}
/* called from irq handler and workqueue (in recover path) */
void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
{
unsigned long flags;
spin_lock_irqsave(&fctx->spinlock, flags);
if (fence_after(fence, fctx->completed_fence))
fctx->completed_fence = fence;
if (msm_fence_completed(fctx, fctx->next_deadline_fence))
hrtimer_cancel(&fctx->deadline_timer);
spin_unlock_irqrestore(&fctx->spinlock, flags);
}
struct msm_fence {
struct dma_fence base;
struct msm_fence_context *fctx;
};
static inline struct msm_fence *to_msm_fence(struct dma_fence *fence)
{
return container_of(fence, struct msm_fence, base);
}
static const char *msm_fence_get_driver_name(struct dma_fence *fence)
{
return "msm";
}
static const char *msm_fence_get_timeline_name(struct dma_fence *fence)
{
struct msm_fence *f = to_msm_fence(fence);
return f->fctx->name;
}
static bool msm_fence_signaled(struct dma_fence *fence)
{
struct msm_fence *f = to_msm_fence(fence);
return msm_fence_completed(f->fctx, f->base.seqno);
}
static void msm_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
{
struct msm_fence *f = to_msm_fence(fence);
struct msm_fence_context *fctx = f->fctx;
unsigned long flags;
ktime_t now;
spin_lock_irqsave(&fctx->spinlock, flags);
now = ktime_get();
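/*
* Only re-arm the boost timer if the previously programmed deadline
* has already passed, or the new deadline is earlier than the one
* currently pending.
*/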
if (ktime_after(now, fctx->next_deadline) ||
ktime_before(deadline, fctx->next_deadline)) {
fctx->next_deadline = deadline;
fctx->next_deadline_fence =
max(fctx->next_deadline_fence, (uint32_t)fence->seqno);
/*
* Set timer to trigger boost 3ms before deadline, or
* if we are already less than 3ms before the deadline
* schedule boost work immediately.
*/
deadline = ktime_sub(deadline, ms_to_ktime(3));
if (ktime_after(now, deadline)) {
kthread_queue_work(fctx2gpu(fctx)->worker,
&fctx->deadline_work);
} else {
hrtimer_start(&fctx->deadline_timer, deadline,
HRTIMER_MODE_ABS);
}
}
spin_unlock_irqrestore(&fctx->spinlock, flags);
}
static const struct dma_fence_ops msm_fence_ops = {
.get_driver_name = msm_fence_get_driver_name,
.get_timeline_name = msm_fence_get_timeline_name,
.signaled = msm_fence_signaled,
.set_deadline = msm_fence_set_deadline,
};
struct dma_fence *
msm_fence_alloc(void)
{
struct msm_fence *f;
f = kzalloc(sizeof(*f), GFP_KERNEL);
if (!f)
return ERR_PTR(-ENOMEM);
return &f->base;
}
void
msm_fence_init(struct dma_fence *fence, struct msm_fence_context *fctx)
{
struct msm_fence *f = to_msm_fence(fence);
f->fctx = fctx;
/*
* Until this point, the fence was just some pre-allocated memory,
* no-one should have taken a reference to it yet.
*/
WARN_ON(kref_read(&fence->refcount));
dma_fence_init(&f->base, &msm_fence_ops, &fctx->spinlock,
fctx->context, ++fctx->last_fence);
}
| linux-master | drivers/gpu/drm/msm/msm_fence.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
*/
#include <linux/kref.h>
#include <linux/uaccess.h>
#include "msm_gpu.h"
int msm_file_private_set_sysprof(struct msm_file_private *ctx,
struct msm_gpu *gpu, int sysprof)
{
/*
* Since pm_runtime and sysprof_active are both refcounts, we
* apply the new value first, and then unwind the previous
* value.
*/
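/*
* The cases intentionally fall through: sysprof == 2 holds both a
* pm_runtime ref and a sysprof_active ref, sysprof == 1 only the
* latter, and sysprof == 0 neither.
*/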
switch (sysprof) {
default:
return -EINVAL;
case 2:
pm_runtime_get_sync(&gpu->pdev->dev);
fallthrough;
case 1:
refcount_inc(&gpu->sysprof_active);
fallthrough;
case 0:
break;
}
/* unwind old value: */
switch (ctx->sysprof) {
case 2:
pm_runtime_put_autosuspend(&gpu->pdev->dev);
fallthrough;
case 1:
refcount_dec(&gpu->sysprof_active);
fallthrough;
case 0:
break;
}
ctx->sysprof = sysprof;
return 0;
}
void __msm_file_private_destroy(struct kref *kref)
{
struct msm_file_private *ctx = container_of(kref,
struct msm_file_private, ref);
int i;
for (i = 0; i < ARRAY_SIZE(ctx->entities); i++) {
if (!ctx->entities[i])
continue;
drm_sched_entity_destroy(ctx->entities[i]);
kfree(ctx->entities[i]);
}
msm_gem_address_space_put(ctx->aspace);
kfree(ctx->comm);
kfree(ctx->cmdline);
kfree(ctx);
}
void msm_submitqueue_destroy(struct kref *kref)
{
struct msm_gpu_submitqueue *queue = container_of(kref,
struct msm_gpu_submitqueue, ref);
idr_destroy(&queue->fence_idr);
msm_file_private_put(queue->ctx);
kfree(queue);
}
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
u32 id)
{
struct msm_gpu_submitqueue *entry;
if (!ctx)
return NULL;
read_lock(&ctx->queuelock);
list_for_each_entry(entry, &ctx->submitqueues, node) {
if (entry->id == id) {
kref_get(&entry->ref);
read_unlock(&ctx->queuelock);
return entry;
}
}
read_unlock(&ctx->queuelock);
return NULL;
}
void msm_submitqueue_close(struct msm_file_private *ctx)
{
struct msm_gpu_submitqueue *entry, *tmp;
if (!ctx)
return;
/*
* No lock needed in close and there won't
* be any more user ioctls coming our way
*/
list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node) {
list_del(&entry->node);
msm_submitqueue_put(entry);
}
}
static struct drm_sched_entity *
get_sched_entity(struct msm_file_private *ctx, struct msm_ringbuffer *ring,
unsigned ring_nr, enum drm_sched_priority sched_prio)
{
static DEFINE_MUTEX(entity_lock);
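/*
* Scheduler entities are created lazily, one per (ring, priority)
* pair, and shared by all submitqueues of this ctx that map to the
* same pair; entity_lock serializes the lazy allocation.
*/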
unsigned idx = (ring_nr * NR_SCHED_PRIORITIES) + sched_prio;
/* We should have already validated that the requested priority is
* valid by the time we get here.
*/
if (WARN_ON(idx >= ARRAY_SIZE(ctx->entities)))
return ERR_PTR(-EINVAL);
mutex_lock(&entity_lock);
if (!ctx->entities[idx]) {
struct drm_sched_entity *entity;
struct drm_gpu_scheduler *sched = &ring->sched;
int ret;
entity = kzalloc(sizeof(*ctx->entities[idx]), GFP_KERNEL);
if (!entity) {
mutex_unlock(&entity_lock);
return ERR_PTR(-ENOMEM);
}
ret = drm_sched_entity_init(entity, sched_prio, &sched, 1, NULL);
if (ret) {
mutex_unlock(&entity_lock);
kfree(entity);
return ERR_PTR(ret);
}
ctx->entities[idx] = entity;
}
mutex_unlock(&entity_lock);
return ctx->entities[idx];
}
int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
u32 prio, u32 flags, u32 *id)
{
struct msm_drm_private *priv = drm->dev_private;
struct msm_gpu_submitqueue *queue;
enum drm_sched_priority sched_prio;
unsigned ring_nr;
int ret;
if (!ctx)
return -ENODEV;
if (!priv->gpu)
return -ENODEV;
ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio);
if (ret)
return ret;
queue = kzalloc(sizeof(*queue), GFP_KERNEL);
if (!queue)
return -ENOMEM;
kref_init(&queue->ref);
queue->flags = flags;
queue->ring_nr = ring_nr;
queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
ring_nr, sched_prio);
if (IS_ERR(queue->entity)) {
ret = PTR_ERR(queue->entity);
kfree(queue);
return ret;
}
write_lock(&ctx->queuelock);
queue->ctx = msm_file_private_get(ctx);
queue->id = ctx->queueid++;
if (id)
*id = queue->id;
idr_init(&queue->fence_idr);
spin_lock_init(&queue->idr_lock);
mutex_init(&queue->lock);
list_add_tail(&queue->node, &ctx->submitqueues);
write_unlock(&ctx->queuelock);
return 0;
}
/*
* Create the default submit-queue (id==0), used for backwards compatibility
* for userspace that pre-dates the introduction of submitqueues.
*/
int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
{
struct msm_drm_private *priv = drm->dev_private;
int default_prio, max_priority;
if (!priv->gpu)
return -ENODEV;
max_priority = (priv->gpu->nr_rings * NR_SCHED_PRIORITIES) - 1;
/*
* Pick a medium priority level as default. Lower numeric value is
* higher priority, so round-up to pick a priority that is not higher
* than the middle priority level.
*/
default_prio = DIV_ROUND_UP(max_priority, 2);
return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
}
static int msm_submitqueue_query_faults(struct msm_gpu_submitqueue *queue,
struct drm_msm_submitqueue_query *args)
{
size_t size = min_t(size_t, args->len, sizeof(queue->faults));
int ret;
/* If a zero length was passed in, return the data size we expect */
if (!args->len) {
args->len = sizeof(queue->faults);
return 0;
}
/* Set the length to the actual size of the data */
args->len = size;
ret = copy_to_user(u64_to_user_ptr(args->data), &queue->faults, size);
return ret ? -EFAULT : 0;
}
int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
struct drm_msm_submitqueue_query *args)
{
struct msm_gpu_submitqueue *queue;
int ret = -EINVAL;
if (args->pad)
return -EINVAL;
queue = msm_submitqueue_get(ctx, args->id);
if (!queue)
return -ENOENT;
if (args->param == MSM_SUBMITQUEUE_PARAM_FAULTS)
ret = msm_submitqueue_query_faults(queue, args);
msm_submitqueue_put(queue);
return ret;
}
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
{
struct msm_gpu_submitqueue *entry;
if (!ctx)
return 0;
/*
* id 0 is the "default" queue and can't be destroyed
* by the user
*/
if (!id)
return -ENOENT;
write_lock(&ctx->queuelock);
list_for_each_entry(entry, &ctx->submitqueues, node) {
if (entry->id == id) {
list_del(&entry->node);
write_unlock(&ctx->queuelock);
msm_submitqueue_put(entry);
return 0;
}
}
write_unlock(&ctx->queuelock);
return -ENOENT;
}
| linux-master | drivers/gpu/drm/msm/msm_submitqueue.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>
#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_gpu_trace.h"
/*
* Cmdstream submission:
*/
static struct msm_gem_submit *submit_create(struct drm_device *dev,
struct msm_gpu *gpu,
struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
uint32_t nr_cmds)
{
static atomic_t ident = ATOMIC_INIT(0);
struct msm_gem_submit *submit;
uint64_t sz;
int ret;
sz = struct_size(submit, bos, nr_bos) +
((u64)nr_cmds * sizeof(submit->cmd[0]));
if (sz > SIZE_MAX)
return ERR_PTR(-ENOMEM);
submit = kzalloc(sz, GFP_KERNEL);
if (!submit)
return ERR_PTR(-ENOMEM);
submit->hw_fence = msm_fence_alloc();
if (IS_ERR(submit->hw_fence)) {
ret = PTR_ERR(submit->hw_fence);
kfree(submit);
return ERR_PTR(ret);
}
ret = drm_sched_job_init(&submit->base, queue->entity, queue);
if (ret) {
kfree(submit->hw_fence);
kfree(submit);
return ERR_PTR(ret);
}
kref_init(&submit->ref);
submit->dev = dev;
submit->aspace = queue->ctx->aspace;
submit->gpu = gpu;
submit->cmd = (void *)&submit->bos[nr_bos];
submit->queue = queue;
submit->pid = get_pid(task_pid(current));
submit->ring = gpu->rb[queue->ring_nr];
submit->fault_dumped = false;
/* Get a unique identifier for the submission for logging purposes */
submit->ident = atomic_inc_return(&ident) - 1;
INIT_LIST_HEAD(&submit->node);
return submit;
}
void __msm_gem_submit_destroy(struct kref *kref)
{
struct msm_gem_submit *submit =
container_of(kref, struct msm_gem_submit, ref);
unsigned i;
if (submit->fence_id) {
spin_lock(&submit->queue->idr_lock);
idr_remove(&submit->queue->fence_idr, submit->fence_id);
spin_unlock(&submit->queue->idr_lock);
}
dma_fence_put(submit->user_fence);
/*
* If the submit is freed before msm_job_run(), then hw_fence is
* just some pre-allocated memory, not a reference counted fence.
* Once the job runs and the hw_fence is initialized, it will
* have a refcount of at least one, since the submit holds a ref
* to the hw_fence.
*/
if (kref_read(&submit->hw_fence->refcount) == 0) {
kfree(submit->hw_fence);
} else {
dma_fence_put(submit->hw_fence);
}
put_pid(submit->pid);
msm_submitqueue_put(submit->queue);
for (i = 0; i < submit->nr_cmds; i++)
kfree(submit->cmd[i].relocs);
kfree(submit);
}
static int submit_lookup_objects(struct msm_gem_submit *submit,
struct drm_msm_gem_submit *args, struct drm_file *file)
{
unsigned i;
int ret = 0;
for (i = 0; i < args->nr_bos; i++) {
struct drm_msm_gem_submit_bo submit_bo;
void __user *userptr =
u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));
/* make sure we don't have garbage flags, in case we hit
* error path before flags is initialized:
*/
submit->bos[i].flags = 0;
if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
ret = -EFAULT;
i = 0;
goto out;
}
/* at least one of READ and/or WRITE flags should be set: */
#define MANDATORY_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)
if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
!(submit_bo.flags & MANDATORY_FLAGS)) {
DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
ret = -EINVAL;
i = 0;
goto out;
}
submit->bos[i].handle = submit_bo.handle;
submit->bos[i].flags = submit_bo.flags;
/* in validate_objects() we figure out if this is true: */
submit->bos[i].iova = submit_bo.presumed;
}
spin_lock(&file->table_lock);
for (i = 0; i < args->nr_bos; i++) {
struct drm_gem_object *obj;
/* normally use drm_gem_object_lookup(), but for bulk lookup
* all under single table_lock just hit object_idr directly:
*/
obj = idr_find(&file->object_idr, submit->bos[i].handle);
if (!obj) {
DRM_ERROR("invalid handle %u at index %u\n", submit->bos[i].handle, i);
ret = -EINVAL;
goto out_unlock;
}
drm_gem_object_get(obj);
submit->bos[i].obj = obj;
}
out_unlock:
spin_unlock(&file->table_lock);
out:
submit->nr_bos = i;
return ret;
}
static int submit_lookup_cmds(struct msm_gem_submit *submit,
struct drm_msm_gem_submit *args, struct drm_file *file)
{
unsigned i;
size_t sz;
int ret = 0;
for (i = 0; i < args->nr_cmds; i++) {
struct drm_msm_gem_submit_cmd submit_cmd;
void __user *userptr =
u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
if (ret) {
ret = -EFAULT;
goto out;
}
/* validate input from userspace: */
switch (submit_cmd.type) {
case MSM_SUBMIT_CMD_BUF:
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
break;
default:
DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
return -EINVAL;
}
if (submit_cmd.size % 4) {
DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
submit_cmd.size);
ret = -EINVAL;
goto out;
}
submit->cmd[i].type = submit_cmd.type;
submit->cmd[i].size = submit_cmd.size / 4;
submit->cmd[i].offset = submit_cmd.submit_offset / 4;
submit->cmd[i].idx = submit_cmd.submit_idx;
submit->cmd[i].nr_relocs = submit_cmd.nr_relocs;
userptr = u64_to_user_ptr(submit_cmd.relocs);
sz = array_size(submit_cmd.nr_relocs,
sizeof(struct drm_msm_gem_submit_reloc));
/* check for overflow: */
if (sz == SIZE_MAX) {
ret = -ENOMEM;
goto out;
}
submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL);
if (!submit->cmd[i].relocs) {
ret = -ENOMEM;
goto out;
}
ret = copy_from_user(submit->cmd[i].relocs, userptr, sz);
if (ret) {
ret = -EFAULT;
goto out;
}
}
out:
return ret;
}
/* Unwind bo state, according to cleanup_flags. In the success case, only
* the lock is dropped at the end of the submit (and active/pin ref is dropped
* later when the submit is retired).
*/
static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
unsigned cleanup_flags)
{
struct drm_gem_object *obj = submit->bos[i].obj;
unsigned flags = submit->bos[i].flags & cleanup_flags;
/*
* Clear flags bit before dropping lock, so that the msm_job_run()
* path isn't racing with submit_cleanup() (ie. the read/modify/
* write is protected by the obj lock in all paths)
*/
submit->bos[i].flags &= ~cleanup_flags;
if (flags & BO_PINNED)
msm_gem_unpin_locked(obj);
if (flags & BO_LOCKED)
dma_resv_unlock(obj->resv);
}
static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
{
unsigned cleanup_flags = BO_PINNED | BO_LOCKED;
submit_cleanup_bo(submit, i, cleanup_flags);
if (!(submit->bos[i].flags & BO_VALID))
submit->bos[i].iova = 0;
}
/* This is where we make sure all the bo's are reserved and pin'd: */
static int submit_lock_objects(struct msm_gem_submit *submit)
{
int contended, slow_locked = -1, i, ret = 0;
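/*
* Standard ww_mutex locking dance: lock every bo in submit order, and
* on -EDEADLK back off (dropping everything locked so far), slow-lock
* the contended bo, then retry the whole list with the same ticket.
*/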
retry:
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = submit->bos[i].obj;
if (slow_locked == i)
slow_locked = -1;
contended = i;
if (!(submit->bos[i].flags & BO_LOCKED)) {
ret = dma_resv_lock_interruptible(obj->resv,
&submit->ticket);
if (ret)
goto fail;
submit->bos[i].flags |= BO_LOCKED;
}
}
ww_acquire_done(&submit->ticket);
return 0;
fail:
if (ret == -EALREADY) {
DRM_ERROR("handle %u at index %u already on submit list\n",
submit->bos[i].handle, i);
ret = -EINVAL;
}
for (; i >= 0; i--)
submit_unlock_unpin_bo(submit, i);
if (slow_locked > 0)
submit_unlock_unpin_bo(submit, slow_locked);
if (ret == -EDEADLK) {
struct drm_gem_object *obj = submit->bos[contended].obj;
/* we lost out in a seqno race, lock and retry.. */
ret = dma_resv_lock_slow_interruptible(obj->resv,
&submit->ticket);
if (!ret) {
submit->bos[contended].flags |= BO_LOCKED;
slow_locked = contended;
goto retry;
}
/* Not expecting -EALREADY here; if the bo was already
* locked, we should have gotten -EALREADY already from
* the dma_resv_lock_interruptible() call.
*/
WARN_ON_ONCE(ret == -EALREADY);
}
return ret;
}
static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
{
int i, ret = 0;
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = submit->bos[i].obj;
bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
/* NOTE: _reserve_shared() must happen before
* _add_shared_fence(), which makes this a slightly
* strange place to call it. OTOH this is a
* convenient can-fail point to hook it in.
*/
ret = dma_resv_reserve_fences(obj->resv, 1);
if (ret)
return ret;
/* If userspace has determined that explicit fencing is
* used, it can disable implicit sync on the entire
* submit:
*/
if (no_implicit)
continue;
/* Otherwise userspace can ask for implicit sync to be
* disabled on specific buffers. This is useful for internal
* usermode driver managed buffers, suballocation, etc.
*/
if (submit->bos[i].flags & MSM_SUBMIT_BO_NO_IMPLICIT)
continue;
ret = drm_sched_job_add_implicit_dependencies(&submit->base,
obj,
write);
if (ret)
break;
}
return ret;
}
static int submit_pin_objects(struct msm_gem_submit *submit)
{
struct msm_drm_private *priv = submit->dev->dev_private;
int i, ret = 0;
submit->valid = true;
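/*
* submit->valid stays true only if every bo ends up at the iova that
* userspace predicted ("presumed"); in that case the cmdstream can be
* used as-is and the reloc pass is skipped.
*/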
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = submit->bos[i].obj;
struct msm_gem_vma *vma;
/* if locking succeeded, pin bo: */
vma = msm_gem_get_vma_locked(obj, submit->aspace);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
break;
}
ret = msm_gem_pin_vma_locked(obj, vma);
if (ret)
break;
if (vma->iova == submit->bos[i].iova) {
submit->bos[i].flags |= BO_VALID;
} else {
submit->bos[i].iova = vma->iova;
/* iova changed, so address in cmdstream is not valid: */
submit->bos[i].flags &= ~BO_VALID;
submit->valid = false;
}
}
/*
* A second loop while holding the LRU lock (a) avoids acquiring/dropping
* the LRU lock for each individual bo, while (b) avoiding holding the
* LRU lock while calling msm_gem_pin_vma_locked() (which could trigger
* get_pages() which could trigger reclaim.. and if we held the LRU lock
* could trigger deadlock with the shrinker).
*/
mutex_lock(&priv->lru.lock);
for (i = 0; i < submit->nr_bos; i++) {
msm_gem_pin_obj_locked(submit->bos[i].obj);
submit->bos[i].flags |= BO_PINNED;
}
mutex_unlock(&priv->lru.lock);
return ret;
}
static void submit_attach_object_fences(struct msm_gem_submit *submit)
{
int i;
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = submit->bos[i].obj;
if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
dma_resv_add_fence(obj->resv, submit->user_fence,
DMA_RESV_USAGE_WRITE);
else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
dma_resv_add_fence(obj->resv, submit->user_fence,
DMA_RESV_USAGE_READ);
}
}
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
struct drm_gem_object **obj, uint64_t *iova, bool *valid)
{
if (idx >= submit->nr_bos) {
DRM_ERROR("invalid buffer index: %u (out of %u)\n",
idx, submit->nr_bos);
return -EINVAL;
}
if (obj)
*obj = submit->bos[idx].obj;
if (iova)
*iova = submit->bos[idx].iova;
if (valid)
*valid = !!(submit->bos[idx].flags & BO_VALID);
return 0;
}
/* process the reloc's and patch up the cmdstream as needed: */
static int submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *obj,
uint32_t offset, uint32_t nr_relocs, struct drm_msm_gem_submit_reloc *relocs)
{
uint32_t i, last_offset = 0;
uint32_t *ptr;
int ret = 0;
if (!nr_relocs)
return 0;
if (offset % 4) {
DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
return -EINVAL;
}
/* For now, just map the entire thing. Eventually we probably
* want to do it page-by-page, w/ kmap() if not vmap()d..
*/
ptr = msm_gem_get_vaddr_locked(obj);
if (IS_ERR(ptr)) {
ret = PTR_ERR(ptr);
DBG("failed to map: %d", ret);
return ret;
}
for (i = 0; i < nr_relocs; i++) {
struct drm_msm_gem_submit_reloc submit_reloc = relocs[i];
uint32_t off;
uint64_t iova;
bool valid;
if (submit_reloc.submit_offset % 4) {
DRM_ERROR("non-aligned reloc offset: %u\n",
submit_reloc.submit_offset);
ret = -EINVAL;
goto out;
}
/* offset in dwords: */
off = submit_reloc.submit_offset / 4;
if ((off >= (obj->size / 4)) ||
(off < last_offset)) {
DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
ret = -EINVAL;
goto out;
}
ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
if (ret)
goto out;
if (valid)
continue;
iova += submit_reloc.reloc_offset;
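/*
* shift < 0 right-shifts the iova (e.g. shift == -32 extracts the
* upper 32 bits of a 64b address), shift > 0 left-shifts it; the
* result is OR'd with the constant bits supplied in the reloc.
*/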
if (submit_reloc.shift < 0)
iova >>= -submit_reloc.shift;
else
iova <<= submit_reloc.shift;
ptr[off] = iova | submit_reloc.or;
last_offset = off;
}
out:
msm_gem_put_vaddr_locked(obj);
return ret;
}
/* Cleanup submit at end of ioctl. In the error case, this also drops
* references, unpins, and drops active refcnt. In the non-error case,
* this is done when the submit is retired.
*/
static void submit_cleanup(struct msm_gem_submit *submit, bool error)
{
unsigned cleanup_flags = BO_LOCKED;
unsigned i;
if (error)
cleanup_flags |= BO_PINNED;
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = submit->bos[i].obj;
submit_cleanup_bo(submit, i, cleanup_flags);
if (error)
drm_gem_object_put(obj);
}
}
void msm_submit_retire(struct msm_gem_submit *submit)
{
int i;
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = submit->bos[i].obj;
drm_gem_object_put(obj);
}
}
struct msm_submit_post_dep {
struct drm_syncobj *syncobj;
uint64_t point;
struct dma_fence_chain *chain;
};
static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
struct drm_file *file,
uint64_t in_syncobjs_addr,
uint32_t nr_in_syncobjs,
size_t syncobj_stride)
{
struct drm_syncobj **syncobjs = NULL;
struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
int ret = 0;
uint32_t i, j;
syncobjs = kcalloc(nr_in_syncobjs, sizeof(*syncobjs),
GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
if (!syncobjs)
return ERR_PTR(-ENOMEM);
for (i = 0; i < nr_in_syncobjs; ++i) {
uint64_t address = in_syncobjs_addr + i * syncobj_stride;
if (copy_from_user(&syncobj_desc,
u64_to_user_ptr(address),
min(syncobj_stride, sizeof(syncobj_desc)))) {
ret = -EFAULT;
break;
}
if (syncobj_desc.point &&
!drm_core_check_feature(submit->dev, DRIVER_SYNCOBJ_TIMELINE)) {
ret = -EOPNOTSUPP;
break;
}
if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) {
ret = -EINVAL;
break;
}
ret = drm_sched_job_add_syncobj_dependency(&submit->base, file,
syncobj_desc.handle, syncobj_desc.point);
if (ret)
break;
if (syncobj_desc.flags & MSM_SUBMIT_SYNCOBJ_RESET) {
syncobjs[i] =
drm_syncobj_find(file, syncobj_desc.handle);
if (!syncobjs[i]) {
ret = -EINVAL;
break;
}
}
}
if (ret) {
for (j = 0; j <= i; ++j) {
if (syncobjs[j])
drm_syncobj_put(syncobjs[j]);
}
kfree(syncobjs);
return ERR_PTR(ret);
}
return syncobjs;
}
static void msm_reset_syncobjs(struct drm_syncobj **syncobjs,
uint32_t nr_syncobjs)
{
uint32_t i;
for (i = 0; syncobjs && i < nr_syncobjs; ++i) {
if (syncobjs[i])
drm_syncobj_replace_fence(syncobjs[i], NULL);
}
}
static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
struct drm_file *file,
uint64_t syncobjs_addr,
uint32_t nr_syncobjs,
size_t syncobj_stride)
{
struct msm_submit_post_dep *post_deps;
struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
int ret = 0;
uint32_t i, j;
post_deps = kcalloc(nr_syncobjs, sizeof(*post_deps),
GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
if (!post_deps)
return ERR_PTR(-ENOMEM);
for (i = 0; i < nr_syncobjs; ++i) {
uint64_t address = syncobjs_addr + i * syncobj_stride;
if (copy_from_user(&syncobj_desc,
u64_to_user_ptr(address),
min(syncobj_stride, sizeof(syncobj_desc)))) {
ret = -EFAULT;
break;
}
post_deps[i].point = syncobj_desc.point;
if (syncobj_desc.flags) {
ret = -EINVAL;
break;
}
if (syncobj_desc.point) {
if (!drm_core_check_feature(dev,
DRIVER_SYNCOBJ_TIMELINE)) {
ret = -EOPNOTSUPP;
break;
}
post_deps[i].chain = dma_fence_chain_alloc();
if (!post_deps[i].chain) {
ret = -ENOMEM;
break;
}
}
post_deps[i].syncobj =
drm_syncobj_find(file, syncobj_desc.handle);
if (!post_deps[i].syncobj) {
ret = -EINVAL;
break;
}
}
if (ret) {
for (j = 0; j <= i; ++j) {
dma_fence_chain_free(post_deps[j].chain);
if (post_deps[j].syncobj)
drm_syncobj_put(post_deps[j].syncobj);
}
kfree(post_deps);
return ERR_PTR(ret);
}
return post_deps;
}
static void msm_process_post_deps(struct msm_submit_post_dep *post_deps,
uint32_t count, struct dma_fence *fence)
{
uint32_t i;
for (i = 0; post_deps && i < count; ++i) {
if (post_deps[i].chain) {
drm_syncobj_add_point(post_deps[i].syncobj,
post_deps[i].chain,
fence, post_deps[i].point);
post_deps[i].chain = NULL;
} else {
drm_syncobj_replace_fence(post_deps[i].syncobj,
fence);
}
}
}
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct msm_drm_private *priv = dev->dev_private;
struct drm_msm_gem_submit *args = data;
struct msm_file_private *ctx = file->driver_priv;
struct msm_gem_submit *submit = NULL;
struct msm_gpu *gpu = priv->gpu;
struct msm_gpu_submitqueue *queue;
struct msm_ringbuffer *ring;
struct msm_submit_post_dep *post_deps = NULL;
struct drm_syncobj **syncobjs_to_reset = NULL;
int out_fence_fd = -1;
bool has_ww_ticket = false;
unsigned i;
int ret;
if (!gpu)
return -ENXIO;
if (args->pad)
return -EINVAL;
if (unlikely(!ctx->aspace) && !capable(CAP_SYS_RAWIO)) {
DRM_ERROR_RATELIMITED("IOMMU support or CAP_SYS_RAWIO required!\n");
return -EPERM;
}
/* for now, we just have 3d pipe.. eventually this would need to
* be more clever to dispatch to appropriate gpu module:
*/
if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
return -EINVAL;
if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
return -EINVAL;
if (args->flags & MSM_SUBMIT_SUDO) {
if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) ||
!capable(CAP_SYS_RAWIO))
return -EINVAL;
}
queue = msm_submitqueue_get(ctx, args->queueid);
if (!queue)
return -ENOENT;
ring = gpu->rb[queue->ring_nr];
if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
if (out_fence_fd < 0) {
ret = out_fence_fd;
goto out_post_unlock;
}
}
submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds);
if (IS_ERR(submit)) {
ret = PTR_ERR(submit);
goto out_post_unlock;
}
trace_msm_gpu_submit(pid_nr(submit->pid), ring->id, submit->ident,
args->nr_bos, args->nr_cmds);
ret = mutex_lock_interruptible(&queue->lock);
if (ret)
goto out_post_unlock;
if (args->flags & MSM_SUBMIT_SUDO)
submit->in_rb = true;
if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
struct dma_fence *in_fence;
in_fence = sync_file_get_fence(args->fence_fd);
if (!in_fence) {
ret = -EINVAL;
goto out_unlock;
}
ret = drm_sched_job_add_dependency(&submit->base, in_fence);
if (ret)
goto out_unlock;
}
if (args->flags & MSM_SUBMIT_SYNCOBJ_IN) {
syncobjs_to_reset = msm_parse_deps(submit, file,
args->in_syncobjs,
args->nr_in_syncobjs,
args->syncobj_stride);
if (IS_ERR(syncobjs_to_reset)) {
ret = PTR_ERR(syncobjs_to_reset);
goto out_unlock;
}
}
if (args->flags & MSM_SUBMIT_SYNCOBJ_OUT) {
post_deps = msm_parse_post_deps(dev, file,
args->out_syncobjs,
args->nr_out_syncobjs,
args->syncobj_stride);
if (IS_ERR(post_deps)) {
ret = PTR_ERR(post_deps);
goto out_unlock;
}
}
ret = submit_lookup_objects(submit, args, file);
if (ret)
goto out;
ret = submit_lookup_cmds(submit, args, file);
if (ret)
goto out;
/* copy_*_user while holding a ww ticket upsets lockdep */
ww_acquire_init(&submit->ticket, &reservation_ww_class);
has_ww_ticket = true;
ret = submit_lock_objects(submit);
if (ret)
goto out;
ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
if (ret)
goto out;
ret = submit_pin_objects(submit);
if (ret)
goto out;
for (i = 0; i < args->nr_cmds; i++) {
struct drm_gem_object *obj;
uint64_t iova;
ret = submit_bo(submit, submit->cmd[i].idx,
&obj, &iova, NULL);
if (ret)
goto out;
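/* cmd size and offset were converted to dwords in submit_lookup_cmds(): */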
if (!submit->cmd[i].size ||
((submit->cmd[i].size + submit->cmd[i].offset) >
obj->size / 4)) {
DRM_ERROR("invalid cmdstream size: %u\n", submit->cmd[i].size * 4);
ret = -EINVAL;
goto out;
}
submit->cmd[i].iova = iova + (submit->cmd[i].offset * 4);
if (submit->valid)
continue;
if (!gpu->allow_relocs) {
if (submit->cmd[i].nr_relocs) {
DRM_ERROR("relocs not allowed\n");
ret = -EINVAL;
goto out;
}
continue;
}
ret = submit_reloc(submit, obj, submit->cmd[i].offset * 4,
submit->cmd[i].nr_relocs, submit->cmd[i].relocs);
if (ret)
goto out;
}
submit->nr_cmds = i;
idr_preload(GFP_KERNEL);
spin_lock(&queue->idr_lock);
/*
* If using userspace provided seqno fence, validate that the id
* is available before arming sched job. Since access to fence_idr
* is serialized on the queue lock, the slot should still be
* available after the job is armed.
*/
if ((args->flags & MSM_SUBMIT_FENCE_SN_IN) &&
(!args->fence || idr_find(&queue->fence_idr, args->fence))) {
spin_unlock(&queue->idr_lock);
idr_preload_end();
ret = -EINVAL;
goto out;
}
drm_sched_job_arm(&submit->base);
submit->user_fence = dma_fence_get(&submit->base.s_fence->finished);
if (args->flags & MSM_SUBMIT_FENCE_SN_IN) {
/*
* Userspace has assigned the seqno fence that it wants
* us to use. It is an error to pick a fence sequence
* number that is not available.
*/
submit->fence_id = args->fence;
ret = idr_alloc_u32(&queue->fence_idr, submit->user_fence,
&submit->fence_id, submit->fence_id,
GFP_NOWAIT);
/*
* We've already validated that the fence_id slot is valid,
* so if idr_alloc_u32 failed, it is a kernel bug
*/
WARN_ON(ret);
} else {
/*
* Allocate an id which can be used by WAIT_FENCE ioctl to map
* back to the underlying fence.
*/
submit->fence_id = idr_alloc_cyclic(&queue->fence_idr,
submit->user_fence, 1,
INT_MAX, GFP_NOWAIT);
}
spin_unlock(&queue->idr_lock);
idr_preload_end();
if (submit->fence_id < 0) {
ret = submit->fence_id;
submit->fence_id = 0;
}
if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
struct sync_file *sync_file = sync_file_create(submit->user_fence);
if (!sync_file) {
ret = -ENOMEM;
} else {
fd_install(out_fence_fd, sync_file->file);
args->fence_fd = out_fence_fd;
}
}
submit_attach_object_fences(submit);
/* The scheduler owns a ref now: */
msm_gem_submit_get(submit);
msm_rd_dump_submit(priv->rd, submit, NULL);
drm_sched_entity_push_job(&submit->base);
args->fence = submit->fence_id;
queue->last_fence = submit->fence_id;
msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs);
msm_process_post_deps(post_deps, args->nr_out_syncobjs,
submit->user_fence);
out:
submit_cleanup(submit, !!ret);
if (has_ww_ticket)
ww_acquire_fini(&submit->ticket);
out_unlock:
mutex_unlock(&queue->lock);
out_post_unlock:
if (ret && (out_fence_fd >= 0))
put_unused_fd(out_fence_fd);
if (!IS_ERR_OR_NULL(submit)) {
msm_gem_submit_put(submit);
} else {
/*
* If the submit hasn't yet taken ownership of the queue
* then we need to drop the reference ourself:
*/
msm_submitqueue_put(queue);
}
if (!IS_ERR_OR_NULL(post_deps)) {
for (i = 0; i < args->nr_out_syncobjs; ++i) {
kfree(post_deps[i].chain);
drm_syncobj_put(post_deps[i].syncobj);
}
kfree(post_deps);
}
if (!IS_ERR_OR_NULL(syncobjs_to_reset)) {
for (i = 0; i < args->nr_in_syncobjs; ++i) {
if (syncobjs_to_reset[i])
drm_syncobj_put(syncobjs_to_reset[i]);
}
kfree(syncobjs_to_reset);
}
return ret;
}
| linux-master | drivers/gpu/drm/msm/msm_gem_submit.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2018, 2020-2021 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include <linux/interconnect.h>
#include <linux/io.h>
#include "msm_drv.h"
/*
* Util/helpers:
*/
struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
const char *name)
{
int i;
char n[32];
snprintf(n, sizeof(n), "%s_clk", name);
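/* Accept both the plain name and the legacy "<name>_clk" spelling: */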
for (i = 0; bulk && i < count; i++) {
if (!strcmp(bulk[i].id, name) || !strcmp(bulk[i].id, n))
return bulk[i].clk;
}
return NULL;
}
struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
{
struct clk *clk;
char name2[32];
clk = devm_clk_get(&pdev->dev, name);
if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
return clk;
snprintf(name2, sizeof(name2), "%s_clk", name);
clk = devm_clk_get(&pdev->dev, name2);
if (!IS_ERR(clk))
dev_warn(&pdev->dev, "Using legacy clk name binding. Use "
"\"%s\" instead of \"%s\"\n", name, name2);
return clk;
}
static void __iomem *_msm_ioremap(struct platform_device *pdev, const char *name,
bool quiet, phys_addr_t *psize)
{
struct resource *res;
unsigned long size;
void __iomem *ptr;
if (name)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
else
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
if (!quiet)
DRM_DEV_ERROR(&pdev->dev, "failed to get memory resource: %s\n", name);
return ERR_PTR(-EINVAL);
}
size = resource_size(res);
ptr = devm_ioremap(&pdev->dev, res->start, size);
if (!ptr) {
if (!quiet)
DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name);
return ERR_PTR(-ENOMEM);
}
if (psize)
*psize = size;
return ptr;
}
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name)
{
return _msm_ioremap(pdev, name, false, NULL);
}
void __iomem *msm_ioremap_quiet(struct platform_device *pdev, const char *name)
{
return _msm_ioremap(pdev, name, true, NULL);
}
void __iomem *msm_ioremap_size(struct platform_device *pdev, const char *name,
phys_addr_t *psize)
{
return _msm_ioremap(pdev, name, false, psize);
}
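/*
* msm_hrtimer_work: bounce hrtimer expiry (which runs in hardirq
* context) onto a kthread_worker so the work function is allowed to
* sleep.
*/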
static enum hrtimer_restart msm_hrtimer_worktimer(struct hrtimer *t)
{
struct msm_hrtimer_work *work = container_of(t,
struct msm_hrtimer_work, timer);
kthread_queue_work(work->worker, &work->work);
return HRTIMER_NORESTART;
}
void msm_hrtimer_queue_work(struct msm_hrtimer_work *work,
ktime_t wakeup_time,
enum hrtimer_mode mode)
{
hrtimer_start(&work->timer, wakeup_time, mode);
}
void msm_hrtimer_work_init(struct msm_hrtimer_work *work,
struct kthread_worker *worker,
kthread_work_func_t fn,
clockid_t clock_id,
enum hrtimer_mode mode)
{
hrtimer_init(&work->timer, clock_id, mode);
work->timer.function = msm_hrtimer_worktimer;
work->worker = worker;
kthread_init_work(&work->work, fn);
}
struct icc_path *msm_icc_get(struct device *dev, const char *name)
{
struct device *mdss_dev = dev->parent;
struct icc_path *path;
path = of_icc_get(dev, name);
if (path)
return path;
/*
* If there are no interconnects attached to the corresponding device
* node, of_icc_get() will return NULL.
*
* If the MDP5/DPU device node doesn't have interconnects, lookup the
* path in the parent (MDSS) device.
*/
return of_icc_get(mdss_dev, name);
}
| linux-master | drivers/gpu/drm/msm/msm_io_utils.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_mmu.h"
static void
msm_gem_address_space_destroy(struct kref *kref)
{
struct msm_gem_address_space *aspace = container_of(kref,
struct msm_gem_address_space, kref);
drm_mm_takedown(&aspace->mm);
if (aspace->mmu)
aspace->mmu->funcs->destroy(aspace->mmu);
put_pid(aspace->pid);
kfree(aspace);
}
void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
{
if (aspace)
kref_put(&aspace->kref, msm_gem_address_space_destroy);
}
struct msm_gem_address_space *
msm_gem_address_space_get(struct msm_gem_address_space *aspace)
{
if (!IS_ERR_OR_NULL(aspace))
kref_get(&aspace->kref);
return aspace;
}
/* Actually unmap memory for the vma */
void msm_gem_vma_purge(struct msm_gem_vma *vma)
{
struct msm_gem_address_space *aspace = vma->aspace;
unsigned size = vma->node.size;
/* Don't do anything if the memory isn't mapped */
if (!vma->mapped)
return;
aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);
vma->mapped = false;
}
/* Map and pin vma: */
int
msm_gem_vma_map(struct msm_gem_vma *vma, int prot,
struct sg_table *sgt, int size)
{
struct msm_gem_address_space *aspace = vma->aspace;
int ret;
if (GEM_WARN_ON(!vma->iova))
return -EINVAL;
if (vma->mapped)
return 0;
vma->mapped = true;
if (!aspace)
return 0;
/*
* NOTE: iommu/io-pgtable can allocate pages, so we cannot hold
* a lock across map/unmap which is also used in the job_run()
* path, as this can cause deadlock in job_run() vs shrinker/
* reclaim.
*
* Revisit this if we can come up with a scheme to pre-alloc pages
* for the pgtable in map/unmap ops.
*/
ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, size, prot);
if (ret) {
vma->mapped = false;
}
return ret;
}
/* Close an iova. Warn if it is still in use */
void msm_gem_vma_close(struct msm_gem_vma *vma)
{
struct msm_gem_address_space *aspace = vma->aspace;
GEM_WARN_ON(vma->mapped);
spin_lock(&aspace->lock);
if (vma->iova)
drm_mm_remove_node(&vma->node);
spin_unlock(&aspace->lock);
vma->iova = 0;
msm_gem_address_space_put(aspace);
}
struct msm_gem_vma *msm_gem_vma_new(struct msm_gem_address_space *aspace)
{
struct msm_gem_vma *vma;
vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (!vma)
return NULL;
vma->aspace = aspace;
return vma;
}
/* Initialize a new vma and allocate an iova for it */
int msm_gem_vma_init(struct msm_gem_vma *vma, int size,
u64 range_start, u64 range_end)
{
struct msm_gem_address_space *aspace = vma->aspace;
int ret;
if (GEM_WARN_ON(!aspace))
return -EINVAL;
if (GEM_WARN_ON(vma->iova))
return -EBUSY;
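/*
* Carve an iova range for this vma out of the address space's drm_mm;
* the actual pagetable mapping happens later in msm_gem_vma_map().
*/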
spin_lock(&aspace->lock);
ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node,
size, PAGE_SIZE, 0,
range_start, range_end, 0);
spin_unlock(&aspace->lock);
if (ret)
return ret;
vma->iova = vma->node.start;
vma->mapped = false;
kref_get(&aspace->kref);
return 0;
}
struct msm_gem_address_space *
msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
u64 va_start, u64 size)
{
struct msm_gem_address_space *aspace;
if (IS_ERR(mmu))
return ERR_CAST(mmu);
aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
if (!aspace)
return ERR_PTR(-ENOMEM);
spin_lock_init(&aspace->lock);
aspace->name = name;
aspace->mmu = mmu;
aspace->va_start = va_start;
aspace->va_size = size;
drm_mm_init(&aspace->mm, va_start, size);
kref_init(&aspace->kref);
return aspace;
}
| linux-master | drivers/gpu/drm/msm/msm_gem_vma.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013-2016 Red Hat
* Author: Rob Clark <[email protected]>
*/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/fault-inject.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_kms.h"
#include "msm_debugfs.h"
#include "disp/msm_disp_snapshot.h"
/*
* GPU Snapshot:
*/
struct msm_gpu_show_priv {
struct msm_gpu_state *state;
struct drm_device *dev;
};
static int msm_gpu_show(struct seq_file *m, void *arg)
{
struct drm_printer p = drm_seq_file_printer(m);
struct msm_gpu_show_priv *show_priv = m->private;
struct msm_drm_private *priv = show_priv->dev->dev_private;
struct msm_gpu *gpu = priv->gpu;
int ret;
ret = mutex_lock_interruptible(&gpu->lock);
if (ret)
return ret;
drm_printf(&p, "%s Status:\n", gpu->name);
gpu->funcs->show(gpu, show_priv->state, &p);
mutex_unlock(&gpu->lock);
return 0;
}
static int msm_gpu_release(struct inode *inode, struct file *file)
{
struct seq_file *m = file->private_data;
struct msm_gpu_show_priv *show_priv = m->private;
struct msm_drm_private *priv = show_priv->dev->dev_private;
struct msm_gpu *gpu = priv->gpu;
mutex_lock(&gpu->lock);
gpu->funcs->gpu_state_put(show_priv->state);
mutex_unlock(&gpu->lock);
kfree(show_priv);
return single_release(inode, file);
}
static int msm_gpu_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
struct msm_drm_private *priv = dev->dev_private;
struct msm_gpu *gpu = priv->gpu;
struct msm_gpu_show_priv *show_priv;
int ret;
if (!gpu || !gpu->funcs->gpu_state_get)
return -ENODEV;
show_priv = kmalloc(sizeof(*show_priv), GFP_KERNEL);
if (!show_priv)
return -ENOMEM;
ret = mutex_lock_interruptible(&gpu->lock);
if (ret)
goto free_priv;
pm_runtime_get_sync(&gpu->pdev->dev);
msm_gpu_hw_init(gpu);
show_priv->state = gpu->funcs->gpu_state_get(gpu);
pm_runtime_put_sync(&gpu->pdev->dev);
mutex_unlock(&gpu->lock);
if (IS_ERR(show_priv->state)) {
ret = PTR_ERR(show_priv->state);
goto free_priv;
}
show_priv->dev = dev;
ret = single_open(file, msm_gpu_show, show_priv);
if (ret)
goto free_priv;
return 0;
free_priv:
kfree(show_priv);
return ret;
}
static const struct file_operations msm_gpu_fops = {
.owner = THIS_MODULE,
.open = msm_gpu_open,
.read = seq_read,
.llseek = seq_lseek,
.release = msm_gpu_release,
};
/*
* Display Snapshot:
*/
static int msm_kms_show(struct seq_file *m, void *arg)
{
struct drm_printer p = drm_seq_file_printer(m);
struct msm_disp_state *state = m->private;
msm_disp_state_print(state, &p);
return 0;
}
static int msm_kms_release(struct inode *inode, struct file *file)
{
struct seq_file *m = file->private_data;
struct msm_disp_state *state = m->private;
msm_disp_state_free(state);
return single_release(inode, file);
}
static int msm_kms_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
struct msm_drm_private *priv = dev->dev_private;
struct msm_disp_state *state;
int ret;
if (!priv->kms)
return -ENODEV;
ret = mutex_lock_interruptible(&priv->kms->dump_mutex);
if (ret)
return ret;
state = msm_disp_snapshot_state_sync(priv->kms);
mutex_unlock(&priv->kms->dump_mutex);
if (IS_ERR(state)) {
return PTR_ERR(state);
}
ret = single_open(file, msm_kms_show, state);
if (ret) {
msm_disp_state_free(state);
return ret;
}
return 0;
}
static const struct file_operations msm_kms_fops = {
.owner = THIS_MODULE,
.open = msm_kms_open,
.read = seq_read,
.llseek = seq_lseek,
.release = msm_kms_release,
};
/*
* Other debugfs:
*/
static unsigned long last_shrink_freed;
static int
shrink_get(void *data, u64 *val)
{
*val = last_shrink_freed;
return 0;
}
static int
shrink_set(void *data, u64 val)
{
struct drm_device *dev = data;
last_shrink_freed = msm_gem_shrinker_shrink(dev, val);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(shrink_fops,
shrink_get, shrink_set,
"0x%08llx\n");
static int msm_gem_show(struct seq_file *m, void *arg)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct msm_drm_private *priv = dev->dev_private;
int ret;
ret = mutex_lock_interruptible(&priv->obj_lock);
if (ret)
return ret;
msm_gem_describe_objects(&priv->objects, m);
mutex_unlock(&priv->obj_lock);
return 0;
}
static int msm_mm_show(struct seq_file *m, void *arg)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_printer p = drm_seq_file_printer(m);
drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
return 0;
}
static int msm_fb_show(struct seq_file *m, void *arg)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_framebuffer *fb, *fbdev_fb = NULL;
if (dev->fb_helper && dev->fb_helper->fb) {
seq_printf(m, "fbcon ");
fbdev_fb = dev->fb_helper->fb;
msm_framebuffer_describe(fbdev_fb, m);
}
mutex_lock(&dev->mode_config.fb_lock);
list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
if (fb == fbdev_fb)
continue;
seq_printf(m, "user ");
msm_framebuffer_describe(fb, m);
}
mutex_unlock(&dev->mode_config.fb_lock);
return 0;
}
static struct drm_info_list msm_debugfs_list[] = {
{"gem", msm_gem_show},
{ "mm", msm_mm_show },
{ "fb", msm_fb_show },
};
static int late_init_minor(struct drm_minor *minor)
{
int ret;
if (!minor)
return 0;
ret = msm_rd_debugfs_init(minor);
if (ret) {
DRM_DEV_ERROR(minor->dev->dev, "could not install rd debugfs\n");
return ret;
}
ret = msm_perf_debugfs_init(minor);
if (ret) {
DRM_DEV_ERROR(minor->dev->dev, "could not install perf debugfs\n");
return ret;
}
return 0;
}
int msm_debugfs_late_init(struct drm_device *dev)
{
int ret;
ret = late_init_minor(dev->primary);
if (ret)
return ret;
ret = late_init_minor(dev->render);
return ret;
}
void msm_debugfs_init(struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
struct msm_drm_private *priv = dev->dev_private;
struct dentry *gpu_devfreq;
drm_debugfs_create_files(msm_debugfs_list,
ARRAY_SIZE(msm_debugfs_list),
minor->debugfs_root, minor);
debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root,
dev, &msm_gpu_fops);
debugfs_create_file("kms", S_IRUSR, minor->debugfs_root,
dev, &msm_kms_fops);
debugfs_create_u32("hangcheck_period_ms", 0600, minor->debugfs_root,
&priv->hangcheck_period);
debugfs_create_bool("disable_err_irq", 0600, minor->debugfs_root,
&priv->disable_err_irq);
debugfs_create_file("shrink", S_IRWXU, minor->debugfs_root,
dev, &shrink_fops);
gpu_devfreq = debugfs_create_dir("devfreq", minor->debugfs_root);
debugfs_create_bool("idle_clamp",0600, gpu_devfreq,
&priv->gpu_clamp_to_idle);
debugfs_create_u32("upthreshold",0600, gpu_devfreq,
&priv->gpu_devfreq_config.upthreshold);
debugfs_create_u32("downdifferential",0600, gpu_devfreq,
&priv->gpu_devfreq_config.downdifferential);
if (priv->kms && priv->kms->funcs->debugfs_init)
priv->kms->funcs->debugfs_init(priv->kms, minor);
#ifdef CONFIG_FAULT_INJECTION
fault_create_debugfs_attr("fail_gem_alloc", minor->debugfs_root,
&fail_gem_alloc);
fault_create_debugfs_attr("fail_gem_iova", minor->debugfs_root,
&fail_gem_iova);
#endif
}
#endif
| linux-master | drivers/gpu/drm/msm/msm_debugfs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include <linux/fb.h>
#include <drm/drm_drv.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_prime.h>
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"
static bool fbdev = true;
MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
module_param(fbdev, bool, 0600);
/*
* fbdev funcs, to implement legacy fbdev interface on top of drm driver
*/
FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(msm_fbdev,
drm_fb_helper_damage_range,
drm_fb_helper_damage_area)
static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct drm_fb_helper *helper = (struct drm_fb_helper *)info->par;
struct drm_gem_object *bo = msm_framebuffer_bo(helper->fb, 0);
return drm_gem_prime_mmap(bo, vma);
}
static void msm_fbdev_fb_destroy(struct fb_info *info)
{
struct drm_fb_helper *helper = (struct drm_fb_helper *)info->par;
struct drm_framebuffer *fb = helper->fb;
struct drm_gem_object *bo = msm_framebuffer_bo(fb, 0);
DBG();
drm_fb_helper_fini(helper);
/* this will free the backing object */
msm_gem_put_vaddr(bo);
drm_framebuffer_remove(fb);
drm_client_release(&helper->client);
drm_fb_helper_unprepare(helper);
kfree(helper);
}
static const struct fb_ops msm_fb_ops = {
.owner = THIS_MODULE,
__FB_DEFAULT_DEFERRED_OPS_RDWR(msm_fbdev),
DRM_FB_HELPER_DEFAULT_OPS,
__FB_DEFAULT_DEFERRED_OPS_DRAW(msm_fbdev),
.fb_mmap = msm_fbdev_mmap,
.fb_destroy = msm_fbdev_fb_destroy,
};
static int msm_fbdev_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_device *dev = helper->dev;
struct msm_drm_private *priv = dev->dev_private;
struct drm_framebuffer *fb = NULL;
struct drm_gem_object *bo;
struct fb_info *fbi = NULL;
uint64_t paddr;
uint32_t format;
int ret, pitch;
format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
sizes->surface_height, sizes->surface_bpp,
sizes->fb_width, sizes->fb_height);
pitch = align_pitch(sizes->surface_width, sizes->surface_bpp);
fb = msm_alloc_stolen_fb(dev, sizes->surface_width,
sizes->surface_height, pitch, format);
if (IS_ERR(fb)) {
DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n");
return PTR_ERR(fb);
}
bo = msm_framebuffer_bo(fb, 0);
/*
* NOTE: if we can be guaranteed to be able to map buffer
* in panic (ie. lock-safe, etc) we could avoid pinning the
* buffer now:
*/
ret = msm_gem_get_and_pin_iova(bo, priv->kms->aspace, &paddr);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to get buffer obj iova: %d\n", ret);
goto fail;
}
fbi = drm_fb_helper_alloc_info(helper);
if (IS_ERR(fbi)) {
DRM_DEV_ERROR(dev->dev, "failed to allocate fb info\n");
ret = PTR_ERR(fbi);
goto fail;
}
DBG("fbi=%p, dev=%p", fbi, dev);
helper->fb = fb;
fbi->fbops = &msm_fb_ops;
drm_fb_helper_fill_info(fbi, helper, sizes);
fbi->screen_buffer = msm_gem_get_vaddr(bo);
if (IS_ERR(fbi->screen_buffer)) {
ret = PTR_ERR(fbi->screen_buffer);
goto fail;
}
fbi->screen_size = bo->size;
fbi->fix.smem_start = paddr;
fbi->fix.smem_len = bo->size;
DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
DBG("allocated %dx%d fb", fb->width, fb->height);
return 0;
fail:
drm_framebuffer_remove(fb);
return ret;
}
static int msm_fbdev_fb_dirty(struct drm_fb_helper *helper,
struct drm_clip_rect *clip)
{
struct drm_device *dev = helper->dev;
int ret;
/* Call damage handlers only if necessary */
if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
return 0;
if (helper->fb->funcs->dirty) {
ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
return ret;
}
return 0;
}
static const struct drm_fb_helper_funcs msm_fb_helper_funcs = {
.fb_probe = msm_fbdev_create,
.fb_dirty = msm_fbdev_fb_dirty,
};
/*
* struct drm_client
*/
static void msm_fbdev_client_unregister(struct drm_client_dev *client)
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
if (fb_helper->info) {
drm_fb_helper_unregister_info(fb_helper);
} else {
drm_client_release(&fb_helper->client);
drm_fb_helper_unprepare(fb_helper);
kfree(fb_helper);
}
}
static int msm_fbdev_client_restore(struct drm_client_dev *client)
{
drm_fb_helper_lastclose(client->dev);
return 0;
}
static int msm_fbdev_client_hotplug(struct drm_client_dev *client)
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
struct drm_device *dev = client->dev;
int ret;
if (dev->fb_helper)
return drm_fb_helper_hotplug_event(dev->fb_helper);
ret = drm_fb_helper_init(dev, fb_helper);
if (ret)
goto err_drm_err;
if (!drm_drv_uses_atomic_modeset(dev))
drm_helper_disable_unused_functions(dev);
ret = drm_fb_helper_initial_config(fb_helper);
if (ret)
goto err_drm_fb_helper_fini;
return 0;
err_drm_fb_helper_fini:
drm_fb_helper_fini(fb_helper);
err_drm_err:
drm_err(dev, "Failed to setup fbdev emulation (ret=%d)\n", ret);
return ret;
}
static const struct drm_client_funcs msm_fbdev_client_funcs = {
.owner = THIS_MODULE,
.unregister = msm_fbdev_client_unregister,
.restore = msm_fbdev_client_restore,
.hotplug = msm_fbdev_client_hotplug,
};
/* initialize fbdev helper */
void msm_fbdev_setup(struct drm_device *dev)
{
struct drm_fb_helper *helper;
int ret;
if (!fbdev)
return;
drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
helper = kzalloc(sizeof(*helper), GFP_KERNEL);
if (!helper)
return;
drm_fb_helper_prepare(dev, helper, 32, &msm_fb_helper_funcs);
ret = drm_client_init(dev, &helper->client, "fbdev", &msm_fbdev_client_funcs);
if (ret) {
drm_err(dev, "Failed to register client: %d\n", ret);
goto err_drm_fb_helper_unprepare;
}
drm_client_register(&helper->client);
return;
err_drm_fb_helper_unprepare:
drm_fb_helper_unprepare(helper);
kfree(helper);
}
| linux-master | drivers/gpu/drm/msm/msm_fbdev.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_vblank.h>
#include "msm_atomic_trace.h"
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"
/*
* Helpers to control vblanks while we flush.. basically just to ensure
* that vblank accounting is switched on, so we get valid seqno/timestamp
* on pageflip events (if requested)
*/
static void vblank_get(struct msm_kms *kms, unsigned crtc_mask)
{
struct drm_crtc *crtc;
for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
if (!crtc->state->active)
continue;
drm_crtc_vblank_get(crtc);
}
}
static void vblank_put(struct msm_kms *kms, unsigned crtc_mask)
{
struct drm_crtc *crtc;
for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
if (!crtc->state->active)
continue;
drm_crtc_vblank_put(crtc);
}
}
static void lock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
{
int crtc_index;
struct drm_crtc *crtc;
for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
crtc_index = drm_crtc_index(crtc);
mutex_lock_nested(&kms->commit_lock[crtc_index], crtc_index);
}
}
static void unlock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
{
struct drm_crtc *crtc;
for_each_crtc_mask_reverse(kms->dev, crtc, crtc_mask)
mutex_unlock(&kms->commit_lock[drm_crtc_index(crtc)]);
}
static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
{
unsigned crtc_mask = BIT(crtc_idx);
trace_msm_atomic_async_commit_start(crtc_mask);
lock_crtcs(kms, crtc_mask);
if (!(kms->pending_crtc_mask & crtc_mask)) {
unlock_crtcs(kms, crtc_mask);
goto out;
}
kms->pending_crtc_mask &= ~crtc_mask;
kms->funcs->enable_commit(kms);
vblank_get(kms, crtc_mask);
/*
* Flush hardware updates:
*/
trace_msm_atomic_flush_commit(crtc_mask);
kms->funcs->flush_commit(kms, crtc_mask);
/*
* Wait for flush to complete:
*/
trace_msm_atomic_wait_flush_start(crtc_mask);
kms->funcs->wait_flush(kms, crtc_mask);
trace_msm_atomic_wait_flush_finish(crtc_mask);
vblank_put(kms, crtc_mask);
kms->funcs->complete_commit(kms, crtc_mask);
unlock_crtcs(kms, crtc_mask);
kms->funcs->disable_commit(kms);
out:
trace_msm_atomic_async_commit_finish(crtc_mask);
}
static void msm_atomic_pending_work(struct kthread_work *work)
{
struct msm_pending_timer *timer = container_of(work,
struct msm_pending_timer, work.work);
msm_atomic_async_commit(timer->kms, timer->crtc_idx);
}
int msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
struct msm_kms *kms, int crtc_idx)
{
timer->kms = kms;
timer->crtc_idx = crtc_idx;
timer->worker = kthread_create_worker(0, "atomic-worker-%d", crtc_idx);
if (IS_ERR(timer->worker)) {
int ret = PTR_ERR(timer->worker);
timer->worker = NULL;
return ret;
}
sched_set_fifo(timer->worker->task);
msm_hrtimer_work_init(&timer->work, timer->worker,
msm_atomic_pending_work,
CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
return 0;
}
void msm_atomic_destroy_pending_timer(struct msm_pending_timer *timer)
{
if (timer->worker)
kthread_destroy_worker(timer->worker);
}
static bool can_do_async(struct drm_atomic_state *state,
struct drm_crtc **async_crtc)
{
struct drm_connector_state *connector_state;
struct drm_connector *connector;
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
int i, num_crtcs = 0;
if (!(state->legacy_cursor_update || state->async_update))
return false;
/* any connector change, means slow path: */
for_each_new_connector_in_state(state, connector, connector_state, i)
return false;
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(crtc_state))
return false;
if (!crtc_state->active)
return false;
if (++num_crtcs > 1)
return false;
*async_crtc = crtc;
}
return true;
}
/* Get bitmask of crtcs that will need to be flushed. The bitmask
* can be used with for_each_crtc_mask() iterator, to iterate
* affected crtcs without needing to preserve the atomic state.
*/
static unsigned get_crtc_mask(struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
unsigned i, mask = 0;
for_each_new_crtc_in_state(state, crtc, crtc_state, i)
mask |= drm_crtc_mask(crtc);
return mask;
}
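/*
* Illustrative sketch (an assumption): the returned mask is consumed with the
* for_each_crtc_mask() iterator, e.g.:
*
* unsigned mask = get_crtc_mask(state);
* for_each_crtc_mask(dev, crtc, mask)
* drm_crtc_vblank_get(crtc);
*/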
int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_crtc *crtc;
int i;
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
if ((old_crtc_state->ctm && !new_crtc_state->ctm) ||
(!old_crtc_state->ctm && new_crtc_state->ctm)) {
new_crtc_state->mode_changed = true;
state->allow_modeset = true;
}
}
return drm_atomic_helper_check(dev, state);
}
void msm_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
struct drm_crtc *async_crtc = NULL;
unsigned crtc_mask = get_crtc_mask(state);
bool async = can_do_async(state, &async_crtc);
trace_msm_atomic_commit_tail_start(async, crtc_mask);
kms->funcs->enable_commit(kms);
/*
* Ensure any previous (potentially async) commit has
* completed:
*/
lock_crtcs(kms, crtc_mask);
trace_msm_atomic_wait_flush_start(crtc_mask);
kms->funcs->wait_flush(kms, crtc_mask);
trace_msm_atomic_wait_flush_finish(crtc_mask);
/*
* Now that there is no in-progress flush, prepare the
* current update:
*/
if (kms->funcs->prepare_commit)
kms->funcs->prepare_commit(kms, state);
/*
* Push atomic updates down to hardware:
*/
drm_atomic_helper_commit_modeset_disables(dev, state);
drm_atomic_helper_commit_planes(dev, state, 0);
drm_atomic_helper_commit_modeset_enables(dev, state);
if (async) {
struct msm_pending_timer *timer =
&kms->pending_timers[drm_crtc_index(async_crtc)];
/* async updates are limited to single-crtc updates: */
WARN_ON(crtc_mask != drm_crtc_mask(async_crtc));
/*
* Start timer if we don't already have an update pending
* on this crtc:
*/
if (!(kms->pending_crtc_mask & crtc_mask)) {
ktime_t vsync_time, wakeup_time;
kms->pending_crtc_mask |= crtc_mask;
if (drm_crtc_next_vblank_start(async_crtc, &vsync_time))
goto fallback;
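/* wake up ~1ms before vblank so the deferred flush lands just in time */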
wakeup_time = ktime_sub(vsync_time, ms_to_ktime(1));
msm_hrtimer_queue_work(&timer->work, wakeup_time,
HRTIMER_MODE_ABS);
}
kms->funcs->disable_commit(kms);
unlock_crtcs(kms, crtc_mask);
/*
* At this point, from drm core's perspective, we
* are done with the atomic update, so we can just
* go ahead and signal that it is done:
*/
drm_atomic_helper_commit_hw_done(state);
drm_atomic_helper_cleanup_planes(dev, state);
trace_msm_atomic_commit_tail_finish(async, crtc_mask);
return;
}
fallback:
/*
* If there is any async flush pending on updated crtcs, fold
* them into the current flush.
*/
kms->pending_crtc_mask &= ~crtc_mask;
vblank_get(kms, crtc_mask);
/*
* Flush hardware updates:
*/
trace_msm_atomic_flush_commit(crtc_mask);
kms->funcs->flush_commit(kms, crtc_mask);
unlock_crtcs(kms, crtc_mask);
/*
* Wait for flush to complete:
*/
trace_msm_atomic_wait_flush_start(crtc_mask);
kms->funcs->wait_flush(kms, crtc_mask);
trace_msm_atomic_wait_flush_finish(crtc_mask);
vblank_put(kms, crtc_mask);
lock_crtcs(kms, crtc_mask);
kms->funcs->complete_commit(kms, crtc_mask);
unlock_crtcs(kms, crtc_mask);
kms->funcs->disable_commit(kms);
drm_atomic_helper_commit_hw_done(state);
drm_atomic_helper_cleanup_planes(dev, state);
trace_msm_atomic_commit_tail_finish(async, crtc_mask);
}
| linux-master | drivers/gpu/drm/msm/msm_atomic.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include "drm/drm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_fence.h"
#include "msm_gpu_trace.h"
#include "adreno/adreno_gpu.h"
#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
#include <linux/devcoredump.h>
#include <linux/sched/task.h>
/*
* Power Management:
*/
static int enable_pwrrail(struct msm_gpu *gpu)
{
struct drm_device *dev = gpu->dev;
int ret = 0;
if (gpu->gpu_reg) {
ret = regulator_enable(gpu->gpu_reg);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
return ret;
}
}
if (gpu->gpu_cx) {
ret = regulator_enable(gpu->gpu_cx);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
return ret;
}
}
return 0;
}
static int disable_pwrrail(struct msm_gpu *gpu)
{
if (gpu->gpu_cx)
regulator_disable(gpu->gpu_cx);
if (gpu->gpu_reg)
regulator_disable(gpu->gpu_reg);
return 0;
}
static int enable_clk(struct msm_gpu *gpu)
{
if (gpu->core_clk && gpu->fast_rate)
dev_pm_opp_set_rate(&gpu->pdev->dev, gpu->fast_rate);
/* Set the RBBM timer rate to 19.2Mhz */
if (gpu->rbbmtimer_clk)
clk_set_rate(gpu->rbbmtimer_clk, 19200000);
return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
}
static int disable_clk(struct msm_gpu *gpu)
{
clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);
/*
* Set the clock to a deliberately low rate. On older targets the clock
* speed had to be non-zero to avoid problems. On newer targets this
* will be rounded down to zero anyway so it all works out.
*/
if (gpu->core_clk)
dev_pm_opp_set_rate(&gpu->pdev->dev, 27000000);
if (gpu->rbbmtimer_clk)
clk_set_rate(gpu->rbbmtimer_clk, 0);
return 0;
}
static int enable_axi(struct msm_gpu *gpu)
{
return clk_prepare_enable(gpu->ebi1_clk);
}
static int disable_axi(struct msm_gpu *gpu)
{
clk_disable_unprepare(gpu->ebi1_clk);
return 0;
}
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
int ret;
DBG("%s", gpu->name);
trace_msm_gpu_resume(0);
ret = enable_pwrrail(gpu);
if (ret)
return ret;
ret = enable_clk(gpu);
if (ret)
return ret;
ret = enable_axi(gpu);
if (ret)
return ret;
msm_devfreq_resume(gpu);
gpu->needs_hw_init = true;
return 0;
}
int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
int ret;
DBG("%s", gpu->name);
trace_msm_gpu_suspend(0);
msm_devfreq_suspend(gpu);
ret = disable_axi(gpu);
if (ret)
return ret;
ret = disable_clk(gpu);
if (ret)
return ret;
ret = disable_pwrrail(gpu);
if (ret)
return ret;
gpu->suspend_count++;
return 0;
}
void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_file_private *ctx,
struct drm_printer *p)
{
drm_printf(p, "drm-engine-gpu:\t%llu ns\n", ctx->elapsed_ns);
drm_printf(p, "drm-cycles-gpu:\t%llu\n", ctx->cycles);
drm_printf(p, "drm-maxfreq-gpu:\t%u Hz\n", gpu->fast_rate);
}
int msm_gpu_hw_init(struct msm_gpu *gpu)
{
int ret;
WARN_ON(!mutex_is_locked(&gpu->lock));
if (!gpu->needs_hw_init)
return 0;
disable_irq(gpu->irq);
ret = gpu->funcs->hw_init(gpu);
if (!ret)
gpu->needs_hw_init = false;
enable_irq(gpu->irq);
return ret;
}
#ifdef CONFIG_DEV_COREDUMP
static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
size_t count, void *data, size_t datalen)
{
struct msm_gpu *gpu = data;
struct drm_print_iterator iter;
struct drm_printer p;
struct msm_gpu_state *state;
state = msm_gpu_crashstate_get(gpu);
if (!state)
return 0;
iter.data = buffer;
iter.offset = 0;
iter.start = offset;
iter.remain = count;
p = drm_coredump_printer(&iter);
drm_printf(&p, "---\n");
drm_printf(&p, "kernel: " UTS_RELEASE "\n");
drm_printf(&p, "module: " KBUILD_MODNAME "\n");
drm_printf(&p, "time: %lld.%09ld\n",
state->time.tv_sec, state->time.tv_nsec);
if (state->comm)
drm_printf(&p, "comm: %s\n", state->comm);
if (state->cmd)
drm_printf(&p, "cmdline: %s\n", state->cmd);
gpu->funcs->show(gpu, state, &p);
msm_gpu_crashstate_put(gpu);
return count - iter.remain;
}
static void msm_gpu_devcoredump_free(void *data)
{
struct msm_gpu *gpu = data;
msm_gpu_crashstate_put(gpu);
}
static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
struct drm_gem_object *obj, u64 iova, bool full)
{
struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
/* Don't record write only objects */
state_bo->size = obj->size;
state_bo->iova = iova;
BUILD_BUG_ON(sizeof(state_bo->name) != sizeof(to_msm_bo(obj)->name));
memcpy(state_bo->name, to_msm_bo(obj)->name, sizeof(state_bo->name));
if (full) {
void *ptr;
state_bo->data = kvmalloc(obj->size, GFP_KERNEL);
if (!state_bo->data)
goto out;
msm_gem_lock(obj);
ptr = msm_gem_get_vaddr_active(obj);
msm_gem_unlock(obj);
if (IS_ERR(ptr)) {
kvfree(state_bo->data);
state_bo->data = NULL;
goto out;
}
memcpy(state_bo->data, ptr, obj->size);
msm_gem_put_vaddr(obj);
}
out:
state->nr_bos++;
}
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
struct msm_gem_submit *submit, char *comm, char *cmd)
{
struct msm_gpu_state *state;
/* Check if the target supports capturing crash state */
if (!gpu->funcs->gpu_state_get)
return;
/* Only save one crash state at a time */
if (gpu->crashstate)
return;
state = gpu->funcs->gpu_state_get(gpu);
if (IS_ERR_OR_NULL(state))
return;
/* Fill in the additional crash state information */
state->comm = kstrdup(comm, GFP_KERNEL);
state->cmd = kstrdup(cmd, GFP_KERNEL);
state->fault_info = gpu->fault_info;
if (submit) {
int i;
state->bos = kcalloc(submit->nr_bos,
sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
for (i = 0; state->bos && i < submit->nr_bos; i++) {
msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
submit->bos[i].iova,
should_dump(submit, i));
}
}
/* Set the active crash state to be dumped on failure */
gpu->crashstate = state;
/* FIXME: Release the crashstate if this errors out? */
dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
}
#else
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
struct msm_gem_submit *submit, char *comm, char *cmd)
{
}
#endif
/*
* Hangcheck detection for locked gpu:
*/
static struct msm_gem_submit *
find_submit(struct msm_ringbuffer *ring, uint32_t fence)
{
struct msm_gem_submit *submit;
unsigned long flags;
spin_lock_irqsave(&ring->submit_lock, flags);
list_for_each_entry(submit, &ring->submits, node) {
if (submit->seqno == fence) {
spin_unlock_irqrestore(&ring->submit_lock, flags);
return submit;
}
}
spin_unlock_irqrestore(&ring->submit_lock, flags);
return NULL;
}
static void retire_submits(struct msm_gpu *gpu);
static void get_comm_cmdline(struct msm_gem_submit *submit, char **comm, char **cmd)
{
struct msm_file_private *ctx = submit->queue->ctx;
struct task_struct *task;
WARN_ON(!mutex_is_locked(&submit->gpu->lock));
/* Note that kstrdup will return NULL if argument is NULL: */
*comm = kstrdup(ctx->comm, GFP_KERNEL);
*cmd = kstrdup(ctx->cmdline, GFP_KERNEL);
task = get_pid_task(submit->pid, PIDTYPE_PID);
if (!task)
return;
if (!*comm)
*comm = kstrdup(task->comm, GFP_KERNEL);
if (!*cmd)
*cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
put_task_struct(task);
}
static void recover_worker(struct kthread_work *work)
{
struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_submit *submit;
struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
char *comm = NULL, *cmd = NULL;
int i;
mutex_lock(&gpu->lock);
DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);
submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
if (submit) {
/* Increment the fault counts */
submit->queue->faults++;
if (submit->aspace)
submit->aspace->faults++;
get_comm_cmdline(submit, &comm, &cmd);
if (comm && cmd) {
DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
gpu->name, comm, cmd);
msm_rd_dump_submit(priv->hangrd, submit,
"offending task: %s (%s)", comm, cmd);
} else {
msm_rd_dump_submit(priv->hangrd, submit, NULL);
}
} else {
/*
* We couldn't attribute this fault to any particular context,
* so increment the global fault count instead.
*/
gpu->global_faults++;
}
/* Record the crash state */
pm_runtime_get_sync(&gpu->pdev->dev);
msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
kfree(cmd);
kfree(comm);
/*
* Update all the rings with the latest and greatest fence.. this
* needs to happen after msm_rd_dump_submit() to ensure that the
* bo's referenced by the offending submit are still around.
*/
for (i = 0; i < gpu->nr_rings; i++) {
struct msm_ringbuffer *ring = gpu->rb[i];
uint32_t fence = ring->memptrs->fence;
/*
* For the current (faulting?) ring/submit advance the fence by
* one more to clear the faulting submit
*/
if (ring == cur_ring)
ring->memptrs->fence = ++fence;
msm_update_fence(ring->fctx, fence);
}
if (msm_gpu_active(gpu)) {
/* retire completed submits, plus the one that hung: */
retire_submits(gpu);
gpu->funcs->recover(gpu);
/*
* Replay all remaining submits starting with highest priority
* ring
*/
for (i = 0; i < gpu->nr_rings; i++) {
struct msm_ringbuffer *ring = gpu->rb[i];
unsigned long flags;
spin_lock_irqsave(&ring->submit_lock, flags);
list_for_each_entry(submit, &ring->submits, node)
gpu->funcs->submit(gpu, submit);
spin_unlock_irqrestore(&ring->submit_lock, flags);
}
}
pm_runtime_put(&gpu->pdev->dev);
mutex_unlock(&gpu->lock);
msm_gpu_retire(gpu);
}
static void fault_worker(struct kthread_work *work)
{
struct msm_gpu *gpu = container_of(work, struct msm_gpu, fault_work);
struct msm_gem_submit *submit;
struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
char *comm = NULL, *cmd = NULL;
mutex_lock(&gpu->lock);
submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
if (submit && submit->fault_dumped)
goto resume_smmu;
if (submit) {
get_comm_cmdline(submit, &comm, &cmd);
/*
* When we get GPU iova faults, we can get 1000s of them,
* but we really only want to log the first one.
*/
submit->fault_dumped = true;
}
/* Record the crash state */
pm_runtime_get_sync(&gpu->pdev->dev);
msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
pm_runtime_put_sync(&gpu->pdev->dev);
kfree(cmd);
kfree(comm);
resume_smmu:
memset(&gpu->fault_info, 0, sizeof(gpu->fault_info));
gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
mutex_unlock(&gpu->lock);
}
static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
struct msm_drm_private *priv = gpu->dev->dev_private;
mod_timer(&gpu->hangcheck_timer,
round_jiffies_up(jiffies + msecs_to_jiffies(priv->hangcheck_period)));
}
static bool made_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
if (ring->hangcheck_progress_retries >= DRM_MSM_HANGCHECK_PROGRESS_RETRIES)
return false;
if (!gpu->funcs->progress)
return false;
if (!gpu->funcs->progress(gpu, ring))
return false;
ring->hangcheck_progress_retries++;
return true;
}
static void hangcheck_handler(struct timer_list *t)
{
struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
struct drm_device *dev = gpu->dev;
struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
uint32_t fence = ring->memptrs->fence;
if (fence != ring->hangcheck_fence) {
/* some progress has been made.. ya! */
ring->hangcheck_fence = fence;
ring->hangcheck_progress_retries = 0;
} else if (fence_before(fence, ring->fctx->last_fence) &&
!made_progress(gpu, ring)) {
/* no progress and not done.. hung! */
ring->hangcheck_fence = fence;
ring->hangcheck_progress_retries = 0;
DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
gpu->name, ring->id);
DRM_DEV_ERROR(dev->dev, "%s: completed fence: %u\n",
gpu->name, fence);
DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n",
gpu->name, ring->fctx->last_fence);
kthread_queue_work(gpu->worker, &gpu->recover_work);
}
/* if still more pending work, reset the hangcheck timer: */
if (fence_after(ring->fctx->last_fence, ring->hangcheck_fence))
hangcheck_timer_reset(gpu);
/* workaround for missing irq: */
msm_gpu_retire(gpu);
}
/*
* Performance Counters:
*/
/* called under perf_lock */
static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
{
uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
int i, n = min(ncntrs, gpu->num_perfcntrs);
/* read current values: */
for (i = 0; i < gpu->num_perfcntrs; i++)
current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);
/* update cntrs: */
for (i = 0; i < n; i++)
cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];
/* save current values: */
for (i = 0; i < gpu->num_perfcntrs; i++)
gpu->last_cntrs[i] = current_cntrs[i];
return n;
}
static void update_sw_cntrs(struct msm_gpu *gpu)
{
ktime_t time;
uint32_t elapsed;
unsigned long flags;
spin_lock_irqsave(&gpu->perf_lock, flags);
if (!gpu->perfcntr_active)
goto out;
time = ktime_get();
elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));
gpu->totaltime += elapsed;
if (gpu->last_sample.active)
gpu->activetime += elapsed;
gpu->last_sample.active = msm_gpu_active(gpu);
gpu->last_sample.time = time;
out:
spin_unlock_irqrestore(&gpu->perf_lock, flags);
}
void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
unsigned long flags;
pm_runtime_get_sync(&gpu->pdev->dev);
spin_lock_irqsave(&gpu->perf_lock, flags);
/* we could dynamically enable/disable perfcntr registers too.. */
gpu->last_sample.active = msm_gpu_active(gpu);
gpu->last_sample.time = ktime_get();
gpu->activetime = gpu->totaltime = 0;
gpu->perfcntr_active = true;
update_hw_cntrs(gpu, 0, NULL);
spin_unlock_irqrestore(&gpu->perf_lock, flags);
}
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
gpu->perfcntr_active = false;
pm_runtime_put_sync(&gpu->pdev->dev);
}
/* returns -errno or # of cntrs sampled */
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&gpu->perf_lock, flags);
if (!gpu->perfcntr_active) {
ret = -EINVAL;
goto out;
}
*activetime = gpu->activetime;
*totaltime = gpu->totaltime;
gpu->activetime = gpu->totaltime = 0;
ret = update_hw_cntrs(gpu, ncntrs, cntrs);
out:
spin_unlock_irqrestore(&gpu->perf_lock, flags);
return ret;
}
/*
* Cmdstream submission/retirement:
*/
static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
struct msm_gem_submit *submit)
{
int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
volatile struct msm_gpu_submit_stats *stats;
u64 elapsed, clock = 0, cycles;
unsigned long flags;
stats = &ring->memptrs->stats[index];
/* Convert 19.2Mhz alwayson ticks to nanoseconds for elapsed time */
elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
do_div(elapsed, 192);
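/* i.e. ns = ticks / 19.2e6 * 1e9, folded into ticks * 10000 / 192 */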
cycles = stats->cpcycles_end - stats->cpcycles_start;
/* Calculate the clock frequency from the number of CP cycles */
if (elapsed) {
clock = cycles * 1000;
do_div(clock, elapsed);
}
submit->queue->ctx->elapsed_ns += elapsed;
submit->queue->ctx->cycles += cycles;
trace_msm_gpu_submit_retired(submit, elapsed, clock,
stats->alwayson_start, stats->alwayson_end);
msm_submit_retire(submit);
pm_runtime_mark_last_busy(&gpu->pdev->dev);
spin_lock_irqsave(&ring->submit_lock, flags);
list_del(&submit->node);
spin_unlock_irqrestore(&ring->submit_lock, flags);
/* Update devfreq on transition from active->idle: */
mutex_lock(&gpu->active_lock);
gpu->active_submits--;
WARN_ON(gpu->active_submits < 0);
if (!gpu->active_submits) {
msm_devfreq_idle(gpu);
pm_runtime_put_autosuspend(&gpu->pdev->dev);
}
mutex_unlock(&gpu->active_lock);
msm_gem_submit_put(submit);
}
static void retire_submits(struct msm_gpu *gpu)
{
int i;
/* Retire the commits starting with highest priority */
for (i = 0; i < gpu->nr_rings; i++) {
struct msm_ringbuffer *ring = gpu->rb[i];
while (true) {
struct msm_gem_submit *submit = NULL;
unsigned long flags;
spin_lock_irqsave(&ring->submit_lock, flags);
submit = list_first_entry_or_null(&ring->submits,
struct msm_gem_submit, node);
spin_unlock_irqrestore(&ring->submit_lock, flags);
/*
* If no submit, we are done. If submit->fence hasn't
* been signalled, then later submits are not signalled
* either, so we are also done.
*/
if (submit && dma_fence_is_signaled(submit->hw_fence)) {
retire_submit(gpu, ring, submit);
} else {
break;
}
}
}
wake_up_all(&gpu->retire_event);
}
static void retire_worker(struct kthread_work *work)
{
struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
retire_submits(gpu);
}
/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
int i;
for (i = 0; i < gpu->nr_rings; i++)
msm_update_fence(gpu->rb[i]->fctx, gpu->rb[i]->memptrs->fence);
kthread_queue_work(gpu->worker, &gpu->retire_work);
update_sw_cntrs(gpu);
}
/* add bo's to gpu's ring, and kick gpu: */
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct msm_ringbuffer *ring = submit->ring;
unsigned long flags;
pm_runtime_get_sync(&gpu->pdev->dev);
mutex_lock(&gpu->lock);
msm_gpu_hw_init(gpu);
update_sw_cntrs(gpu);
/*
* ring->submits holds a ref to the submit, to deal with the case
* that a submit completes before msm_ioctl_gem_submit() returns.
*/
msm_gem_submit_get(submit);
spin_lock_irqsave(&ring->submit_lock, flags);
list_add_tail(&submit->node, &ring->submits);
spin_unlock_irqrestore(&ring->submit_lock, flags);
/* Update devfreq on transition from idle->active: */
mutex_lock(&gpu->active_lock);
if (!gpu->active_submits) {
pm_runtime_get(&gpu->pdev->dev);
msm_devfreq_active(gpu);
}
gpu->active_submits++;
mutex_unlock(&gpu->active_lock);
gpu->funcs->submit(gpu, submit);
gpu->cur_ctx_seqno = submit->queue->ctx->seqno;
hangcheck_timer_reset(gpu);
mutex_unlock(&gpu->lock);
pm_runtime_put(&gpu->pdev->dev);
}
/*
* Init/Cleanup:
*/
static irqreturn_t irq_handler(int irq, void *data)
{
struct msm_gpu *gpu = data;
return gpu->funcs->irq(gpu);
}
static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
int ret = devm_clk_bulk_get_all(&pdev->dev, &gpu->grp_clks);
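/* ret < 1 covers both "no clocks found" (0) and errors (-errno) */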
if (ret < 1) {
gpu->nr_clocks = 0;
return ret;
}
gpu->nr_clocks = ret;
gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
gpu->nr_clocks, "core");
gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
gpu->nr_clocks, "rbbmtimer");
return 0;
}
/* Return a new address space for a msm_drm_private instance */
struct msm_gem_address_space *
msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task)
{
struct msm_gem_address_space *aspace = NULL;
if (!gpu)
return NULL;
/*
* If the target doesn't support private address spaces then return
* the global one
*/
if (gpu->funcs->create_private_address_space) {
aspace = gpu->funcs->create_private_address_space(gpu);
if (!IS_ERR(aspace))
aspace->pid = get_pid(task_pid(task));
}
if (IS_ERR_OR_NULL(aspace))
aspace = msm_gem_address_space_get(gpu->aspace);
return aspace;
}
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
const char *name, struct msm_gpu_config *config)
{
struct msm_drm_private *priv = drm->dev_private;
int i, ret, nr_rings = config->nr_rings;
void *memptrs;
uint64_t memptrs_iova;
if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
gpu->dev = drm;
gpu->funcs = funcs;
gpu->name = name;
gpu->worker = kthread_create_worker(0, "gpu-worker");
if (IS_ERR(gpu->worker)) {
ret = PTR_ERR(gpu->worker);
gpu->worker = NULL;
goto fail;
}
sched_set_fifo_low(gpu->worker->task);
mutex_init(&gpu->active_lock);
mutex_init(&gpu->lock);
init_waitqueue_head(&gpu->retire_event);
kthread_init_work(&gpu->retire_work, retire_worker);
kthread_init_work(&gpu->recover_work, recover_worker);
kthread_init_work(&gpu->fault_work, fault_worker);
priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;
/*
* If progress detection is supported, halve the hangcheck timer
* duration, as it takes two iterations of the hangcheck handler
* to detect a hang.
*/
if (funcs->progress)
priv->hangcheck_period /= 2;
timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);
spin_lock_init(&gpu->perf_lock);
/* Map registers: */
gpu->mmio = msm_ioremap(pdev, config->ioname);
if (IS_ERR(gpu->mmio)) {
ret = PTR_ERR(gpu->mmio);
goto fail;
}
/* Get Interrupt: */
gpu->irq = platform_get_irq(pdev, 0);
if (gpu->irq < 0) {
ret = gpu->irq;
goto fail;
}
ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
IRQF_TRIGGER_HIGH, "gpu-irq", gpu);
if (ret) {
DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
goto fail;
}
ret = get_clocks(pdev, gpu);
if (ret)
goto fail;
gpu->ebi1_clk = msm_clk_get(pdev, "bus");
DBG("ebi1_clk: %p", gpu->ebi1_clk);
if (IS_ERR(gpu->ebi1_clk))
gpu->ebi1_clk = NULL;
/* Acquire regulators: */
gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
DBG("gpu_reg: %p", gpu->gpu_reg);
if (IS_ERR(gpu->gpu_reg))
gpu->gpu_reg = NULL;
gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
DBG("gpu_cx: %p", gpu->gpu_cx);
if (IS_ERR(gpu->gpu_cx))
gpu->gpu_cx = NULL;
gpu->pdev = pdev;
platform_set_drvdata(pdev, &gpu->adreno_smmu);
msm_devfreq_init(gpu);
gpu->aspace = gpu->funcs->create_address_space(gpu, pdev);
if (gpu->aspace == NULL)
DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
else if (IS_ERR(gpu->aspace)) {
ret = PTR_ERR(gpu->aspace);
goto fail;
}
memptrs = msm_gem_kernel_new(drm,
sizeof(struct msm_rbmemptrs) * nr_rings,
check_apriv(gpu, MSM_BO_WC), gpu->aspace, &gpu->memptrs_bo,
&memptrs_iova);
if (IS_ERR(memptrs)) {
ret = PTR_ERR(memptrs);
DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
goto fail;
}
msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");
if (nr_rings > ARRAY_SIZE(gpu->rb)) {
DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
ARRAY_SIZE(gpu->rb));
nr_rings = ARRAY_SIZE(gpu->rb);
}
/* Create ringbuffer(s): */
for (i = 0; i < nr_rings; i++) {
gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);
if (IS_ERR(gpu->rb[i])) {
ret = PTR_ERR(gpu->rb[i]);
DRM_DEV_ERROR(drm->dev,
"could not create ringbuffer %d: %d\n", i, ret);
goto fail;
}
memptrs += sizeof(struct msm_rbmemptrs);
memptrs_iova += sizeof(struct msm_rbmemptrs);
}
gpu->nr_rings = nr_rings;
refcount_set(&gpu->sysprof_active, 1);
return 0;
fail:
for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
msm_ringbuffer_destroy(gpu->rb[i]);
gpu->rb[i] = NULL;
}
msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);
platform_set_drvdata(pdev, NULL);
return ret;
}
void msm_gpu_cleanup(struct msm_gpu *gpu)
{
int i;
DBG("%s", gpu->name);
for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
msm_ringbuffer_destroy(gpu->rb[i]);
gpu->rb[i] = NULL;
}
msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);
if (!IS_ERR_OR_NULL(gpu->aspace)) {
gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu);
msm_gem_address_space_put(gpu->aspace);
}
if (gpu->worker) {
kthread_destroy_worker(gpu->worker);
}
msm_devfreq_cleanup(gpu);
platform_set_drvdata(gpu->pdev, NULL);
}
| linux-master | drivers/gpu/drm/msm/msm_gpu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
/* For profiling, userspace can:
*
* tail -f /sys/kernel/debug/dri/<minor>/perf
*
* This will enable performance counters/profiling to track the busy time
* and any gpu specific performance counters that are supported.
*/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <drm/drm_file.h>
#include "msm_drv.h"
#include "msm_gpu.h"
struct msm_perf_state {
struct drm_device *dev;
bool open;
int cnt;
struct mutex read_lock;
char buf[256];
int buftot, bufpos;
unsigned long next_jiffies;
};
#define SAMPLE_TIME (HZ/4)
/* wait for next sample time: */
static int wait_sample(struct msm_perf_state *perf)
{
unsigned long start_jiffies = jiffies;
if (time_after(perf->next_jiffies, start_jiffies)) {
unsigned long remaining_jiffies =
perf->next_jiffies - start_jiffies;
int ret = schedule_timeout_interruptible(remaining_jiffies);
if (ret > 0) {
/* interrupted */
return -ERESTARTSYS;
}
}
perf->next_jiffies += SAMPLE_TIME;
return 0;
}
static int refill_buf(struct msm_perf_state *perf)
{
struct msm_drm_private *priv = perf->dev->dev_private;
struct msm_gpu *gpu = priv->gpu;
char *ptr = perf->buf;
int rem = sizeof(perf->buf);
int i, n;
if ((perf->cnt++ % 32) == 0) {
/* Header line: */
n = snprintf(ptr, rem, "%%BUSY");
ptr += n;
rem -= n;
for (i = 0; i < gpu->num_perfcntrs; i++) {
const struct msm_gpu_perfcntr *perfcntr = &gpu->perfcntrs[i];
n = snprintf(ptr, rem, "\t%s", perfcntr->name);
ptr += n;
rem -= n;
}
} else {
/* Sample line: */
uint32_t activetime = 0, totaltime = 0;
uint32_t cntrs[5];
uint32_t val;
int ret;
/* sleep until next sample time: */
ret = wait_sample(perf);
if (ret)
return ret;
ret = msm_gpu_perfcntr_sample(gpu, &activetime, &totaltime,
ARRAY_SIZE(cntrs), cntrs);
if (ret < 0)
return ret;
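/* busy time in permille of the sample window, printed as a percentage */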
val = totaltime ? 1000 * activetime / totaltime : 0;
n = snprintf(ptr, rem, "%3d.%d%%", val / 10, val % 10);
ptr += n;
rem -= n;
for (i = 0; i < ret; i++) {
/* cycle counters (I think).. convert to MHz.. */
val = cntrs[i] / 10000;
n = snprintf(ptr, rem, "\t%5d.%02d",
val / 100, val % 100);
ptr += n;
rem -= n;
}
}
n = snprintf(ptr, rem, "\n");
ptr += n;
rem -= n;
perf->bufpos = 0;
perf->buftot = ptr - perf->buf;
return 0;
}
static ssize_t perf_read(struct file *file, char __user *buf,
size_t sz, loff_t *ppos)
{
struct msm_perf_state *perf = file->private_data;
int n = 0, ret = 0;
mutex_lock(&perf->read_lock);
if (perf->bufpos >= perf->buftot) {
ret = refill_buf(perf);
if (ret)
goto out;
}
n = min((int)sz, perf->buftot - perf->bufpos);
if (copy_to_user(buf, &perf->buf[perf->bufpos], n)) {
ret = -EFAULT;
goto out;
}
perf->bufpos += n;
*ppos += n;
out:
mutex_unlock(&perf->read_lock);
if (ret)
return ret;
return n;
}
static int perf_open(struct inode *inode, struct file *file)
{
struct msm_perf_state *perf = inode->i_private;
struct drm_device *dev = perf->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_gpu *gpu = priv->gpu;
int ret = 0;
if (!gpu)
return -ENODEV;
mutex_lock(&gpu->lock);
if (perf->open) {
ret = -EBUSY;
goto out;
}
file->private_data = perf;
perf->open = true;
perf->cnt = 0;
perf->buftot = 0;
perf->bufpos = 0;
msm_gpu_perfcntr_start(gpu);
perf->next_jiffies = jiffies + SAMPLE_TIME;
out:
mutex_unlock(&gpu->lock);
return ret;
}
static int perf_release(struct inode *inode, struct file *file)
{
struct msm_perf_state *perf = inode->i_private;
struct msm_drm_private *priv = perf->dev->dev_private;
msm_gpu_perfcntr_stop(priv->gpu);
perf->open = false;
return 0;
}
static const struct file_operations perf_debugfs_fops = {
.owner = THIS_MODULE,
.open = perf_open,
.read = perf_read,
.llseek = no_llseek,
.release = perf_release,
};
int msm_perf_debugfs_init(struct drm_minor *minor)
{
struct msm_drm_private *priv = minor->dev->dev_private;
struct msm_perf_state *perf;
/* only create on first minor: */
if (priv->perf)
return 0;
perf = kzalloc(sizeof(*perf), GFP_KERNEL);
if (!perf)
return -ENOMEM;
perf->dev = minor->dev;
mutex_init(&perf->read_lock);
priv->perf = perf;
debugfs_create_file("perf", S_IFREG | S_IRUGO, minor->debugfs_root,
perf, &perf_debugfs_fops);
return 0;
}
void msm_perf_debugfs_cleanup(struct msm_drm_private *priv)
{
struct msm_perf_state *perf = priv->perf;
if (!perf)
return;
priv->perf = NULL;
mutex_destroy(&perf->read_lock);
kfree(perf);
}
#endif
| linux-master | drivers/gpu/drm/msm/msm_perf.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include <linux/dma-buf.h>
#include <drm/drm_prime.h>
#include "msm_drv.h"
#include "msm_gem.h"
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int npages = obj->size >> PAGE_SHIFT;
if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */
return ERR_PTR(-ENOMEM);
return drm_prime_pages_to_sg(obj->dev, msm_obj->pages, npages);
}
int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
void *vaddr;
vaddr = msm_gem_get_vaddr(obj);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
iosys_map_set_vaddr(map, vaddr);
return 0;
}
void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
msm_gem_put_vaddr(obj);
}
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg)
{
return msm_gem_import(dev, attach->dmabuf, sg);
}
int msm_gem_prime_pin(struct drm_gem_object *obj)
{
if (!obj->import_attach)
msm_gem_pin_pages(obj);
return 0;
}
void msm_gem_prime_unpin(struct drm_gem_object *obj)
{
if (!obj->import_attach)
msm_gem_unpin_pages(obj);
}
| linux-master | drivers/gpu/drm/msm/msm_gem_prime.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_gpu_trace.h"
/* Enabled by default, but can be disabled via the modparam below for testing
* on the different iommu combinations that can be paired with the driver:
*/
static bool enable_eviction = true;
MODULE_PARM_DESC(enable_eviction, "Enable swappable GEM buffers");
module_param(enable_eviction, bool, 0600);
static bool can_swap(void)
{
return enable_eviction && get_nr_swap_pages() > 0;
}
static bool can_block(struct shrink_control *sc)
{
if (!(sc->gfp_mask & __GFP_DIRECT_RECLAIM))
return false;
return current_is_kswapd() || (sc->gfp_mask & __GFP_RECLAIM);
}
static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
struct msm_drm_private *priv =
container_of(shrinker, struct msm_drm_private, shrinker);
unsigned count = priv->lru.dontneed.count;
if (can_swap())
count += priv->lru.willneed.count;
return count;
}
static bool
purge(struct drm_gem_object *obj)
{
if (!is_purgeable(to_msm_bo(obj)))
return false;
if (msm_gem_active(obj))
return false;
msm_gem_purge(obj);
return true;
}
static bool
evict(struct drm_gem_object *obj)
{
if (is_unevictable(to_msm_bo(obj)))
return false;
if (msm_gem_active(obj))
return false;
msm_gem_evict(obj);
return true;
}
static bool
wait_for_idle(struct drm_gem_object *obj)
{
enum dma_resv_usage usage = dma_resv_usage_rw(true);
return dma_resv_wait_timeout(obj->resv, usage, false, 1000) > 0;
}
static bool
active_purge(struct drm_gem_object *obj)
{
if (!wait_for_idle(obj))
return false;
return purge(obj);
}
static bool
active_evict(struct drm_gem_object *obj)
{
if (!wait_for_idle(obj))
return false;
return evict(obj);
}
static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
struct msm_drm_private *priv =
container_of(shrinker, struct msm_drm_private, shrinker);
struct {
struct drm_gem_lru *lru;
bool (*shrink)(struct drm_gem_object *obj);
bool cond;
unsigned long freed;
unsigned long remaining;
} stages[] = {
/* Stages of progressively more aggressive/expensive reclaim: */
{ &priv->lru.dontneed, purge, true },
{ &priv->lru.willneed, evict, can_swap() },
{ &priv->lru.dontneed, active_purge, can_block(sc) },
{ &priv->lru.willneed, active_evict, can_swap() && can_block(sc) },
};
long nr = sc->nr_to_scan;
unsigned long freed = 0;
unsigned long remaining = 0;
for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
if (!stages[i].cond)
continue;
stages[i].freed =
drm_gem_lru_scan(stages[i].lru, nr,
&stages[i].remaining,
stages[i].shrink);
nr -= stages[i].freed;
freed += stages[i].freed;
remaining += stages[i].remaining;
}
if (freed) {
trace_msm_gem_shrink(sc->nr_to_scan, stages[0].freed,
stages[1].freed, stages[2].freed,
stages[3].freed);
}
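/* report progress only while scannable objects remain, otherwise ask the
* core to stop calling us for this reclaim round
*/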
return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;
}
#ifdef CONFIG_DEBUG_FS
unsigned long
msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
{
struct msm_drm_private *priv = dev->dev_private;
struct shrink_control sc = {
.nr_to_scan = nr_to_scan,
};
int ret;
fs_reclaim_acquire(GFP_KERNEL);
ret = msm_gem_shrinker_scan(&priv->shrinker, &sc);
fs_reclaim_release(GFP_KERNEL);
return ret;
}
#endif
/* since we don't know any better, let's bail after a few
* and if necessary the shrinker will be invoked again.
* Seems better than unmapping *everything*
*/
static const int vmap_shrink_limit = 15;
static bool
vmap_shrink(struct drm_gem_object *obj)
{
if (!is_vunmapable(to_msm_bo(obj)))
return false;
msm_gem_vunmap(obj);
return true;
}
static int
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
struct msm_drm_private *priv =
container_of(nb, struct msm_drm_private, vmap_notifier);
struct drm_gem_lru *lrus[] = {
&priv->lru.dontneed,
&priv->lru.willneed,
&priv->lru.pinned,
NULL,
};
unsigned idx, unmapped = 0;
unsigned long remaining = 0;
for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {
unmapped += drm_gem_lru_scan(lrus[idx],
vmap_shrink_limit - unmapped,
&remaining,
vmap_shrink);
}
*(unsigned long *)ptr += unmapped;
if (unmapped > 0)
trace_msm_gem_purge_vmaps(unmapped);
return NOTIFY_DONE;
}
/**
* msm_gem_shrinker_init - Initialize msm shrinker
* @dev: drm device
*
* This function registers and sets up the msm shrinker.
*/
void msm_gem_shrinker_init(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
priv->shrinker.count_objects = msm_gem_shrinker_count;
priv->shrinker.scan_objects = msm_gem_shrinker_scan;
priv->shrinker.seeks = DEFAULT_SEEKS;
WARN_ON(register_shrinker(&priv->shrinker, "drm-msm_gem"));
priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
}
/**
* msm_gem_shrinker_cleanup - Clean up msm shrinker
* @dev: drm device
*
* This function unregisters the msm shrinker.
*/
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
if (priv->shrinker.nr_deferred) {
WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
unregister_shrinker(&priv->shrinker);
}
}
| linux-master | drivers/gpu/drm/msm/msm_gem_shrinker.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <[email protected]>
*/
#include "msm_ringbuffer.h"
#include "msm_gpu.h"
static uint num_hw_submissions = 8;
MODULE_PARM_DESC(num_hw_submissions, "The max # of jobs to write into ringbuffer (default 8)");
module_param(num_hw_submissions, uint, 0600);
static struct dma_fence *msm_job_run(struct drm_sched_job *job)
{
struct msm_gem_submit *submit = to_msm_submit(job);
struct msm_fence_context *fctx = submit->ring->fctx;
struct msm_gpu *gpu = submit->gpu;
struct msm_drm_private *priv = gpu->dev->dev_private;
int i;
msm_fence_init(submit->hw_fence, fctx);
submit->seqno = submit->hw_fence->seqno;
mutex_lock(&priv->lru.lock);
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = submit->bos[i].obj;
msm_gem_unpin_active(obj);
submit->bos[i].flags &= ~BO_PINNED;
}
mutex_unlock(&priv->lru.lock);
msm_gpu_submit(gpu, submit);
return dma_fence_get(submit->hw_fence);
}
static void msm_job_free(struct drm_sched_job *job)
{
struct msm_gem_submit *submit = to_msm_submit(job);
drm_sched_job_cleanup(job);
msm_gem_submit_put(submit);
}
static const struct drm_sched_backend_ops msm_sched_ops = {
.run_job = msm_job_run,
.free_job = msm_job_free
};
struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
void *memptrs, uint64_t memptrs_iova)
{
struct msm_ringbuffer *ring;
long sched_timeout;
char name[32];
int ret;
/* We assume everywhere that MSM_GPU_RINGBUFFER_SZ is a power of 2 */
BUILD_BUG_ON(!is_power_of_2(MSM_GPU_RINGBUFFER_SZ));
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring) {
ret = -ENOMEM;
goto fail;
}
ring->gpu = gpu;
ring->id = id;
ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
check_apriv(gpu, MSM_BO_WC | MSM_BO_GPU_READONLY),
gpu->aspace, &ring->bo, &ring->iova);
if (IS_ERR(ring->start)) {
ret = PTR_ERR(ring->start);
ring->start = NULL;
goto fail;
}
msm_gem_object_set_name(ring->bo, "ring%d", id);
ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
ring->next = ring->start;
ring->cur = ring->start;
ring->memptrs = memptrs;
ring->memptrs_iova = memptrs_iova;
/* currently managing hangcheck ourselves: */
sched_timeout = MAX_SCHEDULE_TIMEOUT;
ret = drm_sched_init(&ring->sched, &msm_sched_ops,
num_hw_submissions, 0, sched_timeout,
NULL, NULL, to_msm_bo(ring->bo)->name, gpu->dev->dev);
if (ret) {
goto fail;
}
INIT_LIST_HEAD(&ring->submits);
spin_lock_init(&ring->submit_lock);
spin_lock_init(&ring->preempt_lock);
snprintf(name, sizeof(name), "gpu-ring-%d", ring->id);
ring->fctx = msm_fence_context_alloc(gpu->dev, &ring->memptrs->fence, name);
return ring;
fail:
msm_ringbuffer_destroy(ring);
return ERR_PTR(ret);
}
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
{
if (IS_ERR_OR_NULL(ring))
return;
drm_sched_fini(&ring->sched);
msm_fence_context_free(ring->fctx);
msm_gem_kernel_put(ring->bo, ring->gpu->aspace);
kfree(ring);
}
| linux-master | drivers/gpu/drm/msm/msm_ringbuffer.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
*/
#include "msm_gem.h"
#include "a5xx_gpu.h"
/*
* Try to transition the preemption state from old to new. Return
* true on success or false if the original state wasn't 'old'
*/
static inline bool try_preempt_state(struct a5xx_gpu *a5xx_gpu,
enum preempt_state old, enum preempt_state new)
{
enum preempt_state cur = atomic_cmpxchg(&a5xx_gpu->preempt_state,
old, new);
return (cur == old);
}
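/*
* Illustrative sketch (an assumption, mirroring a5xx_preempt_trigger() below):
* a preemption is only started if the NONE -> START transition wins, e.g.:
*
* if (!try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START))
* return;   ... a preemption is already in flight
*/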
/*
* Force the preemption state to the specified state. This is used in cases
* where the current state is known and won't change
*/
static inline void set_preempt_state(struct a5xx_gpu *gpu,
enum preempt_state new)
{
/*
* preempt_state may be read by other cores trying to trigger a
* preemption or in the interrupt handler so barriers are needed
* before...
*/
smp_mb__before_atomic();
atomic_set(&gpu->preempt_state, new);
/* ... and after */
smp_mb__after_atomic();
}
/* Write the most recent wptr for the given ring into the hardware */
static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
unsigned long flags;
uint32_t wptr;
if (!ring)
return;
spin_lock_irqsave(&ring->preempt_lock, flags);
wptr = get_wptr(ring);
spin_unlock_irqrestore(&ring->preempt_lock, flags);
gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
}
/* Return the highest priority ringbuffer with something in it */
static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
{
unsigned long flags;
int i;
for (i = 0; i < gpu->nr_rings; i++) {
bool empty;
struct msm_ringbuffer *ring = gpu->rb[i];
spin_lock_irqsave(&ring->preempt_lock, flags);
empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));
spin_unlock_irqrestore(&ring->preempt_lock, flags);
if (!empty)
return ring;
}
return NULL;
}
static void a5xx_preempt_timer(struct timer_list *t)
{
struct a5xx_gpu *a5xx_gpu = from_timer(a5xx_gpu, t, preempt_timer);
struct msm_gpu *gpu = &a5xx_gpu->base.base;
struct drm_device *dev = gpu->dev;
if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED))
return;
DRM_DEV_ERROR(dev->dev, "%s: preemption timed out\n", gpu->name);
kthread_queue_work(gpu->worker, &gpu->recover_work);
}
/* Try to trigger a preemption switch */
void a5xx_preempt_trigger(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
unsigned long flags;
struct msm_ringbuffer *ring;
if (gpu->nr_rings == 1)
return;
/*
* Try to start preemption by moving from NONE to START. If
* unsuccessful, a preemption is already in flight
*/
if (!try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START))
return;
/* Get the next ring to preempt to */
ring = get_next_ring(gpu);
/*
* If no ring is populated or the highest priority ring is the current
* one do nothing except to update the wptr to the latest and greatest
*/
if (!ring || (a5xx_gpu->cur_ring == ring)) {
/*
* It's possible that while a preemption request is in progress
* from an irq context, a user context trying to submit might
* fail to update the write pointer, because it determines
* that the preempt state is not PREEMPT_NONE.
*
* Close the race by introducing an intermediate
* state PREEMPT_ABORT to let the submit path
* know that the ringbuffer is not going to change
* and can safely update the write pointer.
*/
set_preempt_state(a5xx_gpu, PREEMPT_ABORT);
update_wptr(gpu, a5xx_gpu->cur_ring);
set_preempt_state(a5xx_gpu, PREEMPT_NONE);
return;
}
/* Make sure the wptr doesn't update while we're in motion */
spin_lock_irqsave(&ring->preempt_lock, flags);
a5xx_gpu->preempt[ring->id]->wptr = get_wptr(ring);
spin_unlock_irqrestore(&ring->preempt_lock, flags);
/* Set the address of the incoming preemption record */
gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO,
a5xx_gpu->preempt_iova[ring->id]);
a5xx_gpu->next_ring = ring;
/* Start a timer to catch a stuck preemption */
mod_timer(&a5xx_gpu->preempt_timer, jiffies + msecs_to_jiffies(10000));
/* Set the preemption state to triggered */
set_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED);
/* Make sure everything is written before hitting the button */
wmb();
/* And actually start the preemption */
gpu_write(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL, 1);
}
void a5xx_preempt_irq(struct msm_gpu *gpu)
{
uint32_t status;
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
struct drm_device *dev = gpu->dev;
if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_PENDING))
return;
/* Delete the preemption watchdog timer */
del_timer(&a5xx_gpu->preempt_timer);
/*
* The hardware should be setting CP_CONTEXT_SWITCH_CNTL to zero before
* firing the interrupt, but there is a non-zero chance of a hardware
* condition or a software race that could set it again before we have a
* chance to finish. If that happens, log and go for recovery
*/
status = gpu_read(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL);
if (unlikely(status)) {
set_preempt_state(a5xx_gpu, PREEMPT_FAULTED);
DRM_DEV_ERROR(dev->dev, "%s: Preemption failed to complete\n",
gpu->name);
kthread_queue_work(gpu->worker, &gpu->recover_work);
return;
}
a5xx_gpu->cur_ring = a5xx_gpu->next_ring;
a5xx_gpu->next_ring = NULL;
update_wptr(gpu, a5xx_gpu->cur_ring);
set_preempt_state(a5xx_gpu, PREEMPT_NONE);
}
void a5xx_preempt_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
int i;
/* Always come up on rb 0 */
a5xx_gpu->cur_ring = gpu->rb[0];
/* No preemption if we only have one ring */
if (gpu->nr_rings == 1)
return;
for (i = 0; i < gpu->nr_rings; i++) {
a5xx_gpu->preempt[i]->wptr = 0;
a5xx_gpu->preempt[i]->rptr = 0;
a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova;
a5xx_gpu->preempt[i]->rptr_addr = shadowptr(a5xx_gpu, gpu->rb[i]);
}
/* Write a 0 to signal that we aren't switching pagetables */
gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO, 0);
/* Reset the preemption state */
set_preempt_state(a5xx_gpu, PREEMPT_NONE);
}
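/*
* Allocate and initialize the per-ring preemption record plus the
* unprivileged counter buffer that the CP saves/restores into when
* switching between rings.
*/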
static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = &a5xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
struct a5xx_preempt_record *ptr;
void *counters;
struct drm_gem_object *bo = NULL, *counters_bo = NULL;
u64 iova = 0, counters_iova = 0;
ptr = msm_gem_kernel_new(gpu->dev,
A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE,
MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
/* The buffer to store counters needs to be unprivileged */
counters = msm_gem_kernel_new(gpu->dev,
A5XX_PREEMPT_COUNTER_SIZE,
MSM_BO_WC, gpu->aspace, &counters_bo, &counters_iova);
if (IS_ERR(counters)) {
msm_gem_kernel_put(bo, gpu->aspace);
return PTR_ERR(counters);
}
msm_gem_object_set_name(bo, "preempt");
msm_gem_object_set_name(counters_bo, "preempt_counters");
a5xx_gpu->preempt_bo[ring->id] = bo;
a5xx_gpu->preempt_counters_bo[ring->id] = counters_bo;
a5xx_gpu->preempt_iova[ring->id] = iova;
a5xx_gpu->preempt[ring->id] = ptr;
/* Set up the defaults on the preemption record */
ptr->magic = A5XX_PREEMPT_RECORD_MAGIC;
ptr->info = 0;
ptr->data = 0;
ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE;
ptr->counter = counters_iova;
return 0;
}
void a5xx_preempt_fini(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
int i;
for (i = 0; i < gpu->nr_rings; i++) {
msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->aspace);
msm_gem_kernel_put(a5xx_gpu->preempt_counters_bo[i], gpu->aspace);
}
}
void a5xx_preempt_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
int i;
/* No preemption if we only have one ring */
if (gpu->nr_rings <= 1)
return;
for (i = 0; i < gpu->nr_rings; i++) {
if (preempt_init_ring(a5xx_gpu, gpu->rb[i])) {
/*
* On any failure our adventure is over. Clean up and
* set nr_rings to 1 to force preemption off
*/
a5xx_preempt_fini(gpu);
gpu->nr_rings = 1;
return;
}
}
timer_setup(&a5xx_gpu->preempt_timer, a5xx_preempt_timer, 0);
}
| linux-master | drivers/gpu/drm/msm/adreno/a5xx_preempt.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2014 The Linux Foundation. All rights reserved.
*/
#include "a4xx_gpu.h"
#define A4XX_INT0_MASK \
(A4XX_INT0_RBBM_AHB_ERROR | \
A4XX_INT0_RBBM_ATB_BUS_OVERFLOW | \
A4XX_INT0_CP_T0_PACKET_IN_IB | \
A4XX_INT0_CP_OPCODE_ERROR | \
A4XX_INT0_CP_RESERVED_BIT_ERROR | \
A4XX_INT0_CP_HW_FAULT | \
A4XX_INT0_CP_IB1_INT | \
A4XX_INT0_CP_IB2_INT | \
A4XX_INT0_CP_RB_INT | \
A4XX_INT0_CP_REG_PROTECT_FAULT | \
A4XX_INT0_CP_AHB_ERROR_HALT | \
A4XX_INT0_CACHE_FLUSH_TS | \
A4XX_INT0_UCHE_OOB_ACCESS)
extern bool hang_debug;
static void a4xx_dump(struct msm_gpu *gpu);
static bool a4xx_idle(struct msm_gpu *gpu);
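/*
* Write the submit's IB commands into the ringbuffer, followed by a
* CACHE_FLUSH_TS event that writes the fence seqno to memory and raises
* an interrupt once the commands have landed.
*/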
static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct msm_ringbuffer *ring = submit->ring;
unsigned int i;
for (i = 0; i < submit->nr_cmds; i++) {
switch (submit->cmd[i].type) {
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
/* ignore IB-targets */
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
/* ignore if there has not been a ctx switch: */
if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFE, 2);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
OUT_RING(ring, submit->cmd[i].size);
OUT_PKT2(ring);
break;
}
}
OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
OUT_RING(ring, submit->seqno);
/* Flush HLSQ lazy updates to make sure there is nothing
* pending for indirect loads after the timestamp has
* passed:
*/
OUT_PKT3(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, HLSQ_FLUSH);
/* wait for idle before cache flush/interrupt */
OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
OUT_RING(ring, 0x00000000);
/* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
OUT_RING(ring, CACHE_FLUSH_TS | CP_EVENT_WRITE_0_IRQ);
OUT_RING(ring, rbmemptr(ring, fence));
OUT_RING(ring, submit->seqno);
adreno_flush(gpu, ring, REG_A4XX_CP_RB_WPTR);
}
/*
* a4xx_enable_hwcg() - Program the clock control registers
* @gpu: The msm GPU pointer
*/
static void a4xx_enable_hwcg(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
unsigned int i;
for (i = 0; i < 4; i++)
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TP(i), 0x02222202);
for (i = 0; i < 4; i++)
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_TP(i), 0x00002222);
for (i = 0; i < 4; i++)
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TP(i), 0x0E739CE7);
for (i = 0; i < 4; i++)
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TP(i), 0x00111111);
for (i = 0; i < 4; i++)
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_SP(i), 0x22222222);
for (i = 0; i < 4; i++)
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_SP(i), 0x00222222);
for (i = 0; i < 4; i++)
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_SP(i), 0x00000104);
for (i = 0; i < 4; i++)
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_SP(i), 0x00000081);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_UCHE, 0x22222222);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_UCHE, 0x02222222);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL3_UCHE, 0x00000000);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL4_UCHE, 0x00000000);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_UCHE, 0x00004444);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_UCHE, 0x00001112);
for (i = 0; i < 4; i++)
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_RB(i), 0x22222222);
/* Disable L1 clocking in A420 due to CCU issues with it */
for (i = 0; i < 4; i++) {
if (adreno_is_a420(adreno_gpu)) {
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_RB(i),
0x00002020);
} else {
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_RB(i),
0x00022020);
}
}
/* No CCU for A405 */
if (!adreno_is_a405(adreno_gpu)) {
for (i = 0; i < 4; i++) {
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(i),
0x00000922);
}
for (i = 0; i < 4; i++) {
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(i),
0x00000000);
}
for (i = 0; i < 4; i++) {
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(i),
0x00000001);
}
}
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_MODE_GPC, 0x02222222);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_GPC, 0x04100104);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_GPC, 0x00022222);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_COM_DCOM, 0x00000022);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_COM_DCOM, 0x0000010F);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_COM_DCOM, 0x00000022);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TSE_RAS_RBBM, 0x00222222);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00004104);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00000222);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_HLSQ , 0x00000000);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00220000);
/* Early A430s have a timing issue with SP/TP power collapse;
* disabling HW clock gating prevents it. */
if (adreno_is_a430(adreno_gpu) && adreno_patchid(adreno_gpu) < 2)
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0);
else
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2, 0);
}
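/*
* Issue the CP_ME_INIT packet to initialize the CP micro engine, then
* wait for the GPU to go idle before continuing.
*/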
static bool a4xx_me_init(struct msm_gpu *gpu)
{
struct msm_ringbuffer *ring = gpu->rb[0];
OUT_PKT3(ring, CP_ME_INIT, 17);
OUT_RING(ring, 0x000003f7);
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000080);
OUT_RING(ring, 0x00000100);
OUT_RING(ring, 0x00000180);
OUT_RING(ring, 0x00006600);
OUT_RING(ring, 0x00000150);
OUT_RING(ring, 0x0000014e);
OUT_RING(ring, 0x00000154);
OUT_RING(ring, 0x00000001);
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
adreno_flush(gpu, ring, REG_A4XX_CP_RB_WPTR);
return a4xx_idle(gpu);
}
static int a4xx_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(adreno_gpu);
uint32_t *ptr, len;
int i, ret;
if (adreno_is_a405(adreno_gpu)) {
gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
} else if (adreno_is_a420(adreno_gpu)) {
gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT, 0x0001001F);
gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT_CONF, 0x000000A4);
gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018);
gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
} else if (adreno_is_a430(adreno_gpu)) {
gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018);
gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
} else {
BUG();
}
/* Make all blocks contribute to the GPU BUSY perf counter */
gpu_write(gpu, REG_A4XX_RBBM_GPU_BUSY_MASKED, 0xffffffff);
/* Tune the hysteresis counters for SP and CP idle detection */
gpu_write(gpu, REG_A4XX_RBBM_SP_HYST_CNT, 0x10);
gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);
if (adreno_is_a430(adreno_gpu)) {
gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL2, 0x30);
}
/* Enable the RBBM error reporting bits */
gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL0, 0x00000001);
/* Enable AHB error reporting */
gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL1, 0xa6ffffff);
/* Enable power counters */
gpu_write(gpu, REG_A4XX_RBBM_RBBM_CTL, 0x00000030);
/*
* Turn on hang detection - this spews a lot of useful information
* into the RBBM registers on a hang:
*/
gpu_write(gpu, REG_A4XX_RBBM_INTERFACE_HANG_INT_CTL,
(1 << 30) | 0xFFFF);
gpu_write(gpu, REG_A4XX_RB_GMEM_BASE_ADDR,
(unsigned int)(a4xx_gpu->ocmem.base >> 14));
/* Turn on performance counters: */
gpu_write(gpu, REG_A4XX_RBBM_PERFCTR_CTL, 0x01);
/* use the first CP counter for timestamp queries.. userspace may set
* this as well but it selects the same counter/countable:
*/
gpu_write(gpu, REG_A4XX_CP_PERFCTR_CP_SEL_0, CP_ALWAYS_COUNT);
if (adreno_is_a430(adreno_gpu))
gpu_write(gpu, REG_A4XX_UCHE_CACHE_WAYS_VFD, 0x07);
/* Disable L2 bypass to avoid UCHE out of bounds errors */
gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, 0xffff0000);
gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, 0xffff0000);
gpu_write(gpu, REG_A4XX_CP_DEBUG, (1 << 25) |
(adreno_is_a420(adreno_gpu) ? (1 << 29) : 0));
/* On A430 enable SP regfile sleep for power savings */
/* TODO downstream does this for !420, so maybe applies for 405 too? */
if (!adreno_is_a420(adreno_gpu)) {
gpu_write(gpu, REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_0,
0x00000441);
gpu_write(gpu, REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_1,
0x00000441);
}
a4xx_enable_hwcg(gpu);
/*
* For A420 set RBBM_CLOCK_DELAY_HLSQ.CGC_HLSQ_TP_EARLY_CYC >= 2
* due to timing issue with HLSQ_TP_CLK_EN
*/
if (adreno_is_a420(adreno_gpu)) {
unsigned int val;
val = gpu_read(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ);
val &= ~A4XX_CGC_HLSQ_EARLY_CYC__MASK;
val |= 2 << A4XX_CGC_HLSQ_EARLY_CYC__SHIFT;
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, val);
}
/* setup access protection: */
gpu_write(gpu, REG_A4XX_CP_PROTECT_CTRL, 0x00000007);
/* RBBM registers */
gpu_write(gpu, REG_A4XX_CP_PROTECT(0), 0x62000010);
gpu_write(gpu, REG_A4XX_CP_PROTECT(1), 0x63000020);
gpu_write(gpu, REG_A4XX_CP_PROTECT(2), 0x64000040);
gpu_write(gpu, REG_A4XX_CP_PROTECT(3), 0x65000080);
gpu_write(gpu, REG_A4XX_CP_PROTECT(4), 0x66000100);
gpu_write(gpu, REG_A4XX_CP_PROTECT(5), 0x64000200);
/* CP registers */
gpu_write(gpu, REG_A4XX_CP_PROTECT(6), 0x67000800);
gpu_write(gpu, REG_A4XX_CP_PROTECT(7), 0x64001600);
/* RB registers */
gpu_write(gpu, REG_A4XX_CP_PROTECT(8), 0x60003300);
/* HLSQ registers */
gpu_write(gpu, REG_A4XX_CP_PROTECT(9), 0x60003800);
/* VPC registers */
gpu_write(gpu, REG_A4XX_CP_PROTECT(10), 0x61003980);
/* SMMU registers */
gpu_write(gpu, REG_A4XX_CP_PROTECT(11), 0x6e010000);
gpu_write(gpu, REG_A4XX_RBBM_INT_0_MASK, A4XX_INT0_MASK);
ret = adreno_hw_init(gpu);
if (ret)
return ret;
/*
* Use the default ringbuffer size and block size but disable the RPTR
* shadow
*/
gpu_write(gpu, REG_A4XX_CP_RB_CNTL,
MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
/* Set the ringbuffer address */
gpu_write(gpu, REG_A4XX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
/* Load PM4: */
ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
DBG("loading PM4 ucode version: %u", ptr[0]);
gpu_write(gpu, REG_A4XX_CP_ME_RAM_WADDR, 0);
for (i = 1; i < len; i++)
gpu_write(gpu, REG_A4XX_CP_ME_RAM_DATA, ptr[i]);
/* Load PFP: */
ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data);
len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4;
DBG("loading PFP ucode version: %u", ptr[0]);
gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_ADDR, 0);
for (i = 1; i < len; i++)
gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_DATA, ptr[i]);
/* clear ME_HALT to start micro engine */
gpu_write(gpu, REG_A4XX_CP_ME_CNTL, 0);
return a4xx_me_init(gpu) ? 0 : -EINVAL;
}
static void a4xx_recover(struct msm_gpu *gpu)
{
int i;
adreno_dump_info(gpu);
for (i = 0; i < 8; i++) {
printk("CP_SCRATCH_REG%d: %u\n", i,
gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
}
/* dump registers before resetting gpu, if enabled: */
if (hang_debug)
a4xx_dump(gpu);
gpu_write(gpu, REG_A4XX_RBBM_SW_RESET_CMD, 1);
gpu_read(gpu, REG_A4XX_RBBM_SW_RESET_CMD);
gpu_write(gpu, REG_A4XX_RBBM_SW_RESET_CMD, 0);
adreno_recover(gpu);
}
static void a4xx_destroy(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(adreno_gpu);
DBG("%s", gpu->name);
adreno_gpu_cleanup(adreno_gpu);
adreno_gpu_ocmem_cleanup(&a4xx_gpu->ocmem);
kfree(a4xx_gpu);
}
static bool a4xx_idle(struct msm_gpu *gpu)
{
/* wait for ringbuffer to drain: */
if (!adreno_idle(gpu, gpu->rb[0]))
return false;
/* then wait for GPU to finish: */
if (spin_until(!(gpu_read(gpu, REG_A4XX_RBBM_STATUS) &
A4XX_RBBM_STATUS_GPU_BUSY))) {
DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
/* TODO maybe we need to reset GPU here to recover from hang? */
return false;
}
return true;
}
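/*
* IRQ handler: read and acknowledge RBBM_INT_0_STATUS, log protected
* mode faults, and retire any completed submits.
*/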
static irqreturn_t a4xx_irq(struct msm_gpu *gpu)
{
uint32_t status;
status = gpu_read(gpu, REG_A4XX_RBBM_INT_0_STATUS);
DBG("%s: Int status %08x", gpu->name, status);
if (status & A4XX_INT0_CP_REG_PROTECT_FAULT) {
uint32_t reg = gpu_read(gpu, REG_A4XX_CP_PROTECT_STATUS);
printk("CP | Protected mode error| %s | addr=%x\n",
reg & (1 << 24) ? "WRITE" : "READ",
(reg & 0xFFFFF) >> 2);
}
gpu_write(gpu, REG_A4XX_RBBM_INT_CLEAR_CMD, status);
msm_gpu_retire(gpu);
return IRQ_HANDLED;
}
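/*
* Register ranges dumped for debug/crash state, expressed as inclusive
* (start, end) pairs and terminated by the ~0 sentinel.
*/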
static const unsigned int a4xx_registers[] = {
/* RBBM */
0x0000, 0x0002, 0x0004, 0x0021, 0x0023, 0x0024, 0x0026, 0x0026,
0x0028, 0x002B, 0x002E, 0x0034, 0x0037, 0x0044, 0x0047, 0x0066,
0x0068, 0x0095, 0x009C, 0x0170, 0x0174, 0x01AF,
/* CP */
0x0200, 0x0233, 0x0240, 0x0250, 0x04C0, 0x04DD, 0x0500, 0x050B,
0x0578, 0x058F,
/* VSC */
0x0C00, 0x0C03, 0x0C08, 0x0C41, 0x0C50, 0x0C51,
/* GRAS */
0x0C80, 0x0C81, 0x0C88, 0x0C8F,
/* RB */
0x0CC0, 0x0CC0, 0x0CC4, 0x0CD2,
/* PC */
0x0D00, 0x0D0C, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
/* VFD */
0x0E40, 0x0E4A,
/* VPC */
0x0E60, 0x0E61, 0x0E63, 0x0E68,
/* UCHE */
0x0E80, 0x0E84, 0x0E88, 0x0E95,
/* VMIDMT */
0x1000, 0x1000, 0x1002, 0x1002, 0x1004, 0x1004, 0x1008, 0x100A,
0x100C, 0x100D, 0x100F, 0x1010, 0x1012, 0x1016, 0x1024, 0x1024,
0x1027, 0x1027, 0x1100, 0x1100, 0x1102, 0x1102, 0x1104, 0x1104,
0x1110, 0x1110, 0x1112, 0x1116, 0x1124, 0x1124, 0x1300, 0x1300,
0x1380, 0x1380,
/* GRAS CTX 0 */
0x2000, 0x2004, 0x2008, 0x2067, 0x2070, 0x2078, 0x207B, 0x216E,
/* PC CTX 0 */
0x21C0, 0x21C6, 0x21D0, 0x21D0, 0x21D9, 0x21D9, 0x21E5, 0x21E7,
/* VFD CTX 0 */
0x2200, 0x2204, 0x2208, 0x22A9,
/* GRAS CTX 1 */
0x2400, 0x2404, 0x2408, 0x2467, 0x2470, 0x2478, 0x247B, 0x256E,
/* PC CTX 1 */
0x25C0, 0x25C6, 0x25D0, 0x25D0, 0x25D9, 0x25D9, 0x25E5, 0x25E7,
/* VFD CTX 1 */
0x2600, 0x2604, 0x2608, 0x26A9,
/* XPU */
0x2C00, 0x2C01, 0x2C10, 0x2C10, 0x2C12, 0x2C16, 0x2C1D, 0x2C20,
0x2C28, 0x2C28, 0x2C30, 0x2C30, 0x2C32, 0x2C36, 0x2C40, 0x2C40,
0x2C50, 0x2C50, 0x2C52, 0x2C56, 0x2C80, 0x2C80, 0x2C94, 0x2C95,
/* VBIF */
0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x301D, 0x3020, 0x3022,
0x3024, 0x3026, 0x3028, 0x302A, 0x302C, 0x302D, 0x3030, 0x3031,
0x3034, 0x3036, 0x3038, 0x3038, 0x303C, 0x303D, 0x3040, 0x3040,
0x3049, 0x3049, 0x3058, 0x3058, 0x305B, 0x3061, 0x3064, 0x3068,
0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x330C, 0x330C,
0x3310, 0x3310, 0x3400, 0x3401, 0x3410, 0x3410, 0x3412, 0x3416,
0x341D, 0x3420, 0x3428, 0x3428, 0x3430, 0x3430, 0x3432, 0x3436,
0x3440, 0x3440, 0x3450, 0x3450, 0x3452, 0x3456, 0x3480, 0x3480,
0x3494, 0x3495, 0x4000, 0x4000, 0x4002, 0x4002, 0x4004, 0x4004,
0x4008, 0x400A, 0x400C, 0x400D, 0x400F, 0x4012, 0x4014, 0x4016,
0x401D, 0x401D, 0x4020, 0x4027, 0x4060, 0x4062, 0x4200, 0x4200,
0x4300, 0x4300, 0x4400, 0x4400, 0x4500, 0x4500, 0x4800, 0x4802,
0x480F, 0x480F, 0x4811, 0x4811, 0x4813, 0x4813, 0x4815, 0x4816,
0x482B, 0x482B, 0x4857, 0x4857, 0x4883, 0x4883, 0x48AF, 0x48AF,
0x48C5, 0x48C5, 0x48E5, 0x48E5, 0x4905, 0x4905, 0x4925, 0x4925,
0x4945, 0x4945, 0x4950, 0x4950, 0x495B, 0x495B, 0x4980, 0x498E,
0x4B00, 0x4B00, 0x4C00, 0x4C00, 0x4D00, 0x4D00, 0x4E00, 0x4E00,
0x4E80, 0x4E80, 0x4F00, 0x4F00, 0x4F08, 0x4F08, 0x4F10, 0x4F10,
0x4F18, 0x4F18, 0x4F20, 0x4F20, 0x4F30, 0x4F30, 0x4F60, 0x4F60,
0x4F80, 0x4F81, 0x4F88, 0x4F89, 0x4FEE, 0x4FEE, 0x4FF3, 0x4FF3,
0x6000, 0x6001, 0x6008, 0x600F, 0x6014, 0x6016, 0x6018, 0x601B,
0x61FD, 0x61FD, 0x623C, 0x623C, 0x6380, 0x6380, 0x63A0, 0x63A0,
0x63C0, 0x63C1, 0x63C8, 0x63C9, 0x63D0, 0x63D4, 0x63D6, 0x63D6,
0x63EE, 0x63EE, 0x6400, 0x6401, 0x6408, 0x640F, 0x6414, 0x6416,
0x6418, 0x641B, 0x65FD, 0x65FD, 0x663C, 0x663C, 0x6780, 0x6780,
0x67A0, 0x67A0, 0x67C0, 0x67C1, 0x67C8, 0x67C9, 0x67D0, 0x67D4,
0x67D6, 0x67D6, 0x67EE, 0x67EE, 0x6800, 0x6801, 0x6808, 0x680F,
0x6814, 0x6816, 0x6818, 0x681B, 0x69FD, 0x69FD, 0x6A3C, 0x6A3C,
0x6B80, 0x6B80, 0x6BA0, 0x6BA0, 0x6BC0, 0x6BC1, 0x6BC8, 0x6BC9,
0x6BD0, 0x6BD4, 0x6BD6, 0x6BD6, 0x6BEE, 0x6BEE,
~0 /* sentinel */
};
static const unsigned int a405_registers[] = {
/* RBBM */
0x0000, 0x0002, 0x0004, 0x0021, 0x0023, 0x0024, 0x0026, 0x0026,
0x0028, 0x002B, 0x002E, 0x0034, 0x0037, 0x0044, 0x0047, 0x0066,
0x0068, 0x0095, 0x009C, 0x0170, 0x0174, 0x01AF,
/* CP */
0x0200, 0x0233, 0x0240, 0x0250, 0x04C0, 0x04DD, 0x0500, 0x050B,
0x0578, 0x058F,
/* VSC */
0x0C00, 0x0C03, 0x0C08, 0x0C41, 0x0C50, 0x0C51,
/* GRAS */
0x0C80, 0x0C81, 0x0C88, 0x0C8F,
/* RB */
0x0CC0, 0x0CC0, 0x0CC4, 0x0CD2,
/* PC */
0x0D00, 0x0D0C, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
/* VFD */
0x0E40, 0x0E4A,
/* VPC */
0x0E60, 0x0E61, 0x0E63, 0x0E68,
/* UCHE */
0x0E80, 0x0E84, 0x0E88, 0x0E95,
/* GRAS CTX 0 */
0x2000, 0x2004, 0x2008, 0x2067, 0x2070, 0x2078, 0x207B, 0x216E,
/* PC CTX 0 */
0x21C0, 0x21C6, 0x21D0, 0x21D0, 0x21D9, 0x21D9, 0x21E5, 0x21E7,
/* VFD CTX 0 */
0x2200, 0x2204, 0x2208, 0x22A9,
/* GRAS CTX 1 */
0x2400, 0x2404, 0x2408, 0x2467, 0x2470, 0x2478, 0x247B, 0x256E,
/* PC CTX 1 */
0x25C0, 0x25C6, 0x25D0, 0x25D0, 0x25D9, 0x25D9, 0x25E5, 0x25E7,
/* VFD CTX 1 */
0x2600, 0x2604, 0x2608, 0x26A9,
/* VBIF version 0x20050000 */
0x3000, 0x3007, 0x302C, 0x302C, 0x3030, 0x3030, 0x3034, 0x3036,
0x3038, 0x3038, 0x303C, 0x303D, 0x3040, 0x3040, 0x3049, 0x3049,
0x3058, 0x3058, 0x305B, 0x3061, 0x3064, 0x3068, 0x306C, 0x306D,
0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094, 0x3098, 0x3098,
0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8, 0x30D0, 0x30D0,
0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100, 0x3108, 0x3108,
0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120, 0x3124, 0x3125,
0x3129, 0x3129, 0x340C, 0x340C, 0x3410, 0x3410,
~0 /* sentinel */
};
static struct msm_gpu_state *a4xx_gpu_state_get(struct msm_gpu *gpu)
{
struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return ERR_PTR(-ENOMEM);
adreno_gpu_state_get(gpu, state);
state->rbbm_status = gpu_read(gpu, REG_A4XX_RBBM_STATUS);
return state;
}
static void a4xx_dump(struct msm_gpu *gpu)
{
printk("status: %08x\n",
gpu_read(gpu, REG_A4XX_RBBM_STATUS));
adreno_dump(gpu);
}
static int a4xx_pm_resume(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int ret;
ret = msm_gpu_pm_resume(gpu);
if (ret)
return ret;
if (adreno_is_a430(adreno_gpu)) {
unsigned int reg;
/* Set the default register values; set SW_COLLAPSE to 0 */
gpu_write(gpu, REG_A4XX_RBBM_POWER_CNTL_IP, 0x778000);
do {
udelay(5);
reg = gpu_read(gpu, REG_A4XX_RBBM_POWER_STATUS);
} while (!(reg & A4XX_RBBM_POWER_CNTL_IP_SP_TP_PWR_ON));
}
return 0;
}
static int a4xx_pm_suspend(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int ret;
ret = msm_gpu_pm_suspend(gpu);
if (ret)
return ret;
if (adreno_is_a430(adreno_gpu)) {
/* Set the default register values; set SW_COLLAPSE to 1 */
gpu_write(gpu, REG_A4XX_RBBM_POWER_CNTL_IP, 0x778001);
}
return 0;
}
static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
*value = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO);
return 0;
}
static u64 a4xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate)
{
u64 busy_cycles;
busy_cycles = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_RBBM_1_LO);
*out_sample_rate = clk_get_rate(gpu->core_clk);
return busy_cycles;
}
static u32 a4xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
ring->memptrs->rptr = gpu_read(gpu, REG_A4XX_CP_RB_RPTR);
return ring->memptrs->rptr;
}
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
.set_param = adreno_set_param,
.hw_init = a4xx_hw_init,
.pm_suspend = a4xx_pm_suspend,
.pm_resume = a4xx_pm_resume,
.recover = a4xx_recover,
.submit = a4xx_submit,
.active_ring = adreno_active_ring,
.irq = a4xx_irq,
.destroy = a4xx_destroy,
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
.show = adreno_show,
#endif
.gpu_busy = a4xx_gpu_busy,
.gpu_state_get = a4xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
.create_address_space = adreno_create_address_space,
.get_rptr = a4xx_get_rptr,
},
.get_timestamp = a4xx_get_timestamp,
};
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
{
struct a4xx_gpu *a4xx_gpu = NULL;
struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
struct icc_path *ocmem_icc_path;
struct icc_path *icc_path;
int ret;
if (!pdev) {
DRM_DEV_ERROR(dev->dev, "no a4xx device\n");
ret = -ENXIO;
goto fail;
}
a4xx_gpu = kzalloc(sizeof(*a4xx_gpu), GFP_KERNEL);
if (!a4xx_gpu) {
ret = -ENOMEM;
goto fail;
}
adreno_gpu = &a4xx_gpu->base;
gpu = &adreno_gpu->base;
gpu->perfcntrs = NULL;
gpu->num_perfcntrs = 0;
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret)
goto fail;
adreno_gpu->registers = adreno_is_a405(adreno_gpu) ? a405_registers :
a4xx_registers;
/* if needed, allocate gmem: */
ret = adreno_gpu_ocmem_init(dev->dev, adreno_gpu,
&a4xx_gpu->ocmem);
if (ret)
goto fail;
if (!gpu->aspace) {
/* TODO we think it is possible to configure the GPU to
* restrict access to VRAM carveout. But the required
* registers are unknown. For now just bail out and
* limp along with just modesetting. If it turns out
* to not be possible to restrict access, then we must
* implement a cmdstream validator.
*/
DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
if (!allow_vram_carveout) {
ret = -ENXIO;
goto fail;
}
}
icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
if (IS_ERR(icc_path)) {
ret = PTR_ERR(icc_path);
goto fail;
}
ocmem_icc_path = devm_of_icc_get(&pdev->dev, "ocmem");
if (IS_ERR(ocmem_icc_path)) {
ret = PTR_ERR(ocmem_icc_path);
/* allow -ENODATA, ocmem icc is optional */
if (ret != -ENODATA)
goto fail;
ocmem_icc_path = NULL;
}
/*
* Set the ICC path to maximum speed for now by multiplying the fastest
* frequency by the bus width (8). We'll want to scale this later on to
* improve battery life.
*/
icc_set_bw(icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
icc_set_bw(ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
return gpu;
fail:
if (a4xx_gpu)
a4xx_destroy(&a4xx_gpu->base.base);
return ERR_PTR(ret);
}
| linux-master | drivers/gpu/drm/msm/adreno/a4xx_gpu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013-2014 Red Hat
* Author: Rob Clark <[email protected]>
*
* Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
*/
#include "adreno_gpu.h"
bool hang_debug = false;
MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)");
module_param_named(hang_debug, hang_debug, bool, 0600);
bool snapshot_debugbus = false;
MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off)");
module_param_named(snapshot_debugbus, snapshot_debugbus, bool, 0600);
bool allow_vram_carveout = false;
MODULE_PARM_DESC(allow_vram_carveout, "Allow using VRAM Carveout, in place of IOMMU");
module_param_named(allow_vram_carveout, allow_vram_carveout, bool, 0600);
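/*
* Table of known Adreno GPUs, matched against the chip id (and, where
* set, the machine compatible) in adreno_info() below.
*/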
static const struct adreno_info gpulist[] = {
{
.chip_ids = ADRENO_CHIP_IDS(0x02000000),
.family = ADRENO_2XX_GEN1,
.revn = 200,
.fw = {
[ADRENO_FW_PM4] = "yamato_pm4.fw",
[ADRENO_FW_PFP] = "yamato_pfp.fw",
},
.gmem = SZ_256K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a2xx_gpu_init,
}, { /* a200 on i.mx51 has only 128KiB gmem */
.chip_ids = ADRENO_CHIP_IDS(0x02000001),
.family = ADRENO_2XX_GEN1,
.revn = 201,
.fw = {
[ADRENO_FW_PM4] = "yamato_pm4.fw",
[ADRENO_FW_PFP] = "yamato_pfp.fw",
},
.gmem = SZ_128K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a2xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x02020000),
.family = ADRENO_2XX_GEN2,
.revn = 220,
.fw = {
[ADRENO_FW_PM4] = "leia_pm4_470.fw",
[ADRENO_FW_PFP] = "leia_pfp_470.fw",
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a2xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(
0x03000512,
0x03000520
),
.family = ADRENO_3XX,
.revn = 305,
.fw = {
[ADRENO_FW_PM4] = "a300_pm4.fw",
[ADRENO_FW_PFP] = "a300_pfp.fw",
},
.gmem = SZ_256K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x03000600),
.family = ADRENO_3XX,
.revn = 307, /* because a305c is revn==306 */
.fw = {
[ADRENO_FW_PM4] = "a300_pm4.fw",
[ADRENO_FW_PFP] = "a300_pfp.fw",
},
.gmem = SZ_128K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(
0x03020000,
0x03020001,
0x03020002
),
.family = ADRENO_3XX,
.revn = 320,
.fw = {
[ADRENO_FW_PM4] = "a300_pm4.fw",
[ADRENO_FW_PFP] = "a300_pfp.fw",
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(
0x03030000,
0x03030001,
0x03030002
),
.family = ADRENO_3XX,
.revn = 330,
.fw = {
[ADRENO_FW_PM4] = "a330_pm4.fw",
[ADRENO_FW_PFP] = "a330_pfp.fw",
},
.gmem = SZ_1M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x04000500),
.family = ADRENO_4XX,
.revn = 405,
.fw = {
[ADRENO_FW_PM4] = "a420_pm4.fw",
[ADRENO_FW_PFP] = "a420_pfp.fw",
},
.gmem = SZ_256K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a4xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x04020000),
.family = ADRENO_4XX,
.revn = 420,
.fw = {
[ADRENO_FW_PM4] = "a420_pm4.fw",
[ADRENO_FW_PFP] = "a420_pfp.fw",
},
.gmem = (SZ_1M + SZ_512K),
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a4xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x04030002),
.family = ADRENO_4XX,
.revn = 430,
.fw = {
[ADRENO_FW_PM4] = "a420_pm4.fw",
[ADRENO_FW_PFP] = "a420_pfp.fw",
},
.gmem = (SZ_1M + SZ_512K),
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a4xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05000600),
.family = ADRENO_5XX,
.revn = 506,
.fw = {
[ADRENO_FW_PM4] = "a530_pm4.fw",
[ADRENO_FW_PFP] = "a530_pfp.fw",
},
.gmem = (SZ_128K + SZ_8K),
/*
* Increase inactive period to 250 to avoid bouncing
* the GDSC which appears to make it grumpy
*/
.inactive_period = 250,
.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
ADRENO_QUIRK_LMLOADKILL_DISABLE,
.init = a5xx_gpu_init,
.zapfw = "a506_zap.mdt",
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05000800),
.family = ADRENO_5XX,
.revn = 508,
.fw = {
[ADRENO_FW_PM4] = "a530_pm4.fw",
[ADRENO_FW_PFP] = "a530_pfp.fw",
},
.gmem = (SZ_128K + SZ_8K),
/*
* Increase inactive period to 250 to avoid bouncing
* the GDSC which appears to make it grumpy
*/
.inactive_period = 250,
.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
.init = a5xx_gpu_init,
.zapfw = "a508_zap.mdt",
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05000900),
.family = ADRENO_5XX,
.revn = 509,
.fw = {
[ADRENO_FW_PM4] = "a530_pm4.fw",
[ADRENO_FW_PFP] = "a530_pfp.fw",
},
.gmem = (SZ_256K + SZ_16K),
/*
* Increase inactive period to 250 to avoid bouncing
* the GDSC which appears to make it grumpy
*/
.inactive_period = 250,
.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
.init = a5xx_gpu_init,
/* Adreno 509 uses the same ZAP as 512 */
.zapfw = "a512_zap.mdt",
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05010000),
.family = ADRENO_5XX,
.revn = 510,
.fw = {
[ADRENO_FW_PM4] = "a530_pm4.fw",
[ADRENO_FW_PFP] = "a530_pfp.fw",
},
.gmem = SZ_256K,
/*
* Increase inactive period to 250 to avoid bouncing
* the GDSC which appears to make it grumpy
*/
.inactive_period = 250,
.init = a5xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05010200),
.family = ADRENO_5XX,
.revn = 512,
.fw = {
[ADRENO_FW_PM4] = "a530_pm4.fw",
[ADRENO_FW_PFP] = "a530_pfp.fw",
},
.gmem = (SZ_256K + SZ_16K),
/*
* Increase inactive period to 250 to avoid bouncing
* the GDSC which appears to make it grumpy
*/
.inactive_period = 250,
.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
.init = a5xx_gpu_init,
.zapfw = "a512_zap.mdt",
}, {
.chip_ids = ADRENO_CHIP_IDS(
0x05030002,
0x05030004
),
.family = ADRENO_5XX,
.revn = 530,
.fw = {
[ADRENO_FW_PM4] = "a530_pm4.fw",
[ADRENO_FW_PFP] = "a530_pfp.fw",
[ADRENO_FW_GPMU] = "a530v3_gpmu.fw2",
},
.gmem = SZ_1M,
/*
* Increase inactive period to 250 to avoid bouncing
* the GDSC which appears to make it grumpy
*/
.inactive_period = 250,
.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
ADRENO_QUIRK_FAULT_DETECT_MASK,
.init = a5xx_gpu_init,
.zapfw = "a530_zap.mdt",
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05040001),
.family = ADRENO_5XX,
.revn = 540,
.fw = {
[ADRENO_FW_PM4] = "a530_pm4.fw",
[ADRENO_FW_PFP] = "a530_pfp.fw",
[ADRENO_FW_GPMU] = "a540_gpmu.fw2",
},
.gmem = SZ_1M,
/*
* Increase inactive period to 250 to avoid bouncing
* the GDSC which appears to make it grumpy
*/
.inactive_period = 250,
.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
.init = a5xx_gpu_init,
.zapfw = "a540_zap.mdt",
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06010000),
.family = ADRENO_6XX_GEN1,
.revn = 610,
.fw = {
[ADRENO_FW_SQE] = "a630_sqe.fw",
},
.gmem = (SZ_128K + SZ_4K),
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.zapfw = "a610_zap.mdt",
.hwcg = a612_hwcg,
/*
* There are (at least) three SoCs implementing A610: SM6125
* (trinket), SM6115 (bengal) and SM6225 (khaje). Trinket does
* not have speedbinning, as only a single SKU exists and we
* don't support khaje upstream yet. Hence, this matching
* table is only valid for bengal.
*/
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 206, 1 },
{ 200, 2 },
{ 157, 3 },
{ 127, 4 },
),
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06010800),
.family = ADRENO_6XX_GEN1,
.revn = 618,
.fw = {
[ADRENO_FW_SQE] = "a630_sqe.fw",
[ADRENO_FW_GMU] = "a630_gmu.bin",
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
.init = a6xx_gpu_init,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 169, 1 },
{ 174, 2 },
),
}, {
.machine = "qcom,sm4350",
.chip_ids = ADRENO_CHIP_IDS(0x06010900),
.family = ADRENO_6XX_GEN1,
.revn = 619,
.fw = {
[ADRENO_FW_SQE] = "a630_sqe.fw",
[ADRENO_FW_GMU] = "a619_gmu.bin",
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.zapfw = "a615_zap.mdt",
.hwcg = a615_hwcg,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 138, 1 },
{ 92, 2 },
),
}, {
.machine = "qcom,sm6375",
.chip_ids = ADRENO_CHIP_IDS(0x06010900),
.family = ADRENO_6XX_GEN1,
.revn = 619,
.fw = {
[ADRENO_FW_SQE] = "a630_sqe.fw",
[ADRENO_FW_GMU] = "a619_gmu.bin",
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.zapfw = "a615_zap.mdt",
.hwcg = a615_hwcg,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 190, 1 },
{ 177, 2 },
),
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06010900),
.family = ADRENO_6XX_GEN1,
.revn = 619,
.fw = {
[ADRENO_FW_SQE] = "a630_sqe.fw",
[ADRENO_FW_GMU] = "a619_gmu.bin",
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
.init = a6xx_gpu_init,
.zapfw = "a615_zap.mdt",
.hwcg = a615_hwcg,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 120, 4 },
{ 138, 3 },
{ 169, 2 },
{ 180, 1 },
),
}, {
.chip_ids = ADRENO_CHIP_IDS(
0x06030001,
0x06030002
),
.family = ADRENO_6XX_GEN1,
.revn = 630,
.fw = {
[ADRENO_FW_SQE] = "a630_sqe.fw",
[ADRENO_FW_GMU] = "a630_gmu.bin",
},
.gmem = SZ_1M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
.init = a6xx_gpu_init,
.zapfw = "a630_zap.mdt",
.hwcg = a630_hwcg,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06040001),
.family = ADRENO_6XX_GEN2,
.revn = 640,
.fw = {
[ADRENO_FW_SQE] = "a630_sqe.fw",
[ADRENO_FW_GMU] = "a640_gmu.bin",
},
.gmem = SZ_1M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
.init = a6xx_gpu_init,
.zapfw = "a640_zap.mdt",
.hwcg = a640_hwcg,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 1, 1 },
),
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06050002),
.family = ADRENO_6XX_GEN3,
.revn = 650,
.fw = {
[ADRENO_FW_SQE] = "a650_sqe.fw",
[ADRENO_FW_GMU] = "a650_gmu.bin",
},
.gmem = SZ_1M + SZ_128K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
.init = a6xx_gpu_init,
.zapfw = "a650_zap.mdt",
.hwcg = a650_hwcg,
.address_space_size = SZ_16G,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 1, 1 },
{ 2, 3 }, /* Yep, 2 and 3 are swapped! :/ */
{ 3, 2 },
),
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06060001),
.family = ADRENO_6XX_GEN4,
.revn = 660,
.fw = {
[ADRENO_FW_SQE] = "a660_sqe.fw",
[ADRENO_FW_GMU] = "a660_gmu.bin",
},
.gmem = SZ_1M + SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
.init = a6xx_gpu_init,
.zapfw = "a660_zap.mdt",
.hwcg = a660_hwcg,
.address_space_size = SZ_16G,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06030500),
.family = ADRENO_6XX_GEN4,
.fw = {
[ADRENO_FW_SQE] = "a660_sqe.fw",
[ADRENO_FW_GMU] = "a660_gmu.bin",
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
.init = a6xx_gpu_init,
.hwcg = a660_hwcg,
.address_space_size = SZ_16G,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 117, 0 },
{ 190, 1 },
),
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06080000),
.family = ADRENO_6XX_GEN2,
.revn = 680,
.fw = {
[ADRENO_FW_SQE] = "a630_sqe.fw",
[ADRENO_FW_GMU] = "a640_gmu.bin",
},
.gmem = SZ_2M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
.init = a6xx_gpu_init,
.zapfw = "a640_zap.mdt",
.hwcg = a640_hwcg,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06090000),
.family = ADRENO_6XX_GEN4,
.fw = {
[ADRENO_FW_SQE] = "a660_sqe.fw",
[ADRENO_FW_GMU] = "a660_gmu.bin",
},
.gmem = SZ_4M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
.init = a6xx_gpu_init,
.zapfw = "a690_zap.mdt",
.hwcg = a690_hwcg,
.address_space_size = SZ_16G,
},
};
MODULE_FIRMWARE("qcom/a300_pm4.fw");
MODULE_FIRMWARE("qcom/a300_pfp.fw");
MODULE_FIRMWARE("qcom/a330_pm4.fw");
MODULE_FIRMWARE("qcom/a330_pfp.fw");
MODULE_FIRMWARE("qcom/a420_pm4.fw");
MODULE_FIRMWARE("qcom/a420_pfp.fw");
MODULE_FIRMWARE("qcom/a530_pm4.fw");
MODULE_FIRMWARE("qcom/a530_pfp.fw");
MODULE_FIRMWARE("qcom/a530v3_gpmu.fw2");
MODULE_FIRMWARE("qcom/a530_zap.mdt");
MODULE_FIRMWARE("qcom/a530_zap.b00");
MODULE_FIRMWARE("qcom/a530_zap.b01");
MODULE_FIRMWARE("qcom/a530_zap.b02");
MODULE_FIRMWARE("qcom/a540_gpmu.fw2");
MODULE_FIRMWARE("qcom/a619_gmu.bin");
MODULE_FIRMWARE("qcom/a630_sqe.fw");
MODULE_FIRMWARE("qcom/a630_gmu.bin");
MODULE_FIRMWARE("qcom/a630_zap.mbn");
MODULE_FIRMWARE("qcom/a640_gmu.bin");
MODULE_FIRMWARE("qcom/a650_gmu.bin");
MODULE_FIRMWARE("qcom/a650_sqe.fw");
MODULE_FIRMWARE("qcom/a660_gmu.bin");
MODULE_FIRMWARE("qcom/a660_sqe.fw");
MODULE_FIRMWARE("qcom/leia_pfp_470.fw");
MODULE_FIRMWARE("qcom/leia_pm4_470.fw");
MODULE_FIRMWARE("qcom/yamato_pfp.fw");
MODULE_FIRMWARE("qcom/yamato_pm4.fw");
static const struct adreno_info *adreno_info(uint32_t chip_id)
{
/* identify gpu: */
for (int i = 0; i < ARRAY_SIZE(gpulist); i++) {
const struct adreno_info *info = &gpulist[i];
if (info->machine && !of_machine_is_compatible(info->machine))
continue;
for (int j = 0; info->chip_ids[j]; j++)
if (info->chip_ids[j] == chip_id)
return info;
}
return NULL;
}
struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
struct msm_gpu *gpu = NULL;
struct adreno_gpu *adreno_gpu;
int ret;
if (pdev)
gpu = dev_to_gpu(&pdev->dev);
if (!gpu) {
dev_err_once(dev->dev, "no GPU device was found\n");
return NULL;
}
adreno_gpu = to_adreno_gpu(gpu);
/*
* The number one reason for HW init to fail is if the firmware isn't
* loaded yet. Try that first and don't bother continuing on
* otherwise
*/
ret = adreno_load_fw(adreno_gpu);
if (ret)
return NULL;
if (gpu->funcs->ucode_load) {
ret = gpu->funcs->ucode_load(gpu);
if (ret)
return NULL;
}
/*
* Now that we have firmware loaded, and are ready to begin
* booting the gpu, go ahead and enable runpm:
*/
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
pm_runtime_put_noidle(&pdev->dev);
DRM_DEV_ERROR(dev->dev, "Couldn't power up the GPU: %d\n", ret);
goto err_disable_rpm;
}
mutex_lock(&gpu->lock);
ret = msm_gpu_hw_init(gpu);
mutex_unlock(&gpu->lock);
if (ret) {
DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
goto err_put_rpm;
}
pm_runtime_put_autosuspend(&pdev->dev);
#ifdef CONFIG_DEBUG_FS
if (gpu->funcs->debugfs_init) {
gpu->funcs->debugfs_init(gpu, dev->primary);
gpu->funcs->debugfs_init(gpu, dev->render);
}
#endif
return gpu;
err_put_rpm:
pm_runtime_put_sync_suspend(&pdev->dev);
err_disable_rpm:
pm_runtime_disable(&pdev->dev);
return NULL;
}
static int find_chipid(struct device *dev, uint32_t *chipid)
{
struct device_node *node = dev->of_node;
const char *compat;
int ret;
/* first search the compat strings for qcom,adreno-XYZ.W: */
ret = of_property_read_string_index(node, "compatible", 0, &compat);
if (ret == 0) {
unsigned int r, patch;
if (sscanf(compat, "qcom,adreno-%u.%u", &r, &patch) == 2 ||
sscanf(compat, "amd,imageon-%u.%u", &r, &patch) == 2) {
uint32_t core, major, minor;
core = r / 100;
r %= 100;
major = r / 10;
r %= 10;
minor = r;
*chipid = (core << 24) |
(major << 16) |
(minor << 8) |
patch;
return 0;
}
if (sscanf(compat, "qcom,adreno-%08x", chipid) == 1)
return 0;
}
/* and if that fails, fall back to legacy "qcom,chipid" property: */
ret = of_property_read_u32(node, "qcom,chipid", chipid);
if (ret) {
DRM_DEV_ERROR(dev, "could not parse qcom,chipid: %d\n", ret);
return ret;
}
dev_warn(dev, "Using legacy qcom,chipid binding!\n");
return 0;
}
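/*
* Component bind callback: resolve the chip id from DT, look up the
* matching adreno_info entry, and instantiate the GPU via its init() hook.
*/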
static int adreno_bind(struct device *dev, struct device *master, void *data)
{
static struct adreno_platform_config config = {};
const struct adreno_info *info;
struct msm_drm_private *priv = dev_get_drvdata(master);
struct drm_device *drm = priv->dev;
struct msm_gpu *gpu;
int ret;
ret = find_chipid(dev, &config.chip_id);
if (ret)
return ret;
dev->platform_data = &config;
priv->gpu_pdev = to_platform_device(dev);
info = adreno_info(config.chip_id);
if (!info) {
dev_warn(drm->dev, "Unknown GPU revision: %"ADRENO_CHIPID_FMT"\n",
ADRENO_CHIPID_ARGS(config.chip_id));
return -ENXIO;
}
config.info = info;
DBG("Found GPU: %"ADRENO_CHIPID_FMT, ADRENO_CHIPID_ARGS(config.chip_id));
priv->is_a2xx = info->family < ADRENO_3XX;
priv->has_cached_coherent =
!!(info->quirks & ADRENO_QUIRK_HAS_CACHED_COHERENT);
gpu = info->init(drm);
if (IS_ERR(gpu)) {
dev_warn(drm->dev, "failed to load adreno gpu\n");
return PTR_ERR(gpu);
}
ret = dev_pm_opp_of_find_icc_paths(dev, NULL);
if (ret)
return ret;
return 0;
}
static int adreno_system_suspend(struct device *dev);
static void adreno_unbind(struct device *dev, struct device *master,
void *data)
{
struct msm_drm_private *priv = dev_get_drvdata(master);
struct msm_gpu *gpu = dev_to_gpu(dev);
if (pm_runtime_enabled(dev))
WARN_ON_ONCE(adreno_system_suspend(dev));
gpu->funcs->destroy(gpu);
priv->gpu_pdev = NULL;
}
static const struct component_ops a3xx_ops = {
.bind = adreno_bind,
.unbind = adreno_unbind,
};
static void adreno_device_register_headless(void)
{
/* on imx5, we don't have a top-level mdp/dpu node; this registers a
* dummy "msm" platform device for the driver to bind to in that case
*/
struct platform_device_info dummy_info = {
.parent = NULL,
.name = "msm",
.id = -1,
.res = NULL,
.num_res = 0,
.data = NULL,
.size_data = 0,
.dma_mask = ~0,
};
platform_device_register_full(&dummy_info);
}
static int adreno_probe(struct platform_device *pdev)
{
int ret;
ret = component_add(&pdev->dev, &a3xx_ops);
if (ret)
return ret;
if (of_device_is_compatible(pdev->dev.of_node, "amd,imageon"))
adreno_device_register_headless();
return 0;
}
static int adreno_remove(struct platform_device *pdev)
{
component_del(&pdev->dev, &a3xx_ops);
return 0;
}
static void adreno_shutdown(struct platform_device *pdev)
{
WARN_ON_ONCE(adreno_system_suspend(&pdev->dev));
}
static const struct of_device_id dt_match[] = {
{ .compatible = "qcom,adreno" },
{ .compatible = "qcom,adreno-3xx" },
/* for compatibility with imx5 gpu: */
{ .compatible = "amd,imageon" },
/* for backwards compat w/ downstream kgsl DT files: */
{ .compatible = "qcom,kgsl-3d0" },
{}
};
static int adreno_runtime_resume(struct device *dev)
{
struct msm_gpu *gpu = dev_to_gpu(dev);
return gpu->funcs->pm_resume(gpu);
}
static int adreno_runtime_suspend(struct device *dev)
{
struct msm_gpu *gpu = dev_to_gpu(dev);
/*
* We should be holding a runpm ref, which will prevent
* runtime suspend. In the system suspend path, we've
* already waited for active jobs to complete.
*/
WARN_ON_ONCE(gpu->active_submits);
return gpu->funcs->pm_suspend(gpu);
}
static void suspend_scheduler(struct msm_gpu *gpu)
{
int i;
/*
* Shut down the scheduler before we force suspend, so that
* suspend isn't racing with scheduler kthread feeding us
* more work.
*
* Note, we just want to park the thread, and let any jobs
* that are already on the hw queue complete normally, as
* opposed to the drm_sched_stop() path used for handling
* faulting/timed-out jobs. We can't really cancel any jobs
* already on the hw queue without racing with the GPU.
*/
for (i = 0; i < gpu->nr_rings; i++) {
struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
kthread_park(sched->thread);
}
}
static void resume_scheduler(struct msm_gpu *gpu)
{
int i;
for (i = 0; i < gpu->nr_rings; i++) {
struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
kthread_unpark(sched->thread);
}
}
static int adreno_system_suspend(struct device *dev)
{
struct msm_gpu *gpu = dev_to_gpu(dev);
int remaining, ret;
if (!gpu)
return 0;
suspend_scheduler(gpu);
remaining = wait_event_timeout(gpu->retire_event,
gpu->active_submits == 0,
msecs_to_jiffies(1000));
if (remaining == 0) {
dev_err(dev, "Timeout waiting for GPU to suspend\n");
ret = -EBUSY;
goto out;
}
ret = pm_runtime_force_suspend(dev);
out:
if (ret)
resume_scheduler(gpu);
return ret;
}
static int adreno_system_resume(struct device *dev)
{
struct msm_gpu *gpu = dev_to_gpu(dev);
if (!gpu)
return 0;
resume_scheduler(gpu);
return pm_runtime_force_resume(dev);
}
static const struct dev_pm_ops adreno_pm_ops = {
SYSTEM_SLEEP_PM_OPS(adreno_system_suspend, adreno_system_resume)
RUNTIME_PM_OPS(adreno_runtime_suspend, adreno_runtime_resume, NULL)
};
static struct platform_driver adreno_driver = {
.probe = adreno_probe,
.remove = adreno_remove,
.shutdown = adreno_shutdown,
.driver = {
.name = "adreno",
.of_match_table = dt_match,
.pm = &adreno_pm_ops,
},
};
void __init adreno_register(void)
{
platform_driver_register(&adreno_driver);
}
void __exit adreno_unregister(void)
{
platform_driver_unregister(&adreno_driver);
}
| linux-master | drivers/gpu/drm/msm/adreno/adreno_device.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/pm_opp.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>
#include "msm_gem.h"
#include "msm_mmu.h"
#include "a5xx_gpu.h"
extern bool hang_debug;
static void a5xx_dump(struct msm_gpu *gpu);
#define GPU_PAS_ID 13
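/*
* Emit a CP_WHERE_AM_I packet so the CP writes its current read pointer
* to the shadow location, when the firmware supports it.
*/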
static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
if (a5xx_gpu->has_whereami) {
OUT_PKT7(ring, CP_WHERE_AM_I, 2);
OUT_RING(ring, lower_32_bits(shadowptr(a5xx_gpu, ring)));
OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring)));
}
}
void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
bool sync)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
uint32_t wptr;
unsigned long flags;
/*
* Most flush operations need to issue a WHERE_AM_I opcode to sync up
* the rptr shadow
*/
if (sync)
update_shadow_rptr(gpu, ring);
spin_lock_irqsave(&ring->preempt_lock, flags);
/* Copy the shadow to the actual register */
ring->cur = ring->next;
/* Make sure to wrap wptr if we need to */
wptr = get_wptr(ring);
spin_unlock_irqrestore(&ring->preempt_lock, flags);
/* Make sure everything is posted before making a decision */
mb();
/* Update HW if this is the current ring and we are not in preempt */
if (a5xx_gpu->cur_ring == ring && !a5xx_in_preempt(a5xx_gpu))
gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
}
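/*
* Copy the submit's commands directly into the ringbuffer instead of
* issuing indirect buffers; used for the CONFIG_DRM_MSM_GPU_SUDO in_rb path.
*/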
static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct msm_ringbuffer *ring = submit->ring;
struct drm_gem_object *obj;
uint32_t *ptr, dwords;
unsigned int i;
for (i = 0; i < submit->nr_cmds; i++) {
switch (submit->cmd[i].type) {
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
/* copy commands into RB: */
obj = submit->bos[submit->cmd[i].idx].obj;
dwords = submit->cmd[i].size;
ptr = msm_gem_get_vaddr(obj);
/* _get_vaddr() shouldn't fail at this point,
* since we've already mapped it once in
* submit_reloc()
*/
if (WARN_ON(IS_ERR_OR_NULL(ptr)))
return;
for (i = 0; i < dwords; i++) {
/* normally the OUT_PKTn() would wait
* for space for the packet. But since
* we just OUT_RING() the whole thing,
* need to call adreno_wait_ring()
* ourself:
*/
adreno_wait_ring(ring, 1);
OUT_RING(ring, ptr[i]);
}
msm_gem_put_vaddr(obj);
break;
}
}
a5xx_flush(gpu, ring, true);
a5xx_preempt_trigger(gpu);
/* we might not necessarily have a cmd from userspace to
* trigger an event to know that submit has completed, so
* do this manually:
*/
a5xx_idle(gpu, ring);
ring->memptrs->fence = submit->seqno;
msm_gpu_retire(gpu);
}
static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = submit->ring;
unsigned int i, ibs = 0;
if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
gpu->cur_ctx_seqno = 0;
a5xx_submit_in_rb(gpu, submit);
return;
}
OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
OUT_RING(ring, 0x02);
/* Turn off protected mode to write to special registers */
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 0);
/* Set the save preemption record for the ring/command */
OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[submit->ring->id]));
OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[submit->ring->id]));
/* Turn back on protected mode */
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 1);
/* Enable local preemption for finegrain preemption */
OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
OUT_RING(ring, 0x1);
/* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
OUT_RING(ring, 0x02);
/* Submit the commands */
for (i = 0; i < submit->nr_cmds; i++) {
switch (submit->cmd[i].type) {
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
OUT_RING(ring, submit->cmd[i].size);
ibs++;
break;
}
/*
* Periodically update shadow-wptr if needed, so that we
* can see partial progress of submits with large # of
* cmds.. otherwise we could needlessly stall waiting for
* ringbuffer state, simply due to looking at a shadow
* rptr value that has not been updated
*/
if ((ibs % 32) == 0)
update_shadow_rptr(gpu, ring);
}
/*
* Write the render mode to NULL (0) to indicate to the CP that the IBs
* are done rendering - otherwise a lucky preemption would start
* replaying from the last checkpoint
*/
OUT_PKT7(ring, CP_SET_RENDER_MODE, 5);
OUT_RING(ring, 0);
OUT_RING(ring, 0);
OUT_RING(ring, 0);
OUT_RING(ring, 0);
OUT_RING(ring, 0);
/* Turn off IB level preemptions */
OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
OUT_RING(ring, 0x01);
/* Write the fence to the scratch register */
OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
OUT_RING(ring, submit->seqno);
/*
* Execute a CACHE_FLUSH_TS event. This will ensure that the
* timestamp is written to the memory and then triggers the interrupt
*/
OUT_PKT7(ring, CP_EVENT_WRITE, 4);
OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS) |
CP_EVENT_WRITE_0_IRQ);
OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
OUT_RING(ring, submit->seqno);
/* Yield the floor on command completion */
OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
/*
* If dword[2:1] are non-zero, they specify an address for the CP to
* write the value of dword[3] to on preemption complete. Write 0 to
* skip the write
*/
OUT_RING(ring, 0x00);
OUT_RING(ring, 0x00);
/* Data value - not used if the address above is 0 */
OUT_RING(ring, 0x01);
/* Set bit 0 to trigger an interrupt on preempt complete */
OUT_RING(ring, 0x01);
/* A WHERE_AM_I packet is not needed after a YIELD */
a5xx_flush(gpu, ring, false);
/* Check to see if we need to start preemption */
a5xx_preempt_trigger(gpu);
}
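/*
* Hardware clock gating (HWCG) register/value tables programmed when
* enabling clock gating; a50x_hwcg is the reduced single-block variant.
*/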
static const struct adreno_five_hwcg_regs {
u32 offset;
u32 value;
} a5xx_hwcg[] = {
{REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
{REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
{REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
{REG_A5XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
{REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
{REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
{REG_A5XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
{REG_A5XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
{REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
{REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
{REG_A5XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
{REG_A5XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
{REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
{REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
{REG_A5XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
{REG_A5XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
{REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
{REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
{REG_A5XX_RBBM_CLOCK_CNTL_TP2, 0x22222222},
{REG_A5XX_RBBM_CLOCK_CNTL_TP3, 0x22222222},
{REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
{REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
{REG_A5XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
{REG_A5XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
{REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
{REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
{REG_A5XX_RBBM_CLOCK_CNTL3_TP2, 0x00002222},
{REG_A5XX_RBBM_CLOCK_CNTL3_TP3, 0x00002222},
{REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
{REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
{REG_A5XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
{REG_A5XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
{REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
{REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
{REG_A5XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
{REG_A5XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
{REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
{REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
{REG_A5XX_RBBM_CLOCK_HYST3_TP2, 0x00007777},
{REG_A5XX_RBBM_CLOCK_HYST3_TP3, 0x00007777},
{REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
{REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
{REG_A5XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
{REG_A5XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
{REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
{REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
{REG_A5XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
{REG_A5XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
{REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
{REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
{REG_A5XX_RBBM_CLOCK_DELAY3_TP2, 0x00001111},
{REG_A5XX_RBBM_CLOCK_DELAY3_TP3, 0x00001111},
{REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
{REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
{REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
{REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
{REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
{REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
{REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
{REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
{REG_A5XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
{REG_A5XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
{REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
{REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
{REG_A5XX_RBBM_CLOCK_CNTL2_RB2, 0x00222222},
{REG_A5XX_RBBM_CLOCK_CNTL2_RB3, 0x00222222},
{REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
{REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
{REG_A5XX_RBBM_CLOCK_CNTL_CCU2, 0x00022220},
{REG_A5XX_RBBM_CLOCK_CNTL_CCU3, 0x00022220},
{REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
{REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2, 0x04040404},
{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3, 0x04040404},
{REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2, 0x00000002},
{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3, 0x00000002},
{REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
{REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
{REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
{REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
{REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
{REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
{REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
{REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
{REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
{REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
{REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
}, a50x_hwcg[] = {
{REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
{REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
{REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
{REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
{REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
{REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
{REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
{REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
{REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
{REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
{REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
{REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
{REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
{REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
{REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
{REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
{REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
{REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00FFFFF4},
{REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
{REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
{REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
{REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
{REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
{REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
{REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
{REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
{REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
{REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
{REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
{REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
{REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
{REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
{REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
{REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
{REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
{REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
}, a512_hwcg[] = {
{REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
{REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
{REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
{REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
{REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
{REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
{REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
{REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
{REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
{REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
{REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
{REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
{REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
{REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
{REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
{REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
{REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
{REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
{REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
{REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
{REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
{REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
{REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
{REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
{REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
{REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
{REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
{REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
{REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
{REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
{REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
{REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
{REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
{REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
{REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
{REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
{REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
{REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
{REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
{REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
{REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
{REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
{REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
{REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
{REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
{REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
{REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
{REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
{REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
{REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
{REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
{REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
};
void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
const struct adreno_five_hwcg_regs *regs;
unsigned int i, sz;
if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu)) {
regs = a50x_hwcg;
sz = ARRAY_SIZE(a50x_hwcg);
} else if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu)) {
regs = a512_hwcg;
sz = ARRAY_SIZE(a512_hwcg);
} else {
regs = a5xx_hwcg;
sz = ARRAY_SIZE(a5xx_hwcg);
}
for (i = 0; i < sz; i++)
gpu_write(gpu, regs[i].offset,
state ? regs[i].value : 0);
if (adreno_is_a540(adreno_gpu)) {
gpu_write(gpu, REG_A5XX_RBBM_CLOCK_DELAY_GPMU, state ? 0x00000770 : 0);
gpu_write(gpu, REG_A5XX_RBBM_CLOCK_HYST_GPMU, state ? 0x00000004 : 0);
}
gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
}
static int a5xx_me_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct msm_ringbuffer *ring = gpu->rb[0];
OUT_PKT7(ring, CP_ME_INIT, 8);
OUT_RING(ring, 0x0000002F);
/* Enable multiple hardware contexts */
OUT_RING(ring, 0x00000003);
/* Enable error detection */
OUT_RING(ring, 0x20000000);
/* Don't enable header dump */
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
/* Specify workarounds for various microcode issues */
if (adreno_is_a506(adreno_gpu) || adreno_is_a530(adreno_gpu)) {
/* Workaround for token end syncs
* Force a WFI after every direct-render 3D mode draw and every
* 2D mode 3 draw
*/
OUT_RING(ring, 0x0000000B);
} else if (adreno_is_a510(adreno_gpu)) {
/* Workaround for token and syncs */
OUT_RING(ring, 0x00000001);
} else {
/* No workarounds enabled */
OUT_RING(ring, 0x00000000);
}
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
a5xx_flush(gpu, ring, true);
return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
}
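/*
 * Prime the preemption state on ring 0: program the save record address and
 * issue an initial yield so the CP has valid context-switch state before the
 * first preemptible submit. Skipped when only one ring is in use.
 */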
static int a5xx_preempt_start(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = gpu->rb[0];
if (gpu->nr_rings == 1)
return 0;
/* Turn off protected mode to write to special registers */
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 0);
/* Set the save preemption record for the ring/command */
OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[ring->id]));
OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[ring->id]));
/* Turn back on protected mode */
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 1);
OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
OUT_RING(ring, 0x00);
OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
OUT_RING(ring, 0x01);
OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
OUT_RING(ring, 0x01);
/* Yield the floor on command completion */
OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
OUT_RING(ring, 0x00);
OUT_RING(ring, 0x00);
OUT_RING(ring, 0x01);
OUT_RING(ring, 0x01);
	/* A WHERE_AM_I packet is not needed after a YIELD is issued */
a5xx_flush(gpu, ring, false);
return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
}
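/*
 * Check the patch level of the loaded PFP microcode to see whether it
 * supports the WHERE_AM_I opcode, which is needed for the RPTR shadow (and
 * therefore for preemption).
 */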
static void a5xx_ucode_check_version(struct a5xx_gpu *a5xx_gpu,
struct drm_gem_object *obj)
{
u32 *buf = msm_gem_get_vaddr(obj);
if (IS_ERR(buf))
return;
/*
* If the lowest nibble is 0xa that is an indication that this microcode
* has been patched. The actual version is in dword [3] but we only care
* about the patchlevel which is the lowest nibble of dword [3]
*/
if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1)
a5xx_gpu->has_whereami = true;
msm_gem_put_vaddr(obj);
}
static int a5xx_ucode_load(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
int ret;
if (!a5xx_gpu->pm4_bo) {
a5xx_gpu->pm4_bo = adreno_fw_create_bo(gpu,
adreno_gpu->fw[ADRENO_FW_PM4], &a5xx_gpu->pm4_iova);
if (IS_ERR(a5xx_gpu->pm4_bo)) {
ret = PTR_ERR(a5xx_gpu->pm4_bo);
a5xx_gpu->pm4_bo = NULL;
DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PM4: %d\n",
ret);
return ret;
}
msm_gem_object_set_name(a5xx_gpu->pm4_bo, "pm4fw");
}
if (!a5xx_gpu->pfp_bo) {
a5xx_gpu->pfp_bo = adreno_fw_create_bo(gpu,
adreno_gpu->fw[ADRENO_FW_PFP], &a5xx_gpu->pfp_iova);
if (IS_ERR(a5xx_gpu->pfp_bo)) {
ret = PTR_ERR(a5xx_gpu->pfp_bo);
a5xx_gpu->pfp_bo = NULL;
DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PFP: %d\n",
ret);
return ret;
}
msm_gem_object_set_name(a5xx_gpu->pfp_bo, "pfpfw");
a5xx_ucode_check_version(a5xx_gpu, a5xx_gpu->pfp_bo);
}
if (a5xx_gpu->has_whereami) {
if (!a5xx_gpu->shadow_bo) {
a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
sizeof(u32) * gpu->nr_rings,
MSM_BO_WC | MSM_BO_MAP_PRIV,
gpu->aspace, &a5xx_gpu->shadow_bo,
&a5xx_gpu->shadow_iova);
if (IS_ERR(a5xx_gpu->shadow))
return PTR_ERR(a5xx_gpu->shadow);
msm_gem_object_set_name(a5xx_gpu->shadow_bo, "shadow");
}
} else if (gpu->nr_rings > 1) {
/* Disable preemption if WHERE_AM_I isn't available */
a5xx_preempt_fini(gpu);
gpu->nr_rings = 1;
}
return 0;
}
#define SCM_GPU_ZAP_SHADER_RESUME 0
static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int ret;
	/*
	 * The Adreno 506 has the CPZ retention feature and doesn't require
	 * the zap shader to be resumed
	 */
if (adreno_is_a506(adreno_gpu))
return 0;
ret = qcom_scm_set_remote_state(SCM_GPU_ZAP_SHADER_RESUME, GPU_PAS_ID);
if (ret)
DRM_ERROR("%s: zap-shader resume failed: %d\n",
gpu->name, ret);
return ret;
}
static int a5xx_zap_shader_init(struct msm_gpu *gpu)
{
static bool loaded;
int ret;
/*
* If the zap shader is already loaded into memory we just need to kick
* the remote processor to reinitialize it
*/
if (loaded)
return a5xx_zap_shader_resume(gpu);
ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);
loaded = !ret;
return ret;
}
#define A5XX_INT_MASK (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \
A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT | \
A5XX_RBBM_INT_0_MASK_CP_SW | \
A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
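/*
 * One-time hardware bringup: program the VBIF/RBBM/UCHE/CP setup registers,
 * set up the CP protected register ranges, load the microcode and start the
 * micro engine, then try to load the zap shader so the GPU can drop out of
 * secure mode.
 */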
static int a5xx_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
u32 regbit;
int ret;
gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) ||
adreno_is_a540(adreno_gpu))
gpu_write(gpu, REG_A5XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
/* Make all blocks contribute to the GPU BUSY perf counter */
gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);
/* Enable RBBM error reporting bits */
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001);
if (adreno_gpu->info->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
/*
* Mask out the activity signals from RB1-3 to avoid false
* positives
*/
gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11,
0xF0000000);
gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12,
0xFFFFFFFF);
gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13,
0xFFFFFFFF);
gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14,
0xFFFFFFFF);
gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15,
0xFFFFFFFF);
gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16,
0xFFFFFFFF);
gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17,
0xFFFFFFFF);
gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18,
0xFFFFFFFF);
}
/* Enable fault detection */
gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL,
(1 << 30) | 0xFFFF);
/* Turn on performance counters */
gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_CNTL, 0x01);
/* Select CP0 to always count cycles */
gpu_write(gpu, REG_A5XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
/* Select RBBM0 to countable 6 to get the busy status for devfreq */
gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0, 6);
/* Increase VFD cache access so LRZ and other data gets evicted less */
gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);
/* Disable L2 bypass in the UCHE */
gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000);
gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF);
gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000);
gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF);
/* Set the GMEM VA range (0 to gpu->gmem) */
gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000);
gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_HI, 0x00000000);
gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_LO,
0x00100000 + adreno_gpu->info->gmem - 1);
gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);
if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) ||
adreno_is_a510(adreno_gpu)) {
gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x20);
if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu))
gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
else
gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20);
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x40000030);
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x20100D0A);
} else {
gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
if (adreno_is_a530(adreno_gpu))
gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
else
gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
}
if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu))
gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
(0x100 << 11 | 0x100 << 22));
else if (adreno_is_a509(adreno_gpu) || adreno_is_a510(adreno_gpu) ||
adreno_is_a512(adreno_gpu))
gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
(0x200 << 11 | 0x200 << 22));
else
gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
(0x400 << 11 | 0x300 << 22));
if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
/*
* Disable the RB sampler datapath DP2 clock gating optimization
* for 1-SP GPUs, as it is enabled by default.
*/
if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) ||
adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu))
gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, 0, (1 << 9));
/* Disable UCHE global filter as SP can invalidate/flush independently */
gpu_write(gpu, REG_A5XX_UCHE_MODE_CNTL, BIT(29));
/* Enable USE_RETENTION_FLOPS */
gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);
/* Enable ME/PFP split notification */
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
	/*
	 * On A5xx the CCU can send a context_done event for a particular
	 * context to the UCHE (which ultimately reaches the CP) even while
	 * there is still a valid transaction for that context inside the CCU.
	 * This can let the CP program config registers, which makes the "valid
	 * transaction" inside the CCU be interpreted differently and can cause
	 * a GPU fault. This bug is fixed in the latest A510 revision; to enable
	 * the fix, bit[11] of RB_DBG_ECO_CNTL needs to be set to 0 (the default
	 * is 1, i.e. disabled). On older A510 revisions this bit is unused.
	 */
if (adreno_is_a510(adreno_gpu))
gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, (1 << 11), 0);
/* Enable HWCG */
a5xx_set_hwcg(gpu, true);
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
/* Set the highest bank bit */
if (adreno_is_a540(adreno_gpu) || adreno_is_a530(adreno_gpu))
regbit = 2;
else
regbit = 1;
gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, regbit << 7);
gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, regbit << 1);
if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) ||
adreno_is_a540(adreno_gpu))
gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, regbit);
/* Disable All flat shading optimization (ALLFLATOPTDIS) */
gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, (1 << 10));
/* Protect registers from the CP */
gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007);
/* RBBM */
gpu_write(gpu, REG_A5XX_CP_PROTECT(0), ADRENO_PROTECT_RW(0x04, 4));
gpu_write(gpu, REG_A5XX_CP_PROTECT(1), ADRENO_PROTECT_RW(0x08, 8));
gpu_write(gpu, REG_A5XX_CP_PROTECT(2), ADRENO_PROTECT_RW(0x10, 16));
gpu_write(gpu, REG_A5XX_CP_PROTECT(3), ADRENO_PROTECT_RW(0x20, 32));
gpu_write(gpu, REG_A5XX_CP_PROTECT(4), ADRENO_PROTECT_RW(0x40, 64));
gpu_write(gpu, REG_A5XX_CP_PROTECT(5), ADRENO_PROTECT_RW(0x80, 64));
/* Content protect */
gpu_write(gpu, REG_A5XX_CP_PROTECT(6),
ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
16));
gpu_write(gpu, REG_A5XX_CP_PROTECT(7),
ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TRUST_CNTL, 2));
/* CP */
gpu_write(gpu, REG_A5XX_CP_PROTECT(8), ADRENO_PROTECT_RW(0x800, 64));
gpu_write(gpu, REG_A5XX_CP_PROTECT(9), ADRENO_PROTECT_RW(0x840, 8));
gpu_write(gpu, REG_A5XX_CP_PROTECT(10), ADRENO_PROTECT_RW(0x880, 32));
gpu_write(gpu, REG_A5XX_CP_PROTECT(11), ADRENO_PROTECT_RW(0xAA0, 1));
/* RB */
gpu_write(gpu, REG_A5XX_CP_PROTECT(12), ADRENO_PROTECT_RW(0xCC0, 1));
gpu_write(gpu, REG_A5XX_CP_PROTECT(13), ADRENO_PROTECT_RW(0xCF0, 2));
/* VPC */
gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8));
gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 16));
/* UCHE */
gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));
/* SMMU */
gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
ADRENO_PROTECT_RW(0x10000, 0x8000));
gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0);
	/*
	 * Disable the trusted memory range - we don't actually support secure
	 * memory rendering at this point in time and we don't want to block off
	 * part of the virtual memory space.
	 */
gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, 0x00000000);
gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
/* Put the GPU into 64 bit by default */
gpu_write(gpu, REG_A5XX_CP_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A5XX_VSC_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A5XX_GRAS_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A5XX_RB_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A5XX_PC_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A5XX_HLSQ_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A5XX_VFD_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A5XX_VPC_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A5XX_UCHE_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A5XX_SP_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A5XX_TPL1_ADDR_MODE_CNTL, 0x1);
gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
/*
* VPC corner case with local memory load kill leads to corrupt
* internal state. Normal Disable does not work for all a5x chips.
* So do the following setting to disable it.
*/
if (adreno_gpu->info->quirks & ADRENO_QUIRK_LMLOADKILL_DISABLE) {
gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, BIT(23));
gpu_rmw(gpu, REG_A5XX_HLSQ_DBG_ECO_CNTL, BIT(18), 0);
}
ret = adreno_hw_init(gpu);
if (ret)
return ret;
if (adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))
a5xx_gpmu_ucode_init(gpu);
gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO, a5xx_gpu->pm4_iova);
gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO, a5xx_gpu->pfp_iova);
/* Set the ringbuffer address */
gpu_write64(gpu, REG_A5XX_CP_RB_BASE, gpu->rb[0]->iova);
/*
* If the microcode supports the WHERE_AM_I opcode then we can use that
* in lieu of the RPTR shadow and enable preemption. Otherwise, we
* can't safely use the RPTR shadow or preemption. In either case, the
* RPTR shadow should be disabled in hardware.
*/
gpu_write(gpu, REG_A5XX_CP_RB_CNTL,
MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
/* Configure the RPTR shadow if needed: */
if (a5xx_gpu->shadow_bo) {
gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR,
shadowptr(a5xx_gpu, gpu->rb[0]));
}
a5xx_preempt_hw_init(gpu);
/* Disable the interrupts through the initial bringup stage */
gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);
/* Clear ME_HALT to start the micro engine */
gpu_write(gpu, REG_A5XX_CP_PFP_ME_CNTL, 0);
ret = a5xx_me_init(gpu);
if (ret)
return ret;
ret = a5xx_power_init(gpu);
if (ret)
return ret;
/*
* Send a pipeline event stat to get misbehaving counters to start
* ticking correctly
*/
if (adreno_is_a530(adreno_gpu)) {
OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1);
OUT_RING(gpu->rb[0], CP_EVENT_WRITE_0_EVENT(STAT_EVENT));
a5xx_flush(gpu, gpu->rb[0], true);
if (!a5xx_idle(gpu, gpu->rb[0]))
return -EINVAL;
}
	/*
	 * If the chip we are using supports loading a zap shader, try to load
	 * one into the secure world. If successful we can use the CP to switch
	 * out of secure mode. If not, we have no recourse but to try to switch
	 * ourselves out manually. If we guessed wrong then access to the
	 * RBBM_SECVID_TRUST_CNTL register will be blocked and a permissions
	 * violation will soon follow.
	 */
ret = a5xx_zap_shader_init(gpu);
if (!ret) {
OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
OUT_RING(gpu->rb[0], 0x00000000);
a5xx_flush(gpu, gpu->rb[0], true);
if (!a5xx_idle(gpu, gpu->rb[0]))
return -EINVAL;
} else if (ret == -ENODEV) {
		/*
		 * This device does not use a zap shader (but print a warning
		 * just in case someone got their DT wrong... hopefully they
		 * have a debug UART to realize the error of their ways...
		 * if you mess this up you are about to crash horribly)
		 */
dev_warn_once(gpu->dev->dev,
"Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
} else {
return ret;
}
/* Last step - yield the ringbuffer */
a5xx_preempt_start(gpu);
return 0;
}
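/*
 * Hang recovery: dump the CP scratch registers for debugging, pulse
 * RBBM_SW_RESET_CMD to soft-reset the GPU and hand off to adreno_recover()
 * to reinitialize the hardware.
 */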
static void a5xx_recover(struct msm_gpu *gpu)
{
int i;
adreno_dump_info(gpu);
for (i = 0; i < 8; i++) {
printk("CP_SCRATCH_REG%d: %u\n", i,
gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(i)));
}
if (hang_debug)
a5xx_dump(gpu);
gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 1);
gpu_read(gpu, REG_A5XX_RBBM_SW_RESET_CMD);
gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 0);
adreno_recover(gpu);
}
static void a5xx_destroy(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
DBG("%s", gpu->name);
a5xx_preempt_fini(gpu);
if (a5xx_gpu->pm4_bo) {
msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
drm_gem_object_put(a5xx_gpu->pm4_bo);
}
if (a5xx_gpu->pfp_bo) {
msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
drm_gem_object_put(a5xx_gpu->pfp_bo);
}
if (a5xx_gpu->gpmu_bo) {
msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
drm_gem_object_put(a5xx_gpu->gpmu_bo);
}
if (a5xx_gpu->shadow_bo) {
msm_gem_unpin_iova(a5xx_gpu->shadow_bo, gpu->aspace);
drm_gem_object_put(a5xx_gpu->shadow_bo);
}
adreno_gpu_cleanup(adreno_gpu);
kfree(a5xx_gpu);
}
static inline bool _a5xx_check_idle(struct msm_gpu *gpu)
{
if (gpu_read(gpu, REG_A5XX_RBBM_STATUS) & ~A5XX_RBBM_STATUS_HI_BUSY)
return false;
/*
* Nearly every abnormality ends up pausing the GPU and triggering a
* fault so we can safely just watch for this one interrupt to fire
*/
return !(gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS) &
A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT);
}
bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
if (ring != a5xx_gpu->cur_ring) {
WARN(1, "Tried to idle a non-current ringbuffer\n");
return false;
}
/* wait for CP to drain ringbuffer: */
if (!adreno_idle(gpu, ring))
return false;
if (spin_until(_a5xx_check_idle(gpu))) {
DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
gpu->name, __builtin_return_address(0),
gpu_read(gpu, REG_A5XX_RBBM_STATUS),
gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS),
gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
gpu_read(gpu, REG_A5XX_CP_RB_WPTR));
return false;
}
return true;
}
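/*
 * SMMU pagefault callback: snapshot CP scratch registers 4-7 and the fault
 * syndrome (FSYNR1) so adreno_fault_handler() can report which block caused
 * the fault.
 */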
static int a5xx_fault_handler(void *arg, unsigned long iova, int flags, void *data)
{
struct msm_gpu *gpu = arg;
struct adreno_smmu_fault_info *info = data;
char block[12] = "unknown";
u32 scratch[] = {
gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(4)),
gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(5)),
gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(6)),
gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(7)),
};
if (info)
snprintf(block, sizeof(block), "%x", info->fsynr1);
return adreno_fault_handler(gpu, iova, flags, info, block, scratch);
}
static void a5xx_cp_err_irq(struct msm_gpu *gpu)
{
u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS);
if (status & A5XX_CP_INT_CP_OPCODE_ERROR) {
u32 val;
gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, 0);
/*
* REG_A5XX_CP_PFP_STAT_DATA is indexed, and we want index 1 so
* read it twice
*/
gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
val = gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
dev_err_ratelimited(gpu->dev->dev, "CP | opcode error | possible opcode=0x%8.8X\n",
val);
}
if (status & A5XX_CP_INT_CP_HW_FAULT_ERROR)
dev_err_ratelimited(gpu->dev->dev, "CP | HW fault | status=0x%8.8X\n",
gpu_read(gpu, REG_A5XX_CP_HW_FAULT));
if (status & A5XX_CP_INT_CP_DMA_ERROR)
dev_err_ratelimited(gpu->dev->dev, "CP | DMA error\n");
if (status & A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
u32 val = gpu_read(gpu, REG_A5XX_CP_PROTECT_STATUS);
dev_err_ratelimited(gpu->dev->dev,
"CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
val & (1 << 24) ? "WRITE" : "READ",
(val & 0xFFFFF) >> 2, val);
}
if (status & A5XX_CP_INT_CP_AHB_ERROR) {
u32 status = gpu_read(gpu, REG_A5XX_CP_AHB_FAULT);
const char *access[16] = { "reserved", "reserved",
"timestamp lo", "timestamp hi", "pfp read", "pfp write",
"", "", "me read", "me write", "", "", "crashdump read",
"crashdump write" };
dev_err_ratelimited(gpu->dev->dev,
"CP | AHB error | addr=%X access=%s error=%d | status=0x%8.8X\n",
status & 0xFFFFF, access[(status >> 24) & 0xF],
(status & (1 << 31)), status);
}
}
static void a5xx_rbbm_err_irq(struct msm_gpu *gpu, u32 status)
{
if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) {
u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS);
dev_err_ratelimited(gpu->dev->dev,
"RBBM | AHB bus error | %s | addr=0x%X | ports=0x%X:0x%X\n",
val & (1 << 28) ? "WRITE" : "READ",
(val & 0xFFFFF) >> 2, (val >> 20) & 0x3,
(val >> 24) & 0xF);
/* Clear the error */
gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4));
/* Clear the interrupt */
gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
}
if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT)
dev_err_ratelimited(gpu->dev->dev, "RBBM | AHB transfer timeout\n");
if (status & A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT)
dev_err_ratelimited(gpu->dev->dev, "RBBM | ME master split | status=0x%X\n",
gpu_read(gpu, REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS));
if (status & A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT)
dev_err_ratelimited(gpu->dev->dev, "RBBM | PFP master split | status=0x%X\n",
gpu_read(gpu, REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS));
if (status & A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT)
dev_err_ratelimited(gpu->dev->dev, "RBBM | ETS master split | status=0x%X\n",
gpu_read(gpu, REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS));
if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB ASYNC overflow\n");
if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB bus overflow\n");
}
static void a5xx_uche_err_irq(struct msm_gpu *gpu)
{
uint64_t addr = (uint64_t) gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_HI);
addr |= gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_LO);
dev_err_ratelimited(gpu->dev->dev, "UCHE | Out of bounds access | addr=0x%llX\n",
addr);
}
static void a5xx_gpmu_err_irq(struct msm_gpu *gpu)
{
dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n");
}
static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
{
struct drm_device *dev = gpu->dev;
struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
/*
* If stalled on SMMU fault, we could trip the GPU's hang detection,
* but the fault handler will trigger the devcore dump, and we want
* to otherwise resume normally rather than killing the submit, so
* just bail.
*/
if (gpu_read(gpu, REG_A5XX_RBBM_STATUS3) & BIT(24))
return;
DRM_DEV_ERROR(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0,
gpu_read(gpu, REG_A5XX_RBBM_STATUS),
gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
gpu_read64(gpu, REG_A5XX_CP_IB1_BASE),
gpu_read(gpu, REG_A5XX_CP_IB1_BUFSZ),
gpu_read64(gpu, REG_A5XX_CP_IB2_BASE),
gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ));
/* Turn off the hangcheck timer to keep it from bothering us */
del_timer(&gpu->hangcheck_timer);
kthread_queue_work(gpu->worker, &gpu->recover_work);
}
#define RBBM_ERROR_MASK \
(A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
{
struct msm_drm_private *priv = gpu->dev->dev_private;
u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);
/*
* Clear all the interrupts except RBBM_AHB_ERROR - if we clear it
* before the source is cleared the interrupt will storm.
*/
gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
if (priv->disable_err_irq) {
status &= A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS |
A5XX_RBBM_INT_0_MASK_CP_SW;
}
/* Pass status to a5xx_rbbm_err_irq because we've already cleared it */
if (status & RBBM_ERROR_MASK)
a5xx_rbbm_err_irq(gpu, status);
if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
a5xx_cp_err_irq(gpu);
if (status & A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT)
a5xx_fault_detect_irq(gpu);
if (status & A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
a5xx_uche_err_irq(gpu);
if (status & A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
a5xx_gpmu_err_irq(gpu);
if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) {
a5xx_preempt_trigger(gpu);
msm_gpu_retire(gpu);
}
if (status & A5XX_RBBM_INT_0_MASK_CP_SW)
a5xx_preempt_irq(gpu);
return IRQ_HANDLED;
}
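/*
 * Inclusive (start, end) register ranges, terminated by ~0, that the adreno
 * core snapshots for register dumps and GPU state capture.
 */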
static const u32 a5xx_registers[] = {
0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841,
0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28,
0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53,
0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98,
0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585,
0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8,
0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E,
0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545,
0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0,
0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57,
0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8,
0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9,
0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201,
0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A,
0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F,
0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0,
0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947,
0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7,
0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68,
0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB,
0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05,
0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3,
0xEAA5, 0xEAC2, 0xA800, 0xA800, 0xA820, 0xA828, 0xA840, 0xA87D,
0XA880, 0xA88D, 0xA890, 0xA8A3, 0xA8D0, 0xA8D8, 0xA8E0, 0xA8F5,
0xAC60, 0xAC60, ~0,
};
static void a5xx_dump(struct msm_gpu *gpu)
{
DRM_DEV_INFO(gpu->dev->dev, "status: %08x\n",
gpu_read(gpu, REG_A5XX_RBBM_STATUS));
adreno_dump(gpu);
}
static int a5xx_pm_resume(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int ret;
/* Turn on the core power */
ret = msm_gpu_pm_resume(gpu);
if (ret)
return ret;
	/*
	 * Adreno 506, 508, 509, 510 and 512 need manual RBBM
	 * suspend/resume control
	 */
if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))) {
/* Halt the sp_input_clk at HM level */
gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0x00000055);
a5xx_set_hwcg(gpu, true);
/* Turn on sp_input_clk at HM level */
gpu_rmw(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xff, 0);
return 0;
}
	/* Turn on the RBCCU domain first to limit the chances of voltage droop */
gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);
/* Wait 3 usecs before polling */
udelay(3);
ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS,
(1 << 20), (1 << 20));
if (ret) {
DRM_ERROR("%s: timeout waiting for RBCCU GDSC enable: %X\n",
gpu->name,
gpu_read(gpu, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS));
return ret;
}
/* Turn on the SP domain */
gpu_write(gpu, REG_A5XX_GPMU_SP_POWER_CNTL, 0x778000);
ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_SP_PWR_CLK_STATUS,
(1 << 20), (1 << 20));
if (ret)
DRM_ERROR("%s: timeout waiting for SP GDSC enable\n",
gpu->name);
return ret;
}
static int a5xx_pm_suspend(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
u32 mask = 0xf;
int i, ret;
/* A506, A508, A510 have 3 XIN ports in VBIF */
if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) ||
adreno_is_a510(adreno_gpu))
mask = 0x7;
/* Clear the VBIF pipe before shutting down */
gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, mask);
spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) &
mask) == mask);
gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);
	/*
	 * Reset the VBIF before power collapse to avoid issues with FIFO
	 * entries on Adreno A510 and A530 (the others will tend to lock up)
	 */
if (adreno_is_a510(adreno_gpu) || adreno_is_a530(adreno_gpu)) {
gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);
}
ret = msm_gpu_pm_suspend(gpu);
if (ret)
return ret;
if (a5xx_gpu->has_whereami)
for (i = 0; i < gpu->nr_rings; i++)
a5xx_gpu->shadow[i] = 0;
return 0;
}
static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
*value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO);
return 0;
}
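/*
 * Scratch buffer used to run a CP crash dump script: the script is placed at
 * the start of the buffer and the captured register data is written further
 * in (see a5xx_gpu_state_get_hlsq_regs()).
 */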
struct a5xx_crashdumper {
void *ptr;
struct drm_gem_object *bo;
u64 iova;
};
struct a5xx_gpu_state {
struct msm_gpu_state base;
u32 *hlsqregs;
};
static int a5xx_crashdumper_init(struct msm_gpu *gpu,
struct a5xx_crashdumper *dumper)
{
dumper->ptr = msm_gem_kernel_new(gpu->dev,
SZ_1M, MSM_BO_WC, gpu->aspace,
&dumper->bo, &dumper->iova);
if (!IS_ERR(dumper->ptr))
msm_gem_object_set_name(dumper->bo, "crashdump");
return PTR_ERR_OR_ZERO(dumper->ptr);
}
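/* Point the CP at the crashdump script, kick it off and poll for completion */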
static int a5xx_crashdumper_run(struct msm_gpu *gpu,
struct a5xx_crashdumper *dumper)
{
u32 val;
if (IS_ERR_OR_NULL(dumper->ptr))
return -EINVAL;
gpu_write64(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO, dumper->iova);
gpu_write(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, 1);
return gpu_poll_timeout(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, val,
val & 0x04, 100, 10000);
}
/*
 * This is a list of the registers that need to be read through the HLSQ
 * aperture via the crashdumper. They are not normally accessible from
 * the CPU on a secure platform.
 */
static const struct {
u32 type;
u32 regoffset;
u32 count;
} a5xx_hlsq_aperture_regs[] = {
	{ 0x35, 0xe00, 0x32 }, /* HLSQ non-context */
{ 0x31, 0x2080, 0x1 }, /* HLSQ 2D context 0 */
{ 0x33, 0x2480, 0x1 }, /* HLSQ 2D context 1 */
{ 0x32, 0xe780, 0x62 }, /* HLSQ 3D context 0 */
{ 0x34, 0xef80, 0x62 }, /* HLSQ 3D context 1 */
{ 0x3f, 0x0ec0, 0x40 }, /* SP non-context */
{ 0x3d, 0x2040, 0x1 }, /* SP 2D context 0 */
{ 0x3b, 0x2440, 0x1 }, /* SP 2D context 1 */
{ 0x3e, 0xe580, 0x170 }, /* SP 3D context 0 */
{ 0x3c, 0xed80, 0x170 }, /* SP 3D context 1 */
{ 0x3a, 0x0f00, 0x1c }, /* TP non-context */
{ 0x38, 0x2000, 0xa }, /* TP 2D context 0 */
{ 0x36, 0x2400, 0xa }, /* TP 2D context 1 */
{ 0x39, 0xe700, 0x80 }, /* TP 3D context 0 */
{ 0x37, 0xef00, 0x80 }, /* TP 3D context 1 */
};
static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
struct a5xx_gpu_state *a5xx_state)
{
struct a5xx_crashdumper dumper = { 0 };
u32 offset, count = 0;
u64 *ptr;
int i;
if (a5xx_crashdumper_init(gpu, &dumper))
return;
/* The script will be written at offset 0 */
ptr = dumper.ptr;
/* Start writing the data at offset 256k */
offset = dumper.iova + (256 * SZ_1K);
/* Count how many additional registers to get from the HLSQ aperture */
for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++)
count += a5xx_hlsq_aperture_regs[i].count;
a5xx_state->hlsqregs = kcalloc(count, sizeof(u32), GFP_KERNEL);
if (!a5xx_state->hlsqregs)
return;
/* Build the crashdump script */
for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
u32 type = a5xx_hlsq_aperture_regs[i].type;
u32 c = a5xx_hlsq_aperture_regs[i].count;
/* Write the register to select the desired bank */
*ptr++ = ((u64) type << 8);
*ptr++ = (((u64) REG_A5XX_HLSQ_DBG_READ_SEL) << 44) |
(1 << 21) | 1;
*ptr++ = offset;
*ptr++ = (((u64) REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE) << 44)
| c;
offset += c * sizeof(u32);
}
/* Write two zeros to close off the script */
*ptr++ = 0;
*ptr++ = 0;
if (a5xx_crashdumper_run(gpu, &dumper)) {
kfree(a5xx_state->hlsqregs);
msm_gem_kernel_put(dumper.bo, gpu->aspace);
return;
}
/* Copy the data from the crashdumper to the state */
memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K),
count * sizeof(u32));
msm_gem_kernel_put(dumper.bo, gpu->aspace);
}
static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
{
struct a5xx_gpu_state *a5xx_state = kzalloc(sizeof(*a5xx_state),
GFP_KERNEL);
bool stalled = !!(gpu_read(gpu, REG_A5XX_RBBM_STATUS3) & BIT(24));
if (!a5xx_state)
return ERR_PTR(-ENOMEM);
/* Temporarily disable hardware clock gating before reading the hw */
a5xx_set_hwcg(gpu, false);
/* First get the generic state from the adreno core */
adreno_gpu_state_get(gpu, &(a5xx_state->base));
a5xx_state->base.rbbm_status = gpu_read(gpu, REG_A5XX_RBBM_STATUS);
/*
* Get the HLSQ regs with the help of the crashdumper, but only if
* we are not stalled in an iommu fault (in which case the crashdumper
* would not have access to memory)
*/
if (!stalled)
a5xx_gpu_state_get_hlsq_regs(gpu, a5xx_state);
a5xx_set_hwcg(gpu, true);
return &a5xx_state->base;
}
static void a5xx_gpu_state_destroy(struct kref *kref)
{
struct msm_gpu_state *state = container_of(kref,
struct msm_gpu_state, ref);
struct a5xx_gpu_state *a5xx_state = container_of(state,
struct a5xx_gpu_state, base);
kfree(a5xx_state->hlsqregs);
adreno_gpu_state_destroy(state);
kfree(a5xx_state);
}
static int a5xx_gpu_state_put(struct msm_gpu_state *state)
{
if (IS_ERR_OR_NULL(state))
return 1;
return kref_put(&state->ref, a5xx_gpu_state_destroy);
}
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
static void a5xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
struct drm_printer *p)
{
int i, j;
u32 pos = 0;
struct a5xx_gpu_state *a5xx_state = container_of(state,
struct a5xx_gpu_state, base);
if (IS_ERR_OR_NULL(state))
return;
adreno_show(gpu, state, p);
/* Dump the additional a5xx HLSQ registers */
if (!a5xx_state->hlsqregs)
return;
drm_printf(p, "registers-hlsq:\n");
for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
u32 o = a5xx_hlsq_aperture_regs[i].regoffset;
u32 c = a5xx_hlsq_aperture_regs[i].count;
for (j = 0; j < c; j++, pos++, o++) {
/*
* To keep the crashdump simple we pull the entire range
* for each register type but not all of the registers
* in the range are valid. Fortunately invalid registers
* stick out like a sore thumb with a value of
* 0xdeadbeef
*/
if (a5xx_state->hlsqregs[pos] == 0xdeadbeef)
continue;
drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n",
o << 2, a5xx_state->hlsqregs[pos]);
}
}
}
#endif
static struct msm_ringbuffer *a5xx_active_ring(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
return a5xx_gpu->cur_ring;
}
static u64 a5xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate)
{
u64 busy_cycles;
busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO);
*out_sample_rate = clk_get_rate(gpu->core_clk);
return busy_cycles;
}
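/*
 * Prefer the WHERE_AM_I shadow value for the ring read pointer; otherwise
 * fall back to reading CP_RB_RPTR directly.
 */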
static uint32_t a5xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
if (a5xx_gpu->has_whereami)
return a5xx_gpu->shadow[ring->id];
return ring->memptrs->rptr = gpu_read(gpu, REG_A5XX_CP_RB_RPTR);
}
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
.set_param = adreno_set_param,
.hw_init = a5xx_hw_init,
.ucode_load = a5xx_ucode_load,
.pm_suspend = a5xx_pm_suspend,
.pm_resume = a5xx_pm_resume,
.recover = a5xx_recover,
.submit = a5xx_submit,
.active_ring = a5xx_active_ring,
.irq = a5xx_irq,
.destroy = a5xx_destroy,
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
.show = a5xx_show,
#endif
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = a5xx_debugfs_init,
#endif
.gpu_busy = a5xx_gpu_busy,
.gpu_state_get = a5xx_gpu_state_get,
.gpu_state_put = a5xx_gpu_state_put,
.create_address_space = adreno_create_address_space,
.get_rptr = a5xx_get_rptr,
},
.get_timestamp = a5xx_get_timestamp,
};
static void check_speed_bin(struct device *dev)
{
struct nvmem_cell *cell;
u32 val;
	/*
	 * If the OPP table specifies an opp-supported-hw property then we have
	 * to set something with dev_pm_opp_set_supported_hw() or the table
	 * doesn't get populated, so pick an arbitrary value that should ensure
	 * the default frequencies are selected without conflicting with any
	 * actual bins
	 */
val = 0x80;
cell = nvmem_cell_get(dev, "speed_bin");
if (!IS_ERR(cell)) {
void *buf = nvmem_cell_read(cell, NULL);
if (!IS_ERR(buf)) {
u8 bin = *((u8 *) buf);
val = (1 << bin);
kfree(buf);
}
nvmem_cell_put(cell);
}
devm_pm_opp_set_supported_hw(dev, &val, 1);
}
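/*
 * Probe-time constructor: allocate the a5xx_gpu wrapper, pick the number of
 * rings (one on A510, four otherwise), register the SMMU fault handler and
 * set up preemption.
 */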
struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
struct adreno_platform_config *config = pdev->dev.platform_data;
struct a5xx_gpu *a5xx_gpu = NULL;
struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
unsigned int nr_rings;
int ret;
if (!pdev) {
DRM_DEV_ERROR(dev->dev, "No A5XX device is defined\n");
return ERR_PTR(-ENXIO);
}
a5xx_gpu = kzalloc(sizeof(*a5xx_gpu), GFP_KERNEL);
if (!a5xx_gpu)
return ERR_PTR(-ENOMEM);
adreno_gpu = &a5xx_gpu->base;
gpu = &adreno_gpu->base;
adreno_gpu->registers = a5xx_registers;
a5xx_gpu->lm_leakage = 0x4E001A;
check_speed_bin(&pdev->dev);
nr_rings = 4;
if (config->info->revn == 510)
nr_rings = 1;
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, nr_rings);
if (ret) {
a5xx_destroy(&(a5xx_gpu->base.base));
return ERR_PTR(ret);
}
if (gpu->aspace)
msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler);
/* Set up the preemption specific bits and pieces for each ringbuffer */
a5xx_preempt_init(gpu);
return gpu;
}
| linux-master | drivers/gpu/drm/msm/adreno/a5xx_gpu.c |